diff --git a/.ChangeLog_template b/.ChangeLog_template
new file mode 100644
index 0000000000..5186c33143
--- /dev/null
+++ b/.ChangeLog_template
@@ -0,0 +1,102 @@
+===============================================================
+Tag name:
+Originator(s):
+Date:
+One-line Summary:
+
+Purpose of changes:
+
+Requirements for tag:
+
+Test level of tag: regular, short, tools, build_namelist, doc
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist:
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by:
+
+List any svn externals directories updated (cime, rtm, cism, etc.):
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+CLM testing:
+
+[... Remove before making trunk_tag. Available test levels:
+
+ a) regular (must be run before handing off a tag to SEs and must be run
+ before committing a tag)
+    b) build_namelist (if namelists and/or build_system changed)
+ c) tools (only if tools are modified and no CLM source is modified)
+ d) short (for use during development and in rare cases where only a small
+ change with known behavior is added ... eg. a minor bug fix)
+ e) doc (no source testing required)
+
+... ]
+
+ build-namelist tests:
+
+ yellowstone
+
+ unit-tests (components/clm/src):
+
+ yellowstone
+
+ tools-tests (components/clm/test/tools):
+
+ yellowstone
+
+ PTCLM testing (components/clm/tools/shared/PTCLM/test):
+
+ yellowstone
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel
+ yellowstone_pgi
+ yellowstone_gnu (clm45 only)
+ hobart_nag
+
+CLM tag used for the baseline comparisons:
+
+Changes answers relative to baseline:
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000000..2ec83ff776
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,46679 @@
+===============================================================
+Tag name: clm4_5_1_r120
+Originator(s): andre (Benjamin Andre,UCAR/CSEG,303-497-1391)
+Date: Sat Aug 29 22:58:57 MDT 2015
+One-line Summary: CLM 5 nitrogen models Flexible CN and LUNA
+
+Purpose of changes:
+ CLM 5 nitrogen models Flexible CN (Bardan Ghimire, LBNL)
+ and LUNA (Chonggang Xu, LANL). The LUNA model predicts
+    photosynthetic capacities as measured by Vcmax25 and Jmax25
+ under different environmental conditions (see Ali et al 2015).
+
+Requirements for tag: regular
+
+Bugs fixed (include bugzilla ID): none
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/ - 2208
+ https://github.com/CESM-Development/cime/issues - 115, 116
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist:
+ addition of use_luna and use_flexibleCN. use_flexibleCN adds
+ additional namelist options in the clm_nitrogen group. See xml
+ definitions file for details.
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self, Bardan Ghimire, Chonggang Xu
+
+List any svn externals directories updated (cime, rtm, cism, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do:
+
+ src/biogeochem/NutrientCompetitionFlexibleCNMod.F90 - flexibleCN.
+ cimetest/testmods_dirs/clm/flexibleCN - flexible cn regression test
+ src/biogeophys/LunaMod.F90 - luna model
+ cimetest/testmods_dirs/clm/luna - luna regression case
+
+List all existing files that have been modified, and describe the changes:
+
+ new namelist controls for flexibleCN and luna, clm_nitrogen namelist group:
+ bld/namelist_files/namelist_definition_clm4_5.xml
+ bld/namelist_files/namelist_defaults_clm4_5.xml
+ bld/CLMBuildNamelist.pm
+ src/main/clm_varctl.F90
+ src/main/controlMod.F90
+
+ flexibleCN
+ src/biogeochem/CNVegCarbonStateType.F90 - Michaelis-Menten Nitrogen uptake
+ src/biogeochem/CNVegNitrogenStateType.F90 - Michaelis-Menten Nitrogen uptake
+
+ src/biogeochem/CNGRespMod.F90 - excess carbon storage
+ src/biogeochem/CNGapMortalityMod.F90 - excess carbon storage
+
+ src/biogeochem/NutrientCompetitionFactoryMod.F90 - add flexible cn option
+    src/biogeochem/NutrientCompetitionMethodMod.F90 - modify interface to accommodate flexiblecn
+ src/biogeochem/CNPhenologyMod.F90 - floating cn evergreen phenology
+ src/biogeochem/CNDriverMod.F90 - update function call args
+ src/main/pftconMod.F90 - flexible cn pft variables
+
+ src/main/histFileMod.F90 - nlev canopy
+ src/main/clm_driver.F90 - update function call args
+ src/main/clm_instMod.F90 - update function call args
+ src/biogeophys/WaterfluxType.F90 - additional water flux vars
+ src/biogeophys/SoilWaterMovementMod.F90 - soil water work around
+
+ LUNA
+ src/main/clm_varcon.F90 - new constant for luna
+ src/biogeophys/PhotosynthesisMod.F90 - luna use of vcmax25 and jmax25
+ src/biogeophys/CanopyFluxesMod.F90 - luna calculation of vcmax25 and jmax25
+ src/main/atm2lndType.F90 - state data needed for luna
+ src/biogeophys/FrictionVelocityMod.F90 - luna variables
+ src/biogeophys/WaterStateType.F90 - luna variables
+ src/biogeophys/TemperatureType.F90 - luna variables
+ src/biogeophys/SolarAbsorbedType.F90 - luna variables
+ src/biogeophys/QSatMod.F90 - saturated vapor pressure density
+ src/biogeophys/SoilHydrologyType.F90 - luna var
+ src/biogeophys/CanopyStateType.F90 - update vcmax and jmax for luna
+
+ cimetest/ExpectedTestFails.xml - update for cime bugs 115 and 116
+ cimetest/testlist_clm.xml - update test list for aux_clm_short, new luna and flexibleCN tests
+
+
+CLM testing: regular, build-namelist
+
+ build-namelist tests:
+
+ yellowstone - unit tests : pass, other pass
+
+ unit-tests (components/clm/src):
+
+ yellowstone - ok
+
+ tools-tests (components/clm/test/tools): n/a
+
+ PTCLM testing (components/clm/tools/shared/PTCLM/test): n/a
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel - 40 ok, 45 ok
+ yellowstone_pgi - 40 ok, 45 ok
+ yellowstone_gnu (clm45 only) ok
+ hobart_nag - not run, tests hang, see bug 2208
+
+ Testing notes:
+
+ * new namelist group clm_nitrogen causes all nlcomp tests to fail
+ * introduces new tests for flexibleCN and luna that do not
+ have baselines in clm4_5_1_r119.
+ * two new expected fails believed to be related to cime issues
+ 115 and 116.
+ * removes the existing aux_clm_short tests and replaces them with
+ a new set of SMS, ERS and ERP tests that are replicated for
+ yellowstone gnu, intel and pgi.
+
+CLM tag used for the baseline comparisons: clm4_5_1_r119
+
+Changes answers relative to baseline: none
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r119
+Originator(s): erik (Erik Kluzek)
+Date: Wed Aug 26 22:29:10 MDT 2015
+One-line Summary: Bring hobart/nag bug fixes to trunk, and fix a few bugs
+
+Purpose of changes:
+
+Bring hobart/nag bug fixes to trunk. Fix ncl6.3.0 bug for getregional script.
+Fix use_c13 bug. Update RTM to handle regional direction files. Make sure _r8
+constants in ED have a decimal point, so the NAG compiler will treat them as
+double-precision rather than as integer*2.
+
+Move testing from goldbach to hobart. For hobart_nag make all of the tests
+on just one node (24 processors).
+
+Requirements for tag: compile run with hobart/nag (fix bugs 2205 and 2199)
+ move testing from goldbach to hobart
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 2206 (PTCLM stopped working with pft number in surface dataset filenames for mksurfdata.pl)
+ 2205 (Problems with some constants in ED for NAG compiler)
+ 2199 (crayftn compiler issue with continuation in middle of string)
+ 2180 (ncl6.3.0 bug for getregional script)
+ 2174 (use_c13 bug, unformatted write caused model to die)
+ 2156 (Update RTM to handle regional direction files)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, klindsay
+
+List any svn externals directories updated (cime, rtm, cism, etc.): PTCLM, cime and rtm
+ cime to cime2.0.07
+ rtm to rtm1_0_52
+ PTCLM to PTCLM2_150826
+
+List all files eliminated: Move goldbach to hobart
+
+D components/clm/test/tools/tests_posttag_goldbach_nompi
+
+List all files added and what they do: Move goldbach to hobart
+
+A components/clm/test/tools/tests_posttag_hobart_nompi
+
+List all existing files that have been modified, and describe the changes:
+
+------------ Move goldbach to hobart, remove PGI option for hobart
+M components/clm/test/tools/test_driver.sh
+
+M components/clm/bld/unit_testers/build-namelist_test.pl -- Fix ED tests so megan off
+
+M components/clm/cimetest/testlist_clm.xml --- Move goldbach tests to hobart
+ Make 2-node hobart_nag tests on a single node
+
+M components/clm/tools/shared/mkmapgrids/mkscripgrid.ncl -- Fix so will
+ work with ncl6.3.0, bug 2180
+
+M components/clm/src/README.unit_testing --- add some notes about unit-testing
+
+------------ Bug 2205, some _r8 constants in ED don't have a decimal point
+------------ and the NAG compiler then treats them as integer*2.
+M components/clm/src/ED/main/EDCLMLinkMod.F90
+M components/clm/src/ED/main/EDRestVectorMod.F90
+M components/clm/src/ED/main/EDInitMod.F90
+M components/clm/src/ED/fire/SFMainMod.F90
+M components/clm/src/ED/biogeophys/EDPhotosynthesisMod.F90
+M components/clm/src/ED/biogeophys/EDBtranMod.F90
+M components/clm/src/ED/biogeochem/EDGrowthFunctionsMod.F90
+M components/clm/src/ED/biogeochem/EDCohortDynamicsMod.F90
+M components/clm/src/ED/biogeochem/EDPhysiologyMod.F90
+M components/clm/src/ED/biogeochem/EDPatchDynamicsMod.F90
+
+------------ Bug 2199, write to iulog was unformatted, which caused the model
+------------ to die after it had already done formatted writes.
+M components/clm/src/soilbiogeochem/SoilBiogeochemCarbonStateType.F90
+M components/clm/src/biogeochem/CNVegCarbonStateType.F90
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ unit-tests (components/clm/src):
+
+ yellowstone yes
+
+ tools testing (components/clm/test/tools):
+
+ yellowstone yes
+
+ PTCLM testing (components/clm/tools/shared/PTCLM/test):
+
+ yellowstone yes
+
+ regular tests (aux_clm40, aux_clm45, aux_clm_short):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ yellowstone_gnu (clm45 only) yes
+ hobart_nag yes
+ hobart_pgi yes
+ hobart_intel yes
+
+CLM tag used for the baseline comparisons: clm4_5_1_r118
+
+Changes answers relative to baseline: no
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r118
+Originator(s): sacks (Bill Sacks)
+Date: Wed Aug 5 16:22:33 MDT 2015
+One-line Summary: Minor rework of glc coupling fields
+
+Purpose of changes:
+
+ This makes CLM compatible with recent CIME changes.
+
+ (1) Use renamed coupler field, in both clm40 and clm45
+
+ (2) In clm45 code, rework clm_cpl_indices to use glc_elevclass_mod (simpler
+ and more robust than the earlier code)
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (cime, rtm, cism, etc.):
+
+ cime1.1.11 -> cime2.0.0
+ cism2_0_09 -> cism2_1_02
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+========= Main changes
+M components/clm/src/cpl/clm_cpl_indices.F90
+M components/clm/src/cpl/lnd_import_export.F90
+M components/clm/src_clm40/main/clm_cpl_indices.F90
+M components/clm/src_clm40/main/lnd_import_export.F90
+
+========= Document new unit testing method needed for yellowstone, due to cime update
+M components/clm/src/README.unit_testing
+
+========= Rework test mods due to a fundamental change in how the forced
+ decrease / increase in glc area works
+M components/clm/cimetest/testmods_dirs/clm/glcMEC_decrease/user_nl_cism
+M components/clm/cimetest/testmods_dirs/clm/glcMEC_increase/user_nl_cism
+
+========= New failures, which seem to be attributable to the cime update,
+ unrelated to my changes. However, the NCK and CME test failures seem
+ dependent on the order in which tests are run, so these problems are
+ hard to reproduce. Running them as single tests leads to PASSes.
+M components/clm/cimetest/ExpectedTestFails.xml
++ CFAIL CME_Ld5.f10_f10.ICN.yellowstone_intel
++ FAIL NCK_Ld1.f10_f10.ICRUCLM45.yellowstone_intel.clm-default
++ RUN ERS_D_Mmpi-serial_Ld5.1x1_brazil.ICLM45ED.yellowstone_gnu.clm-edTest
+
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok - but see caveat below!
+
+As noted above, there are three new test failures. The ERS_D ICLM45ED failure
+seems to be a legitimate bug in CLM. The other two (CFAIL
+CME_Ld5.f10_f10.ICN.yellowstone_intel and FAIL
+NCK_Ld1.f10_f10.ICRUCLM45.yellowstone_intel.clm-default) seem to be intermittent
+failures, likely due to a bug in the test system or elsewhere in cime. These
+sometimes pass and sometimes fail. They always seem to pass when run as single
+tests, but sometimes fail when run as part of a test suite. It's not clear if
+the new cime is to blame directly, or if these are arising now simply because
+tests are being run in a different order.
+
+goldbach-nag does not run out-of-the-box with this tag. However, it should run
+out-of-the-box if you merge in the next commit in cime master
+(4b52ec73086a4290323dddfde6087a6d6d12ab96). I did my changes with that commit
+merged in, but this hadn't come to master in time for me to include it in this
+CLM tag.
+
+CLM tag used for the baseline comparisons: clm4_5_1_r117
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: Configurations with CISM (IG), both CLM4 and CLM45
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ Likely larger than roundoff/same climate (but not investigated closely)
+
+ These changes are due to a complete rework of the coupling between CISM
+ and CLM, manifested as major changes in the CIME and CISM externals. (The
+ changes in CLM are not directly responsible for the answer changes.)
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r117
+Originator(s): sacks (Bill Sacks)
+Date: Tue Jul 28 06:01:04 MDT 2015
+One-line Summary: Repartition rain vs. snow from atmosphere
+
+Purpose of changes:
+
+ Add an option to repartition rain vs. snow from atmosphere based on
+ near-surface temperature. This repartitioning uses a ramp-based partitioning
+ that is also used in datm: we ignore the rain vs. snow partitioning sent from
+ the atmosphere, and generate our own rain vs. snow partitioning. A sensible
+ heat flux is generated to conserve energy with this repartitioning.
+
+ The motivation for this is two-fold:
+
+ (1) There are biases in CAM which cause rain to be generated in cold
+ conditions. This is particularly a problem for glacier surface mass
+ balance in Greenland. Andrew Gettelman has suggested putting in place
+ this workaround in CLM until CAM can find a robust fix.
+
+ (2) With the downscaling to glacier elevation classes, it is useful to have
+ a different rain/snow partitioning in each elevation class.
+
+ This repartitioning is on by default in CLM5, off by default in CLM4.5.
+
+ If / when the CAM bias is fixed, we could potentially change this code so
+ that it just does the repartitioning over the do_smb filter, similarly to the
+ other downscaling in atm2lndMod. (Rather than doing this correction
+ everywhere - which we do now in order to correct the rain vs. snow
+ partitioning bias in CAM.)
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist:
+
+ Removed glcmec_downscale_rain_snow_convert option, added
+  repartition_rain_snow option
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Sean Swenson reviewed the calculation of the sensible heat
+flux correction
+
+List any svn externals directories updated (cime, rtm, cism, etc.):
+
+ cime1.1.10 -> cime1.1.11
+ This creates a new shared routine for partitioning rain vs. snow, now shared
+ between datm and CLM.
+
+List all files eliminated:
+
+List all files added and what they do:
+
+========= Add unit tests for repartitioning of rain vs snow, and supporting
+ utility code
+A components/clm/src/main/test/atm2lnd_test/test_sens_heat_from_precip_conversion.pf
+A components/clm/src/main/test/atm2lnd_test/CMakeLists.txt
+A components/clm/src/main/test/atm2lnd_test/test_partition_precip.pf
+A components/clm/src/main/test/atm2lnd_test
+A components/clm/src/unit_test_shr/unittestArrayMod.F90
+A components/clm/src/unit_test_shr/test/unittestArray_test/CMakeLists.txt
+A components/clm/src/unit_test_shr/test/unittestArray_test/test_unittestArray.pf
+A components/clm/src/unit_test_shr/test/unittestArray_test
+
+List all existing files that have been modified, and describe the changes:
+
+========= Repartition rain vs snow from atmosphere, and add a sensible heat flux
+ correction for energy conservation
+M components/clm/src/biogeophys/EnergyFluxType.F90
+M components/clm/src/main/clm_driver.F90
+M components/clm/src/main/clm_varctl.F90
+M components/clm/src/main/controlMod.F90
+M components/clm/src/main/atm2lndType.F90
+M components/clm/src/main/lnd2atmType.F90
+M components/clm/src/main/atm2lndMod.F90
+M components/clm/src/main/lnd2atmMod.F90
+
+========= Remove glcmec_rain_snow_threshold
+M components/clm/src/main/clm_varcon.F90
+
+========= Remove glcmec_downscale_rain_snow_convert option, add
+ repartition_rain_snow option
+M components/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M components/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M components/clm/bld/CLMBuildNamelist.pm
+
+========= Add an IG CLM5 test
+M components/clm/cimetest/testlist_clm.xml
+
+========= Remove glcmec_downscale_rain_snow_convert setting (which no longer exists)
+M components/clm/cimetest/testmods_dirs/clm/glcMEC_changeFlags/user_nl_clm
+
+========= Add unit tests for repartitioning of rain vs snow, and supporting
+ utility code
+M components/clm/src/main/CMakeLists.txt
+M components/clm/src/main/test/CMakeLists.txt
+M components/clm/src/biogeophys/CMakeLists.txt
+M components/clm/src/unit_test_shr/test/CMakeLists.txt
+M components/clm/src/unit_test_shr/unittestSimpleSubgridSetupsMod.F90
+M components/clm/src/unit_test_shr/CMakeLists.txt
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok (changes namelists, as expected)
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r116
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM5 cases
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ Likely new climate, but not investigated closely
+
+ Answer changes are due to new rain vs. snow partitioning, which is on by
+ default in CLM5.
+
+ Also changes answers for
+ ERP_D_Ld5.f19_g16.ICLM45GLCMEC.yellowstone_intel.clm-glcMEC_changeFlags
+ (expected, since it no longer downscales precip).
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r116
+Originator(s): sacks (Bill Sacks)
+Date: Wed Jul 22 06:39:28 EDT 2015
+One-line Summary: Rename some history fields
+
+Purpose of changes:
+
+ (1) Make QSNOMELT point to qflx_snomelt, as it does in CLM4.0, rather than
+ qflx_snow_drain (previously qflx_snow_melt)
+
+ (2) Turn on QSNOFRZ by default (parallels QSNOMELT)
+
+ (3) For the 3 history fields that have FOO and FOO_NODYNLNDUSE versions:
+ Rename FOO to FOO_TO_COUPLER and FOO_NODYNLNDUSE to FOO. This is at Sean
+ Swenson's suggestion: He points out that the version without the dyn landuse
+ adjustment (and, soon, the sensible heat adjustment from rain/snow
+ conversion) is the one most people will be interested in, so should be the
+ one without the suffix.
+
+ (4) Tweak test lists:
+
+ (a) Move prealpha & prebeta goldbach tests to hobart
+
+ (b) Move an aux_clm45 pgi test to intel: With recent versions of cime
+ (starting with cime1.1.0), threading tests with pgi take a very long time. So
+ this test took 3 hours with pgi, vs 20 min with intel.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ - 2178 (QSNOMELT incorrect in clm4.5)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (cime, rtm, cism, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= See summary above
+M components/clm/src/main/lnd2atmType.F90
+M components/clm/src/biogeophys/WaterfluxType.F90
+M components/clm/src/biogeophys/EnergyFluxType.F90
+M components/clm/cimetest/testlist_clm.xml
+
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: NOT RUN
+ mac: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r115
+
+Changes answers relative to baseline: YES
+
+ Just changes a few diagnostic fields, for CLM4.5 and CLM5:
+
+ - QSNOMELT: changed to qflx_snomelt rather than qflx_snow_drain
+
+ - QRUNOFF: differs for cases with transient landcover
+
+ - FSH: differs for cases with CISM
+
+ - QSNWCPICE: differs for cases with CISM
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r115
+Originator(s): sacks (Bill Sacks)
+Date: Wed Jul 15 05:26:37 MDT 2015
+One-line Summary: Remove redundant code, rename a variable
+
+Purpose of changes:
+
+ (1) Remove some redundant code in SnowHydrologyMod, related to 'void'. This
+ was supposed to be removed a long time ago. (Apparently the issue this was
+ trying to fix was fixed in a different, more robust way.)
+
+ (2) Remove redundant, unused copy of accumulMod in utils/ (newer copy is in
+ main/)
+
+ (3) Rename qflx_snow_melt to qflx_snow_drain, to avoid confusion with the
+ existing qflx_snomelt.
+
+ (4) Clarify documentation of snowdp_col
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (cime, rtm, cism, etc.): none
+
+List all files eliminated:
+
+========= Redundant and unused (copy in main/ is used)
+D components/clm/src/utils/accumulMod.F90
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+========= Rename qflx_snow_melt to qflx_snow_drain
+M components/clm/src/biogeophys/CanopyHydrologyMod.F90
+M components/clm/src/biogeophys/LakeHydrologyMod.F90
+M components/clm/src/biogeophys/BalanceCheckMod.F90
+M components/clm/src/biogeophys/WaterfluxType.F90
+M components/clm/src/biogeophys/SoilTemperatureMod.F90
+M components/clm/src/biogeophys/SnowHydrologyMod.F90
+ - also remove redundant lines of code related to 'void' (see above)
+M components/clm/src/biogeophys/LakeTemperatureMod.F90
+
+========= Clarify documentation for snowdp_col
+M components/clm/src/biogeophys/HydrologyNoDrainageMod.F90
+M components/clm/src/biogeophys/WaterStateType.F90
+
+========= Remove SMS_Lm25.f10_f10.ICLM45BGCCROP.goldbach_nag.clm-reduceOutput
+ (runs out of wall-clock time, and we have sufficient test coverage of
+ that configuration)
+M components/clm/cimetest/testlist_clm.xml
+
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r114
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r114
+Originator(s): sacks (Bill Sacks)
+Date: Fri Jul 10 19:34:57 MDT 2015
+One-line Summary: Update cime external, remove genf90-generated files
+
+Purpose of changes:
+
+ Main purpose is to update the cime external to the version in cesm1_4_beta05.
+
+ This also required updating the unit test build to use genf90 during the
+ build rather than relying on already-generated files.
+
+ Making this change led to some genf90'd files being regenerated in-source
+ during the unit test build, which would lead these files to be updated every
+ time we make a tag. To avoid this annoyance, I have removed the genf90'd
+ files from the repository: These are not needed any more in either the unit
+ test or system builds, and it simplifies things to remove them.
+
+ Then I added an svn:ignore property to ignore files generated by genf90
+ during the unit test build.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (cime, rtm, cism, etc.):
+
+ cime1.0.7 -> cime1.1.10
+
+ Among other things, this brings in Jay's big batch system refactor
+
+List all files eliminated:
+
+========= Remove genf90-generated files, and some scripts that were used to
+ create them (these are now created as part of the unit test or system build)
+D components/clm/src/dyn_subgrid/dynVarMod.F90
+D components/clm/src/dyn_subgrid/dynVarTimeInterpMod.F90
+D components/clm/src/dyn_subgrid/dynVarTimeUninterpMod.F90
+D components/clm/src/dyn_subgrid/do_genf90
+D components/clm/src/unit_test_stubs/utils/do_genf90
+D components/clm/src/unit_test_stubs/utils/restUtilMod_stub.F90
+D components/clm/src/unit_test_stubs/main/ncdio_var.F90
+D components/clm/src/unit_test_stubs/main/ncdio_pio_fake.F90
+D components/clm/src/unit_test_stubs/main/do_genf90
+D components/clm/src/utils/restUtilMod.F90
+D components/clm/src/main/ncdio_pio.F90
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+========= Generate files with genf90 rather than using pre-generated files
+M components/clm/src/CMakeLists.txt
+
+========= Document new, simpler method for building and running the unit tests
+ (thanks largely to new default options in run_tests.py)
+M components/clm/src/README.unit_testing
+
+========= Add svn:ignore property to ignore files generated by genf90 during the
+ unit test build
+ M components/clm/src/dyn_subgrid
+ M components/clm/src/unit_test_stubs/utils
+ M components/clm/src/unit_test_stubs/main
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r112
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: ALL
+ - what platforms/compilers: intel
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ roundoff, according to cime documentation
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A: Trusting Sean Santos's cime documentation.
+
+ These changes were likely due to this change in cime:
+
+ commit 0d7eab6bd112565ba9eb6eb82b74127ae5a5f390
+ Author: Sean Patrick Santos
+ Date: Fri May 15 12:35:31 2015 -0600
+
+ Use our native gamma/erf implementations on Intel
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r113
+Originator(s): sacks (Bill Sacks)
+Date: Thu Jul 9 10:01:13 MDT 2015
+One-line Summary: Support backwards compatibility of restart variable names
+
+Purpose of changes:
+
+ Previously, if a restart variable was renamed, backwards compatibility was
+ implemented in an ad-hoc manner. A key point is that none of these ad-hoc
+ solutions allowed backwards compatibility when running
+ initInterp. (initInterp would just skip any variable if it could not find an
+ exact match on the input [template] file.)
+
+ This tag provides a standard mechanism for putting in place backwards
+ compatibility when renaming a restart variable. This backwards compatibility
+ carries over to initInterp, by communicating the necessary metadata through a
+ new attribute on the restart file: 'varnames_on_old_files'.
+
+ In order to use this new mechanism, give a colon-delimited list of variable
+ names in the varname argument to restartvar. For example, if a restart
+ variable FOO has been renamed to BAR, then specify varname='BAR:FOO'. Note
+ that this list is searched in order, and the first item should be the current
+ restart variable name.
+
+ Also, applied this new mechanism to the recently-added LIQCAN
+ variable. Previously, backwards compatibility of this variable was handled in
+ an ad-hoc manner, which did not work when running initInterp.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Erik
+
+List any svn externals directories updated (cime, rtm, cism, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do:
+
+========= Add module for higher-level netcdf utilities. Currently contains
+ routine for finding a variable on a netcdf file from a list of
+ possible variables. Also add unit tests for this routine.
+A components/clm/src/main/ncdio_utils.F90
+A components/clm/src/main/test/ncdio_utils_test/test_ncdio_utils.pf
+A components/clm/src/main/test/ncdio_utils_test/CMakeLists.txt
+A components/clm/src/main/test/ncdio_utils_test
+
+List all existing files that have been modified, and describe the changes:
+
+========= Allow multiple possible names in reading restart files and in reading
+ the 'input' file in initInterp
+M components/clm/src/utils/restUtilMod.F90.in
+M components/clm/src/utils/restUtilMod.F90
+M components/clm/src/main/initInterp.F90
+
+========= Apply new mechanism to recently-added LIQCAN restart field. Also
+ remove redundant setting of snocan_patch to 0 if it isn't found on the
+ restart file - not needed since initCold is always called.
+M components/clm/src/biogeophys/WaterStateType.F90
+
+========= Changes to support unit testing of ncdio_utils
+M components/clm/src/main/CMakeLists.txt
+M components/clm/src/main/test/CMakeLists.txt
+M components/clm/src/unit_test_stubs/main/ncdio_pio_fake.F90.in
+M components/clm/src/unit_test_stubs/main/ncdio_pio_fake.F90
+
+========= Fix path to genf90 for new cime organization
+M components/clm/src/unit_test_stubs/main/do_genf90
+M components/clm/src/unit_test_stubs/main/ncdio_var.F90
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r112
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r112
+Originator(s): oleson (Keith Oleson,UCAR/TSS,303-497-1332)
+Date: Wed Jul 1 10:14:11 MDT 2015
+One-line Summary: Justin Perket snow on vegetation
+
+Purpose of changes: Incorporate Justin Perket's snow on vegetation changes
+
+Requirements for tag:
+
+Test level of tag: regular, build-namelist, unit_tests
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: Add snowveg_flag item
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Keith Oleson, Justin Perket, Erik Kluzek
+
+List any svn externals directories updated (cime, rtm, cism, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+M components/clm/src/README.unit_testing Add note that instructions are for bash shell
+M components/clm/src/biogeophys/CanopyFluxesMod.F90 snow/liq on vegetation
+M components/clm/src/biogeophys/WaterStateType.F90 history/restart handling for snow/liq on vegetation
+M components/clm/src/biogeophys/BalanceCheckMod.F90 line spaces only
+M components/clm/src/biogeophys/WaterfluxType.F90 history handling for snow on vegetation
+M components/clm/src/biogeophys/CanopyHydrologyMod.F90 snow/liq on vegetation and snowveg_flag handling
+M components/clm/src/biogeophys/SurfaceAlbedoMod.F90 snow on vegetation optical properties
+M components/clm/src/main/controlMod.F90 line spaces only
+M components/clm/bld/test_build_namelist/t/input/namelist_defaults_clm4_5_test.xml snowveg_flag handling
+M components/clm/bld/namelist_files/namelist_defaults_clm4_5.xml snowveg_flag handling
+M components/clm/bld/namelist_files/namelist_definition_clm4_5.xml snowveg_flag handling
+M components/clm/bld/CLMBuildNamelist.pm snowveg_flag handling
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+ All CLM45 and CLM50 tests have namelist differences; this is expected due
+ to addition of new namelist item
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r111
+
+Changes answers relative to baseline: Yes, for CLM50
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM50
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ New climate. See Justin Perket (perketj@umich.edu)
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? NA
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: NA
+
+ URL for LMWG diagnostics output used to validate new climate: NA
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r111
+Originator(s): sacks (Bill Sacks)
+Date: Fri Jun 12 20:19:25 MDT 2015
+One-line Summary: Remove temporary hack to get bfb results in InitSnowLayers
+
+Purpose of changes:
+
+ In order to get bit-for-bit results in clm4_5_1_r110 (relative to r109), we
+ put in place a temporary hack in InitSnowLayers that set dz based on the old
+ equations rather than the new, more general ones - thus avoiding
+ roundoff-level changes. This looked like:
+
+ if (abs(dz(c,0)-3.59_r8) < eps) then ! TODO remove
+ col%dz(c, 0) = snow_depth(c)-col%dz(c,-4)-col%dz(c,-3)-col%dz(c,-2)-col%dz(c,-1)
+ if (abs(dz(c,0)-3.59_r8) > eps) &
+ call endrun(msg=errmsg(__FILE__, __LINE__))
+ endif
+
+ This tag removes that temporary hack.
+
+ In this way, we have separated the answer-changing from non-answer-changing
+ parts of the r110 refactor. Note that the above code confirms that the
+ differences are no larger than roundoff (eps was 1e-9 in the above case, but
+ some tests showed that it could have been much smaller - e.g., ~ 1e-15).
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (cime, rtm, cism, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M components/clm/src/biogeophys/SnowHydrologyMod.F90
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r110
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM4.5 / CLM5 cold start
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ roundoff
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ See above code sample, which confirms that the changes were no greater
+ than roundoff-level.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r110
+Originator(s): sacks (Bill Sacks)
+Date: Fri Jun 12 15:30:11 MDT 2015
+One-line Summary: Add flexibility to have more snow layers
+
+Purpose of changes:
+
+ Generalize snow code so that it no longer assumes 5 snow layers. Instead,
+ make the number of snow layers (and the maximum SWE in the snow pack) a
+ runtime parameter, allowing 3 - 12 snow layers.
+
+ Most changes were made by Leo van Kampenhout (l.vankampenhout@uu.nl).
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ - 2182 (possible threading issue with optimized pgi builds)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist:
+ - new namelist parameters: nlevsno, h2osno_max
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (cime, rtm, cism, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do:
+
+========= Add tests with different number of snow layers
+A components/clm/cimetest/testmods_dirs/clm/snowlayers_12/user_nl_clm
+A components/clm/cimetest/testmods_dirs/clm/snowlayers_12/include_user_mods
+A components/clm/cimetest/testmods_dirs/clm/snowlayers_12
+A components/clm/cimetest/testmods_dirs/clm/snowlayers_3_monthly/user_nl_clm
+A components/clm/cimetest/testmods_dirs/clm/snowlayers_3_monthly/include_user_mods
+A components/clm/cimetest/testmods_dirs/clm/snowlayers_3_monthly
+
+========= Add unit tests for snow pack initialization (note: these were added
+ mainly to facilitate debugging InitSnowLayers; since this routine is
+ only used in cold-start, these are not critical unit tests, and can be
+ removed if the maintenance cost proves too high)
+A components/clm/src/biogeophys/test/SnowHydrology_test/CMakeLists.txt
+A components/clm/src/biogeophys/test/SnowHydrology_test/README
+A components/clm/src/biogeophys/test/SnowHydrology_test/test_SnowHydrology.pf
+A components/clm/src/biogeophys/test/SnowHydrology_test
+
+List all existing files that have been modified, and describe the changes:
+
+========= Major rework to remove assumption of 5 snow layers - instead allow
+ runtime-settable number of snow layers, between 3 and 12.
+ Also, clean up white space throughout file, and add mode/indentation
+ emacs line.
+M components/clm/src/biogeophys/SnowHydrologyMod.F90
+
+========= Minor changes to remove assumption of 5 snow layers
+M components/clm/src/biogeophys/SurfaceRadiationMod.F90
+M components/clm/src/biogeophys/SoilTemperatureMod.F90
+ - also: remove unused variables, fix array argument declarations to
+ conform to conventions
+M components/clm/src/main/initVerticalMod.F90
+ - also: clean up some white space
+
+========= Add namelist variables to control number of snow layers and maximum SWE
+M components/clm/src/main/clm_varcon.F90
+M components/clm/src/main/clm_varpar.F90
+M components/clm/src/main/controlMod.F90
+M components/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M components/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M components/clm/bld/CLMBuildNamelist.pm
+
+========= Minor changes needed for unit testing of SnowHydrologyMod
+M components/clm/src/biogeophys/CMakeLists.txt
+M components/clm/src/biogeophys/test/CMakeLists.txt
+M components/clm/src/biogeophys/SnowSnicarMod.F90
+M components/clm/src/biogeophys/SnowHydrologyMod.F90
+M components/clm/src/biogeophys/AerosolMod.F90
+M components/clm/src/main/CMakeLists.txt
+M components/clm/src/unit_test_stubs/main/histFileMod_stub.F90
+
+========= Add tests with different number of snow layers
+M components/clm/cimetest/testlist_clm.xml
+
+========= Unrelated change: remove unused variables in associate statements
+M components/clm/src/biogeophys/SoilFluxesMod.F90
+
+========= Remove a test that now passes (hooray for weird compiler bugs!)
+M components/clm/cimetest/ExpectedTestFails.xml
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+ All CLM45 and CLM50 tests have namelist differences; this is expected due
+ to addition of 2 new namelist items.
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r109
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r109
+Originator(s): sacks (Bill Sacks)
+Date: Sat Jun 6 06:12:02 MDT 2015
+One-line Summary: Fix bug in DivideSnowLayers
+
+Purpose of changes:
+
+ Fix bug in DivideSnowLayers. Leo van Kampenhout (l.vankampenhout@uu.nl)
+ discovered the bug and determined how to fix it. He found this bug in the
+ course of refactoring this routine to introduce loops; without this bug fix,
+ answers differed with his new logic that removes duplication.
+
+ Specifically: Logic using many IF-statements is employed to see whether or
+ not a layer may be subdivided, depending on the layer thickness. Currently,
+ the test for subdividing the BOTTOM layer are only reachable when the layer
+ above it was also too thick. As it turns out, this is faulty as a situation
+ can arise where the bottom layers grows even though the layer above it was
+ not divided, i.e. dumped mass to it. The current understanding is that this
+ happens through meltwater percolation (liquid h2o is translated to thickness
+ as well).
+
+ Note that the indentation has not been appropriately corrected; this is because this
+ fix is only temporary (less cluttered logic will be implemented next).
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 2183 (incorrect logic for sub-dividing bottom
+snow layer in DivideSnowLayers)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Mark Flanner
+
+List any svn externals directories updated (cime, rtm, cism, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= Bug fix, described above
+M components/clm/src/biogeophys/SnowHydrologyMod.F90
+
+========= Remove failures from here, now that we're using the file in cimetest
+M components/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+========= Change a failure type from FAIL to RUN (presumably due to new test
+ reporting)
+M components/clm/cimetest/ExpectedTestFails.xml
+
+CLM testing:
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r108
+
+Changes answers relative to baseline: YES
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All clm4.5 and clm5
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ Larger than roundoff. While not investigated carefully, Leo showed that
+ the impacts are relatively small, so this is believed NOT to be
+ climate-changing.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r108
+Originator(s): andre (Benjamin Andre,UCAR/CSEG,303-497-1391)
+Date: Fri May 29 15:14:26 MDT 2015
+One-line Summary: Crop changes from Sam Levis
+
+Purpose of changes: Crop model changes from Sam Levis. Increases the number of
+crops to 64, with 78 total pfts. Requires new parameters file, surface dataset,
+and land use timeseries files.
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 2146, 2155
+
+Known bugs (include bugzilla ID): 2180, 2182
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: n/a
+
+Describe any changes made to the namelist: n/a
+
+List any changes to the defaults for the boundary datasets:
+ Regenerate surface data sets and land use timeseries to increase the number of
+ pfts and crops with data from Levis. New raw datasets:
+ rawdata/pftlanduse.3minx3min.simyr2000.c110913/mksrf_78pft_landuse_rc2000_c130927.nc
+ rawdata/pftlandusedyn.0.5x0.5.simyr1850-2005.c090630/mksrf_78pft_landuse_rc2000_c150130.nc
+
+Describe any substantial timing or memory changes:
+ Increased memory usage for all crop runs. Exact consequences require further study.
+
+Code reviewed by: andre, levis
+
+List any svn externals directories updated (cime, rtm, cism, etc.): n/a
+
+List all files eliminated: n/a
+
+List all files added and what they do:
+ components/clm/cimetest/ExpectedTestFails.xml - new expected fails file for upcoming cime xfail integration
+ components/clm/tools/clm4_5/mksurfdata_map/Makefile.data - automate generating all surface data sets
+
+List all existing files that have been modified, and describe the changes:
+ clm/bld/CLMBuildNamelist.pm - increase max pft, add info to error message, fix quoted empty string processing (bug 2146)
+ clm/bld/namelist_files/namelist_defaults_clm4_5.xml - point to new datasets
+ clm/bld/namelist_files/namelist_defaults_clm4_5_tools.xml - update rawdata sets
+ clm/bld/namelist_files/namelist_definition_clm4_5.xml - update sim year range to avoid special cases for testing
+ clm/bld/test_build_namelist/t/test_do_harvest.pm - update test (bug 2146)
+ clm/bld/test_build_namelist/t/test_do_transient_crops.pm - update test (bug 2146)
+ clm/bld/test_build_namelist/t/test_do_transient_pfts.pm - update test (bug 2146)
+
+ clm/cimetest/testmods_dirs/clm/crop_trans_f10/user_nl_clm - point to new datafiles
+ clm/cimetest/testmods_dirs/clm/crop_trans_sville/user_nl_clm - point to new datafiles
+
+ clm/src/biogeophys/WaterStateType.F90 - workaround for pgi compiler bug
+
+ clm/src/biogeochem/CNNDynamicsMod.F90 - new crop model
+ clm/src/biogeochem/CNPhenologyMod.F90
+ clm/src/biogeochem/CNVegStructUpdateMod.F90
+ clm/src/biogeochem/CropType.F90
+ clm/src/biogeochem/NutrientCompetitionCLM45defaultMod.F90
+ clm/src/biogeochem/VOCEmissionMod.F90
+ clm/src/biogeophys/CanopyFluxesMod.F90
+ clm/src/biogeophys/PhotosynthesisMod.F90
+ clm/src/main/PatchType.F90
+ clm/src/main/clm_varpar.F90
+ clm/src/main/pftconMod.F90
+ clm/src/main/subgridRestMod.F90
+ clm/src/main/surfrdMod.F90
+
+ clm/tools/clm4_5/mksurfdata_map/README
+ clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl - move file writes into functions. write to __dataset_name__.namelist
+ clm/tools/clm4_5/mksurfdata_map/src/mklaiMod.F90 - update for new crops
+ clm/tools/clm4_5/mksurfdata_map/src/mkpftConstantsMod.F90
+ clm/tools/clm4_5/mksurfdata_map/src/mkpftMod.F90 - update for new crops, fix bug 2155.
+
+ clm/tools/clm4_5/mksurfdata_map/src/mksurfdat.F90 - work around for an issue causing abort during urban dataset generation
+ clm/tools/clm4_5/mksurfdata_map/src/mkurbanparMod.F90
+
+
+CLM testing: regular
+
+ build-namelist tests:
+
+ yellowstone unit tests - ok
+
+ unit-tests (components/clm/src):
+
+ yellowstone_intel - ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ - yellowstone aux clm40 intel - ok
+ - yellowstone aux clm40 pgi - ok
+ - yellowstone aux clm45 intel - ok
+ - all namelist fail - new datasets and parameters
+ - crop - new crop model - baseline failures expected
+ - ed - new parameters file - baseline failures expected, ok'd by rfisher
+ - ERP_D_Ld5.f19_g16.ICLM45GLCMEC.yellowstone_intel.clm-glcMEC_changeFlags - baseline compare fails, roundoff level, unknown cause, ok'd by sacks
+ - ERP_E_Ld5.f19_g16.I1850CRUCLM45CN.yellowstone_intel.clm-default - baseline compare fails, only occurs with esmf - ok'd by mvertens
+ - yellowstone aux clm45 gnu - ok
+ - all namelist fail - new datasets and parameters
+ - crop - new crop model - baseline failures expected
+ - yellowstone aux clm45 pgi -
+ - all namelist fail - new datasets and parameters
+ - crop - new crop model - baseline failures expected
+ - ed - new parameters file - baseline failures expected, ok'd by rfisher
+ - ERI_D_Ld9.f19_g16.I1850CLM45CN.yellowstone_pgi - roundoff in cpl baseline
+ - ERI_D_Ld9.f19_g16.I1850CLM45CN.yellowstone_pgi.clm-drydepnomegan - roundoff in baseline
+ - ERP_P15x2_Lm13.f10_f10.IHISTCLM45BGC.yellowstone_pgi.clm-monthly - new xfail, #2182, dies at runtime in optimized, threaded pgi builds only.
+ - yellowstone mksurfdata_map unit_testers - ok
+ - yellowstone tools tests - ok, known issues with PTCLMmkdata (bug 2180)
+
+CLM tag used for the baseline comparisons: clm4_5_1_r106 (bit for bit with clm4_5_1_r107)
+
+Changes answers relative to baseline: yes
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: crop, ed
+ - what platforms/compilers: all
+ - nature of change : answer changes updated crop model. approved by levis.
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - simulations with all pfts everywhere were run with merged code. Levis compared current runs with his archived runs.
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r107
+Originator(s): andre (Benjamin Andre,UCAR/CSEG,303-497-1391)
+Date: Tue May 19 10:05:49 MDT 2015
+One-line Summary: Update externals to use github version of cime1.0.7.
+
+Purpose of changes: Switch the cime external from using svn to github. Update cime to cime1.0.7.
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self
+
+List any svn externals directories updated (cime, rtm, cism, etc.): cime 1.0.7
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes: N/A
+
+CLM testing: regular
+
+ build-namelist tests:
+ unit-tests: ok
+ system-tests: not run
+
+ unit-tests (models/lnd/clm/src):
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel 40 - ok; 45 - ok
+ yellowstone_pgi 40 - ok; 45 - ok
+ yellowstone_gnu (clm45 only) 45 - ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r106
+
+Changes answers relative to baseline: none
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r106
+Originator(s): erik/fvitt
+Date: Thu May 14 13:22:51 MDT 2015
+One-line Summary: Fix CO2 forcing for MEGAN
+
+Purpose of changes:
+
+Bring in changes from Francis Vitt, and Louisa Emmons to correct CO2 forcing
+for MEGAN and dry-deposition. Previously, the fixed value of CO2 was being used
+rather than using the CO2 forcing sent in from the atmosphere model.
+
+Also fix some issues with clm4_0 code where some urban diagnostic fields have
+a different fill-value pattern on restart from startup. Fill-value is now only
+set over non-land, and areas without urban, are set to zero.
+
++M models/lnd/clm/src_clm40/biogeochem/VOCEmissionMod.F90
++ - use time-dependent atmospheric CO2 concentrations rather than
++ the CCSM_CO2_PPMV constant value
++
++M models/lnd/clm/src_clm40/biogeochem/DryDepVelocity.F90
++ - science updates and bug fixes provided by Maria Val Martin
++
++M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90
++ - use time-dependent atmospheric CO2 concentrations rather than
++ the CCSM_CO2_PPMV constant value
++ - use 10-day average of LAI rather than 1-day average
++
++M models/lnd/clm/src/biogeochem/DryDepVelocity.F90
++ - science updates and bug fixes provided by Maria Val Martin
++
++M models/lnd/clm/src/biogeophys/CanopyStateType.F90
++ - get 10-day average of LAI rather than 1-day average
+
+Requirements for tag: Fix 2177 and some 2165 clm40 tests
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 2177 (MEGAN improperly uses constant CO2 rather than time varying)
+ 2176 (ED doesn't work with MEGAN -- partial just turn MEGAN off when ED on)
+ 2165 (some clm40 tests have history files differ on restart in urban fillvalue)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Turn MEGAN off when ED on
+ Have build-namelist make sure MEGAN is off when ED is on
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self,fvitt,simone,emmons
+
+List any svn externals directories updated (cime, rtm, cism, etc.): cime, cism
+ cime up to cime0_3_21 (bring in optional orbital calculation)
+ cism up to cism2_0_09 (just bring the branch to the trunk)
+
+List all files eliminated: None
+
+List all files added and what they do: Turn MEGAN off for ED tests
+
+A components/clm/cimetest/testmods_dirs/clm/edTest/shell_commands
+
+List all existing files that have been modified, and describe the changes:
+
+ M components/clm/src_clm40/biogeophys/UrbanInitMod.F90 -- Initialize to
+ zero over land
+ M components/clm/src_clm40/main/clm_initializeMod.F90 --- Call urbanInit
+ before reading restart files
+ M components/clm/src_clm40/main/accFldsMod.F90 ---------- Change running mean
+ from 1 day to 10 days
+ M components/clm/src_clm40/main/clmtypeInitMod.F90 ----- Initialize to spval
+
+ M components/clm/src_clm40/biogeochem/VOCEmissionMod.F90
++ - use time-dependent atmospheric CO2 concentrations rather than
++ the CCSM_CO2_PPMV constant value
++ - use 10-day average of LAI rather than 1-day average
+ M components/clm/src_clm40/biogeochem/DryDepVelocity.F90
++ - science updates and bug fixes provided by Maria Val Martin
+
+ M components/clm/src/biogeochem/VOCEmissionMod.F90
++ - use time-dependent atmospheric CO2 concentrations rather than
++ the CCSM_CO2_PPMV constant value
++ - use 10-day average of LAI rather than 1-day average
+ M components/clm/src/biogeochem/DryDepVelocity.F90
++ - science updates and bug fixes provided by Maria Val Martin
+ M components/clm/src/biogeophys/CanopyStateType.F90
++ - get 10-day average of LAI rather than 1-day average
+
+ M README_cime --- Update documentation
+
+ M components/clm/bld/CLMBuildNamelist.pm -- Check that MEGAN off when ED on
+ M components/clm/bld/unit_testers/build-namelist_test.pl - Add check for
+ MEGAN off when ED on
+ M components/clm/bld/clm.buildnml --- only copy drv_flds_in over if it
+ was actually created.
+
+ M components/clm/tools/README ---- Have documentation point to gen_domain
+ under cime/tools/mapping
+
+CLM testing: regular
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone yes
+
+ regular tests (aux_clm40, aux_clm45):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ yellowstone_gnu yes
+ goldbach_nag yes
+ goldbach_intel yes
+ goldbach_pgi yes
+
+CLM tag used for the baseline comparisons: clm4_5_1_r105
+
+Changes answers relative to baseline: Yes -- MEGAN diagnostic fields only!
+ as well as dry-deposition because of science update
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All with MEGAN on
+ - what platforms/compilers: All
+ - nature of change: Diagnostic fields change
+
+ VOC emissions change
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r105
+Originator(s): erik (Erik)
+Date: Thu Apr 16 13:23:19 MDT 2015
+One-line Summary: Move test lists to beneath active components, change build scripts from cshell
+ to perl, move to new cime directory structure
+
+Purpose of changes:
+
+ * Move CESM test lists from under scripts to under active components.
+ Now clm and rtm have their own CESM test lists under their "cimetest" directory.
+ * Change build scripts from cshell to perl
+ cshell is buggy with arbitrary problems with line lengths and number of arguments
+ cshell doesn't allow long scripts to be broken up into subroutine -- perl does
+:::::::::: Get unit-testing working with directory structure change
+ M src/CMakeLists.txt
+ M src/README.unit_testing
+
+:::::::::: Get tools testing working with directory structure change
+ M test/tools/TBLtools.sh
+ M test/tools/TSMCFGtools.sh
+ M test/tools/TSMscript_tools.sh
+ M test/tools/TCBCFGtools.sh
+ M test/tools/TCBscripttools.sh
+ M test/tools/TSMncl_tools.sh
+ M test/tools/TBLCFGtools.sh
+ M test/tools/TSMtools.sh
+ M test/tools/TBLscript_tools.sh
+ M test/tools/TCBtools.sh
+ M test/tools/test_driver.sh
+
+:::::::::: Get tools working with directory structure change
+ M tools/clm4_0/mksurfdata_map/mksurfdata.pl
+ M tools/clm4_5/mksurfdata_map/mksurfdata.pl
+ M tools/shared/ncl_scripts/getco2_historical.ncl
+
+:::::::::: Updates to build
+ M bld/CLMBuildNamelist.pm
+ M bld/configure
+ M bld/queryDefaultNamelist.pl
+ M bld/listDefaultNamelist.pl
+ M bld/unit_testers/xFail/wrapClmTests.pl
+ M bld/unit_testers/xFail/expectedFail.pm
+ M bld/test_build_namelist/test_build_namelist.pl
+ M bld/namelist_files/checkmapfiles.ncl
+ M bld/namelist_files/namelist_definition.xsl
+
+:::::::::: Updates to documentation with new directory structure
+ M doc/README
+ M doc/UsersGuide/co2_streams.txt
+ M doc/Quickstart.userdatasets
+ M doc/Quickstart.GUIDE
+ M doc/KnownLimitations
+ M tools/README
+ M tools/README.filecopies
+ M tools/clm4_0/interpinic/README
+ M tools/clm4_0/mksurfdata_map/README
+ M tools/clm4_5/refactorTools/associate/README
+ M tools/clm4_5/refactorTools/clmType/README
+ M tools/clm4_5/mksurfdata_map/README
+ M tools/shared/mkmapgrids/README
+ M tools/shared/mkmapdata/README
+ M tools/shared/mkprocdata_map/README
+ M tools/shared/ncl_scripts/README
+ M tools/README.testing
+ M bld/README
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone yes
+
+ regular tests (aux_clm40, aux_clm45, and aux_clm_short):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ yellowstone_gnu yes
+ goldbach_nag yes
+ goldbach_intel yes
+ goldbach_pgi yes
+
+CLM tag used for the baseline comparisons: clm4_5_1_r104
+
+Changes answers relative to baseline:
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r104
+Originator(s): erik (Erik)
+Date: Tue Jan 27 11:22:48 MST 2015
+One-line Summary: Update externals to latest cesm beta tag + bring in shared build for clm4_5/clm5_0 for testing
+
+Purpose of changes:
+
+* Update externals to cesm1_3_beta15+ shared clm4_5/clm5_0 library build for testing.
+* Fix BG1850CN @ f09 by changing fglcmask (Bill Sacks)
+* Update more prealpha/prebeta tests to test with clm4_5
+* Create datasets for clm4_5 at ne16 and ne120 resolution
+ (for ne120 create rcp8.5 and rcp4.5 transient datasets)
+ M models/lnd/clm/src/cpl/lnd_comp_mct.F90 ----------- Add only for lnd_import_export use statement
+
+------------ Change so sample subsetting uses the high resolution datasets
+ M models/lnd/clm/tools/shared/ncl_scripts/README.getregional
+ M models/lnd/clm/tools/shared/ncl_scripts/sample_inlist
+ M models/lnd/clm/tools/shared/ncl_scripts/sample_outlist
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone yes
+
+ tools testing:
+
+ yellowstone yes
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ yellowstone_gnu (clm45 only) yes
+ goldbach_nag yes
+ goldbach_intel yes
+ goldbach_pgi yes
+
+CLM tag used for the baseline comparisons: clm4_5_1_r103
+
+Changes answers relative to baseline: YES!
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: all
+ - what platforms/compilers: time-change all, roundoff-intel
+ - nature of change: roundoff
+
+ Small change in driver changes time-stamps on history files by roundoff (drvseq5_1_05).
+ Normal cprnc comparison then does NOT compare fields and calls files different. Changes
+ in the intel build (on yellowstone) change answers to roundoff for intel on yellowstone
+ (Machines update between Machines_141125 and Machines_150106a causes answers to change)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r103
+Originator(s): sacks (Bill Sacks)
+Date: Thu Jan 1 06:15:57 MST 2015
+One-line Summary: enable transient crops
+
+Purpose of changes:
+
+(1) Allow transient crops! Note that carbon and nitrogen conservation still is
+ not done, but this at least allows crop areas to evolve in time.
+
+(2) Add control flags for which pieces of the transient dynamics should be done:
+ transient natural PFTs, transient crops, and/or harvest.
+
+(3) Reworked both source code and unit tests to be able to use the true CLM time
+ manager in unit tests rather than a stub version. Also added functionality
+ to time_info_type to be able to take the date from the end of the current
+ time step or the beginning of the time step. This flexibility was needed
+ because: (a) for crops, with an annual update, I wanted the update time to
+ be consistent with the glacier update time: the first time step after
+ crossing the year boundary (so take time from the start of the time step);
+ (b) for transient PFTs and harvest, for consistency with what was being done
+ before, we need to take the time from the end of the time step.
+
+(4) Make CNBalanceCheck more modular and object-oriented. Also, bypass the
+ balance check for newly-active columns, which is needed to avoid balance
+ check errors with transient crops.
+
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: New control flags that control which
+aspects of transient subgrid dynamics (and harvest) are turned on/off. This lets
+you turn on/off transient natural PFTs, transient crops, and/or harvest
+independently.
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: some pieces reviewed by Erik
+
+ scripts: append_nl_value_n03_scripts4_141201 -> append_nl_value_n07_scripts4_141201
+ esmf_wrf_timemgr: esmf_wrf_timemgr_141028 -> esmf_wrf_timemgr_141217
+
+List all files eliminated:
+
+========= No longer use stub time manager - use true time manager instead
+D models/lnd/clm/src/unit_test_stubs/utils/clm_time_manager_stub.F90
+
+List all files added and what they do:
+
+========= Add transient crops
+A models/lnd/clm/src/dyn_subgrid/dyncropFileMod.F90
+
+========= Reads and stores namelist items controlling transient dynamics. This
+ allows turning off select pieces of the transient subgrid behavior.
+A models/lnd/clm/src/dyn_subgrid/dynSubgridControlMod.F90
+
+========= Add unit test utilities that wrap the clm time manager
+A models/lnd/clm/src/unit_test_shr/unittestTimeManagerMod.F90
+
+========= Start adding unit tests for the clm time manager
+A models/lnd/clm/src/utils/test/clm_time_manager_test/test_clm_time_manager.pf
+A models/lnd/clm/src/utils/test/clm_time_manager_test/CMakeLists.txt
+A models/lnd/clm/src/utils/test/clm_time_manager_test
+A models/lnd/clm/src/utils/test/CMakeLists.txt
+A models/lnd/clm/src/utils/test
+
+========= Test logic for new control flags
+A models/lnd/clm/bld/test_build_namelist/t/test_do_transient_pfts.pm
+A models/lnd/clm/bld/test_build_namelist/t/test_do_harvest.pm
+A models/lnd/clm/bld/test_build_namelist/t/test_do_transient_crops.pm
+
+List all existing files that have been modified, and describe the changes:
+
+========= Reworked both source code and unit tests to be able to use the true
+ CLM time manager in unit tests rather than a stub version. Also added
+ functionality to time_info_type to be able to take the date from the
+ end of the current time step or the beginning of the time step. Note
+ that some unit test builds now need to link against the
+ esmf_wrf_timemgr library, if they use the time manager either directly
+ or indirectly.
+M models/lnd/clm/src/utils/clm_time_manager.F90
+M models/lnd/clm/src/dyn_subgrid/dynTimeInfoMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynVarTimeInterpMod.F90.in
+M models/lnd/clm/src/dyn_subgrid/dynVarTimeInterpMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynFileMod.F90
+M models/lnd/clm/src/unit_test_stubs/dyn_subgrid/dynFileMod_stub.F90
+M models/lnd/clm/src/unit_test_stubs/utils/CMakeLists.txt
+M models/lnd/clm/src/CMakeLists.txt
+M models/lnd/clm/src/unit_test_shr/CMakeLists.txt
+M models/lnd/clm/src/dyn_subgrid/test/dynTimeInfo_test/test_dynTimeInfo.pf
+M models/lnd/clm/src/dyn_subgrid/test/dynTimeInfo_test/CMakeLists.txt
+M models/lnd/clm/src/dyn_subgrid/test/dynVar_test/test_dynVarShared.F90
+M models/lnd/clm/src/dyn_subgrid/test/dynVar_test/test_dynVarTimeInterp.pf
+M models/lnd/clm/src/dyn_subgrid/test/dynVar_test/test_dynVarTimeUninterp.pf
+M models/lnd/clm/src/dyn_subgrid/test/dynVar_test/CMakeLists.txt
+M models/lnd/clm/src/biogeophys/test/Daylength_test/CMakeLists.txt
+M models/lnd/clm/src/biogeophys/test/Irrigation_test/CMakeLists.txt
+M models/lnd/clm/src/utils/CMakeLists.txt
+
+========= Changes related to new control flags, as well as the rework of the
+ time_info%set_current_year interface
+M models/lnd/clm/src/dyn_subgrid/dynpftFileMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynHarvestMod.F90
+
+========= Changes related to new control flags and addition of transient crops
+M models/lnd/clm/src/dyn_subgrid/dynSubgridDriverMod.F90
+
+========= Moved flanduse_timeseries and other control flags into dynSubgridControlMod
+M models/lnd/clm/src/main/clm_varctl.F90
+M models/lnd/clm/src/main/controlMod.F90
+M models/lnd/clm/src/main/restFileMod.F90
+M models/lnd/clm/src/main/subgridRestMod.F90
+M models/lnd/clm/src/main/clm_varpar.F90
+M models/lnd/clm/src/biogeochem/CNDriverMod.F90
+M models/lnd/clm/src/biogeochem/CNFireMod.F90
+
+========= Make CNBalanceCheck more modular and object-oriented
+M models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90
+ - also, bypass balance checks for newly-active columns (needed to
+ avoid balance check errors for newly-active crop columns)
+M models/lnd/clm/src/biogeochem/CNVegNitrogenStateType.F90
+M models/lnd/clm/src/biogeochem/CNVegCarbonStateType.F90
+M models/lnd/clm/src/main/clm_instMod.F90
+M models/lnd/clm/src/main/clm_driver.F90
+ - also make alt_calc operate over inactive as well as active points
+
+========= Added new control flags
+M models/lnd/clm/bld/CLMBuildNamelist.pm
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+
+========= Make test files (more) consistent with actual files. This may not have
+ been necessary.
+M models/lnd/clm/bld/test_build_namelist/t/input/namelist_defaults_clm4_5_test.xml
+M models/lnd/clm/bld/test_build_namelist/t/input/namelist_definition_clm4_5_test.xml
+
+========= Minor changes (e.g., changes to comments and other small changes)
+M models/lnd/clm/src/unit_test_stubs/utils/spmdMod_stub.F90
+M models/lnd/clm/src/dyn_subgrid/do_genf90
+M models/lnd/clm/src/dyn_subgrid/dynVarTimeUninterpMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynVarMod.F90
+M models/lnd/clm/src/soilbiogeochem/SoilBiogeochemVerticalProfileMod.F90
+M models/lnd/clm/src/biogeophys/ActiveLayerMod.F90
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+
+ Note that there are differences from baseline due to new control flags
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r102
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r102
+Originator(s): sacks (Bill Sacks)
+Date: Sat Dec 27 06:52:20 MST 2014
+One-line Summary: make new input datasets to support transient crops
+
+Purpose of changes:
+
+While the main purpose of this tag was to make new input datasets to support
+transient crops, it also includes a number of reworks of mksurfdata_map, which
+were either central or tangential to this overall goal. Specifically:
+
+(1) Update mksurfdata_map to be able to generate datasets with transient
+ crops. Currently the logic uses the non-prognostic-crop raw data for the
+ transient time series: It takes the area of the generic crop from that
+ timeseries to specify the transient PCT_CROP area, and sets the PCT_CFT
+ areas based on the year-2000 areas.
+
+(2) Rewrite mksurfdata_map code that normalizes pct_pft to account for special
+ landunits. The code to handle urban was very confusing, and I hope this new
+ code is at least astep towards being less confusing. Note that this
+ introduces roundoff-level differences.
+
+(3) Introduce new mksurfdata_map utility routines: ncd_def_spatial_var and
+ ncd_put_time_slice. These encapsulate behavior that used to be duplicated in
+ the code.
+
+(4) Add mksurfdata_map unit tests using the new pfunit-based unit testing
+ framework. However, I have NOT done a full migration of the mksurfdata_map
+ unit tests. Thus, there are still some tests that use the old unit testing
+ framework that I put in place (which leveraged the test stuff that Erik set
+ up for csm_share a while ago). These tests can be migrated to pfunit
+ incrementally: as someone touches code that is under test using the old
+ framework, they could move the relevant tests into the new pfunit-based
+ framework.
+
+(5) Create new input datasets, based on the above changes:
+
+ - new flanduse_timeseries files created because I have changed the
+ information on these files
+
+ - new surface datasets created because I introduced roundoff-level changes
+ in the surface datasets, and so regenerated all surface datasets now in
+ order to save someone a headache later.
+
+ - new initial conditions files created so that out-of-the-box initial
+ conditions will be compatible with the surface datasets, according to
+ various consistency checks.
+
+ See the following files in inputdata for documentation of how the new
+ datasets were created:
+
+ lnd/clm2/surfdata_map/README_c141219
+ lnd/clm2/initdata_map/README_c141226
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist:
+- new surface datasets
+- new flanduse_timeseries files
+- new initial conditions files
+
+List any changes to the defaults for the boundary datasets:
+- new surface datasets
+- new flanduse_timeseries files
+- new initial conditions files
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Most changes sent to Erik for review, although I can't
+remember how much he actually reviewed.
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts: append_nl_value_n02_scripts4_141201 -> append_nl_value_n03_scripts4_141201
+ - point tropicAtl_subset tests to new file
+
+List all files eliminated:
+
+========= Move tests elsewhere
+D models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkpftMod.F90
+
+
+List all files added and what they do:
+
+========= New object-oriented class for storing and operating on pct_pft
+ data. Encapsulating a bunch of behavior in here allowed me to simplify
+ other code. Before this, I was keeping track of two separate
+ representations of pct_pft: First it was stored as % of grid cell,
+ then it was later converted into % of landunit together with the
+ landunit's % of grid cell. This was starting to get hard to manage,
+ because certain operations could only be done on the first
+ representation, and other operations could only be done on the second
+ representation – and at some point in the processing pipeline, the
+ conversion happened and the first representation was no longer
+ usable. Now there is a single representation, and the class allows any
+ desired operation to be performed on that single representation. This
+ adds some complexity within the class, but removes complexity from the
+ rest of the code, particularly mksurfdat.F90.
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkpctPftTypeMod.F90
+
+========= This new module contains routines that operate on both instances of
+ pct_pft_type (pctnatpft and pctcft) at once. Thus, this contains
+ higher-level logic than was appropriate for mkpctPftTypeMod. Yet, I
+ wanted these routines in a separate module from mkpftMod as an aid to
+ testing, since mkpftMod has a bunch of dependencies.
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkpftUtilsMod.F90
+
+========= Moved constants from other places into here, partly to centralize
+ them, and partly to remove problems with circular dependencies
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkpftConstantsMod.F90
+
+========= Add automated test of making a transient crop surface dataset
+A models/lnd/clm/test/tools/nl_files/mksrfdt_10x15_crp_1850-2000
+
+========= Add input file for creating a transient smallville dataset for testing
+ transient crops
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/single_point_dynpft_files/README.landuse_timeseries_smallvilleIA_hist_simyr1850-1855
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/single_point_dynpft_files/landuse_timeseries_smallvilleIA_hist_simyr1850-1855.txt
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/single_point_dynpft_files/README
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/single_point_dynpft_files
+
+========= Add pfunit-based unit tests for mksurfdata_map
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/CMakeLists.txt
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test/mkpftUtils_test/test_adjust_total_veg_area.pf
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test/mkpftUtils_test/CMakeLists.txt
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test/mkpftUtils_test/test_convert_from_p2g.pf
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test/mkpftUtils_test
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test/mkpctPftType_test/CMakeLists.txt
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test/mkpctPftType_test/test_mkpctPftType.pf
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test/mkpctPftType_test
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test/CMakeLists.txt
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/test
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/README.unit_testing
+
+
+List all existing files that have been modified, and describe the changes:
+
+========= Changes that take advantage of some of the other refactoring described
+ here, plus add logic to allow input dataset to not contain crops even
+ when generating a transient dataset for crops; also add PCT_CROP and
+ PCT_CFT on the landuse_timeseries output file
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkpftMod.F90
+
+========= Changes that take advantage of some of the other refactoring described
+ here, especially making use of the new pct_pft_type methods. Plus: (a)
+ save pctcft from the initial input file so it can be used when
+ generating landuse_timeseries, (b) remove unwanted landunit percents
+ from the landuse_timeseries file, (c) add transient PCT_CROP and
+ PCT_CFT, (d) complete rewrite of the code that normalizes pct_pft to
+ account for special landunits: the code to handle urban was very
+ confusing, and I hope this new code is at least a step towards being
+ less confusing [this change introduces roundoff-level differences],
+ (e) remove some error-checking code that is now embedded in the
+ pct_pft_type routines
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksurfdat.F90
+
+========= Updated for new files
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Srcfiles
+
+========= Added two new routines: ncd_def_spatial_var and
+ ncd_put_time_slice. These encapsulate behavior that used to be
+ duplicated in the code. Also, moved convert_latlon from mkutilsMod to
+ here, since it is really related to netcdf stuff.
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkncdio.F90
+
+========= Simplify this module significantly by using the new
+ ncd_def_spatial_var. Also change what fields are present on the
+ transient landuse file: remove some no-longer-desired fields (% of
+ special landunits, etc.).
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkfileMod.F90
+
+========= Use new ncd_def_spatial_var
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksoilMod.F90
+
+========= Delete routines that have been moved to a more appropriate place
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkutilsMod.F90
+
+========= Remove tests from old test framework for code that I have deleted or
+ migrated to my new modules; and fix some minor errors that appeared
+ when running the old unit tests with gfortran.
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkchecksMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkutilsMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkgridmapMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/Srcfiles
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mksurfdata_map.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkdomainMod.F90
+
+========= Trivial changes (change 'use' statements to reflect migrated code, add
+ comments, etc.)
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mklaiMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkdomainMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkurbanparMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkgridmapMod.F90
+
+========= Add logic needed for creating transient crop datasets. The main
+ differences are (a) for crop, we create a year-2000 surface dataset
+ together with the transient dataset (rather than a year-1850 surface
+ dataset), and (b) we always use the non-crop transient raw data, even
+ when creating a transient crop dataset.
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl
+
+========= New fsurdat, flanduse_timeseries and finidat files
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+========= Add automated test of making a transient crop surface dataset
+M models/lnd/clm/test/tools/input_tests_master
+M models/lnd/clm/test/tools/tests_pretag_yellowstone_nompi
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+
+ tools tests:
+
+ yellowstone: ok
+
+ Note that there were diffs in baseline comparisons for mksurfdata_map tests
+ (and the PTCLM test, which uses mksurfdata_map). See below for details
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r101
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: most clm4_5 runs
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ roundoff
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ Note that there were no source code changes in this tag, so the only
+ differences come from differences in datasets (fsurdat,
+ flanduse_timeseries, finidat).
+
+ I confirmed that differences in fsurdat and flanduse_timeseries are
+ generally roundoff-level. There are greater than roundoff-level diffs in
+ PCT_NAT_PFT at a small number of points, but all of these points have
+ PCT_NATVEG = 0 (this is due to a fix in how PCT_NAT_PFT is determined for
+ points with 0% vegetated landunit, and > 0% urban); this would only affect
+ dynamic landunit runs. Other than that, max normalized RMS diffs are 2e-8,
+ and most are considerably smaller.
+
+ finidat files were created as one-offs to ensure that the only differences
+ are in the subgrid weights, arising from these surface dataset differences.
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r101
+Originator(s): sacks (Bill Sacks)
+Date: Tue Dec 9 06:27:39 MST 2014
+One-line Summary: rework cold start initialization for transient runs
+
+Purpose of changes:
+
+ (1) Do not adjust subgrid weights (or set harvest variables) in cold start
+ initialization. Instead, wait to do this until the first run step. The
+ motivation for this is (a) this is consistent with what is done for
+ glacier (for which prognostic weights aren't available until the run
+ phase), and (b) it simplifies what needs to be done in initialization,
+ particularly for transient crops (which are coming soon).
+
+ (2) Do not run the biogeophys & biogeochem dyn subgrid conservation code in
+ the first step of a cold start run. This affects the current operation of
+ glacier, and is important in conjunction with (1): this avoids doing a
+ large adjustment of physics or BGC caused by a fictitious change in area
+ in the first time step after cold start.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: mvertens reviewed conceptual changes to clm_time_manager and
+ DaylengthMod; other changes only reviewed by self
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+========= Main changes, as documented above
+M models/lnd/clm/src/main/clm_initializeMod.F90
+M models/lnd/clm/src/main/clm_varctl.F90
+M models/lnd/clm/src/dyn_subgrid/dynSubgridDriverMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynConsBiogeochemMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynpftFileMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynConsBiogeophysMod.F90
+
+========= Unrelated change: Fix a possible threading bug in DaylengthMod
+ (although this would rarely cause problems: I think this would only
+ cause a problem if you started / restarted exactly on the solstice)
+M models/lnd/clm/src/utils/clm_time_manager.F90
+M models/lnd/clm/src/biogeophys/DaylengthMod.F90
+
+
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r100
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM4.5 cold start runs with glacier and/or
+ transient PFTs
+ - what platforms/compilers: ALL
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ Changes answers only in initialization. This shows up as changes in the
+ fields set to the coupler in initialization, and/or changes in the initial
+ history file. In offline runs (I compsets), this change does not affect
+ the simulation beyond initialization, but it is expected to change the
+ evolution of the system in coupled runs. However, again note that this
+ only affects cold start runs with glacier and/or transient PFTs, which
+ would not be typical for production runs.
+
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r100
+Originator(s): sacks (Bill Sacks); most changes from Jim Edwards
+Date: Wed Dec 3 06:21:13 MST 2014
+One-line Summary: update pio calls to pio2 API
+
+Purpose of changes:
+
+ Update pio calls to the pio2 API, so that the transition to pio2 will be
+ seamless. Most changes were from Jim Edwards. There are also some other minor
+ changes that are unrelated to this main change, as noted below.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: sacks
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ pio: pio1_8_13 -> pio1_9_5
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+========= Main changes needed for pio2 API
+M models/lnd/clm/src_clm40/main/ncdio_pio.F90
+M models/lnd/clm/src_clm40/main/ncdio_pio.F90.in
+M models/lnd/clm/src/main/ncdio_pio.F90
+M models/lnd/clm/src/main/ncdio_pio.F90.in
+
+========= Change 2-d array to 1-d. Jim says he thinks this was also needed for
+ pio2 support.
+M models/lnd/clm/src_clm40/main/histFileMod.F90
+M models/lnd/clm/src/main/histFileMod.F90
+
+========= Unrelated change: change len to len_trim. Jim says this was needed to
+ fix a problem on some machine.
+M models/lnd/clm/src_clm40/biogeochem/MEGANFactorsMod.F90
+M models/lnd/clm/src/biogeochem/MEGANFactorsMod.F90
+
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r099
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r099
+Originator(s): sacks (Bill Sacks)
+Date: Tue Dec 2 15:05:09 MST 2014
+One-line Summary: add ozone stress code from Danica Lombardozzi
+
+Purpose of changes:
+
+(1) Implement ozone stress. The scientific implementation was done by Danica
+ Lombardozzi. The software reimplementation was done by Bill Sacks.
+
+(2) Fix some misc. bugs, including a restart bug that was introduced in r097.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+- 2091: some restarts not bit-for-bit starting in clm4_5_1_r097
+- 2029: Memory leak in GetGlobalValuesMod
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+- 2094: ozone code doesn't work with the PGI compiler
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: new namelist option, use_ozone
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: ozone changes reviewed by Danica Lombardozzi (she reviewed
+both the code and the changes seen due to ozone in a short simulation)
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts: append_nl_value_n02_scripts4_141129 -> append_nl_value_n02_scripts4_141201
+
+List all files eliminated:
+
+List all files added and what they do:
+
+========= Implement ozone stress. Uses polymorphism to handle ozone-on vs. ozone-off.
+A models/lnd/clm/src/biogeophys/OzoneFactoryMod.F90
+A models/lnd/clm/src/biogeophys/OzoneOffMod.F90
+A models/lnd/clm/src/biogeophys/OzoneBaseMod.F90
+A models/lnd/clm/src/biogeophys/OzoneMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+========= Calculate and apply ozone stress
+M models/lnd/clm/src/biogeophys/PhotosynthesisMod.F90
+M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/main/clm_instMod.F90
+ - also fix restart bug (bug 2091)
+M models/lnd/clm/src/main/clm_driver.F90
+
+========= Add namelist flag to turn ozone on
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/src/main/clm_varctl.F90
+M models/lnd/clm/src/main/controlMod.F90
+
+========= Fix memory leak (bug 2029)
+M models/lnd/clm/src/main/GetGlobalValuesMod.F90
+M models/lnd/clm/src_clm40/main/GetGlobalValuesMod.F90
+M models/lnd/clm/src/ED/main/EDRestVectorMod.F90
+
+========= Workarounds for compiler bugs
+M models/lnd/clm/src/biogeochem/CNDVType.F90
+M models/lnd/clm/src/biogeochem/CNDriverMod.F90
+
+========= Improve documentation comments for compiler bug workarounds
+M models/lnd/clm/src/biogeophys/IrrigationMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynpftFileMod.F90
+
+========= Add allocation of Points, matching behavior of true routine
+M models/lnd/clm/src/unit_test_stubs/csm_share/mct_mod_stub.F90
+
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+ Remove these two entries that now pass:
+ restarts not bit-for-bit
+ error on hist comparison
+
+ Add this new failure:
+ compiler bug in PGI's handling of polymorphism
+
+
+CLM testing:
+
+ Note: testing was done on ozone_polymorphism_n09_clm4_5_1_r098, which was
+ before I put in place the abort if you're trying to run ozone with pgi. After
+ that, I ran two tests with pgi (one with ozone and one without), and one test
+ with intel (with ozone) in order to make sure that the abort check was put in
+ properly. After all testing was complete, I reverted accidental whitespace
+ changes in clm_initializeMod.F90 and restFileMod.F90 - I did not run any
+ additional testing after reverting those whitespace changes.
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+ Also, ran these two additional tests, with comparisons to baselines - these
+ are tests that I have replaced with new tests:
+
+ ERS_Ly5.f10_f10.ICLM45BGCCROP.yellowstone_intel.clm-irrigOn_reduceOutput
+ PET_P15x2_Ly3.f10_f10.ICLM45BGCCROP.yellowstone_pgi.clm-irrigOn_reduceOutput
+
+
+CLM tag used for the baseline comparisons: clm4_5_1_r098
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r098
+Originator(s): sacks (Bill Sacks)
+Date: Sat Nov 29 06:18:59 MST 2014
+One-line Summary: update externals to cesm1_3_beta14 or beyond
+
+Purpose of changes:
+
+ Update most externals to cesm1_3_beta14 or beyond. The one exception is mct,
+ for which I had trouble accessing the tag at the location used in beta14, so
+ I am sticking with the previous mct tag.
+
+ Some notable changes:
+
+ (1) update in intel compiler on yellowstone to intel15
+
+ (2) robust fix for number of datm streams, using Sean Santos's dynamic vector
+
+ (3) testmods reworked to use recursive testmods
+
+ (4) unit_testing, CMake & Machines updated so that unit tests now work on
+ yellowstone
+
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+-scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/refactor_koven_tags/refactor_koven_n02_scripts4_141023
++scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/append_nl_value_tags/append_nl_value_n02_scripts4_141129
+-scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_141017a
++scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_141125
+-scripts/ccsm_utils/CMake https://github.com/quantheory/CMake_Fortran_utils/tags/CMake_Fortran_utils_140715
++scripts/ccsm_utils/CMake https://github.com/quantheory/CMake_Fortran_utils/tags/CMake_Fortran_utils_141122
+-models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_17
++models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_18
+-models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_141022
++models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_141121
+-models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_140529
++models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_141028
+-models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_8_12/pio
++models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_8_13/pio
+-tools/load_balancing_tool https://svn-ccsm-models.cgd.ucar.edu/tools/load_balancing_tool/trunk_tags/load_balancing_tool_140818/
++tools/load_balancing_tool https://svn-ccsm-models.cgd.ucar.edu/tools/load_balancing_tool/trunk_tags/load_balancing_tool_141008
+-tools/pyReshaper https://subversion.ucar.edu/asap/pyReshaper/tags/v0.9.1/
++tools/pyReshaper https://proxy.subversion.ucar.edu/pubasap/pyReshaper/tags/v0.9.1
+-tools/unit_testing https://svn-ccsm-models.cgd.ucar.edu/unit_testing/trunk_tags/unit_testing_0_08
++tools/unit_testing https://svn-ccsm-models.cgd.ucar.edu/unit_testing/trunk_tags/unit_testing_0_12
+
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+========= Workaround for pgi internal compiler error
+M models/lnd/clm/src/main/clm_driver.F90
+
+========= Rework README, mainly to remove the need for using '--clean'
+M models/lnd/clm/src/README.unit_testing
+
+========= Move an xfail from goldbach to yellowstone; add xfail for ERS_Ly5 test
+ (bug 2091)
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+========= Update what machine-compiler combos we test
+M .ChangeLog_template
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: ok
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu (clm45 only): ok
+ goldbach_nag: ok
+
+ Other than xFails, note that the following failed:
+
+ *** presumably this failed in the previous tag, so baselines didn't exist
+ BFAIL SMS_D.1x1_mexicocityMEX.I.goldbach_nag.compare_hist.clm4_5_1_r097
+
+ *** ozone tests that won't work until an upcoming tag that brings ozone in
+ SFAIL ERS_D.f10_f10.I1850CLM45.goldbach_nag.clm-o3.GC.1128-0838.45.n
+ SFAIL PET_P15x2_Ly3.f10_f10.ICLM45BGCCROP.yellowstone_pgi.clm-irrig_o3_reduceOutput.GC.1128-0838.45.p
+ SFAIL ERS_Ly5.f10_f10.ICLM45BGCCROP.yellowstone_intel.clm-irrig_o3_reduceOutput.GC.1128-0838.45.i
+
+ Also, note that the following test failed:
+
+ FAIL ERH_D.f19_g16.I1850CLM45CN.yellowstone_pgi.clm-drydepnomegan
+
+ This appears to be a scripts problem. Since Mariana wants to do away with
+ ERH tests anyway, I just replaced this with:
+
+ PASS ERI_D.f19_g16.I1850CLM45CN.yellowstone_pgi.clm-drydepnomegan
+
+
+ Also ran the following two tests, which have been replaced with
+ (currently-failing) o3 tests:
+
+ ERS_Ly5.f10_f10.ICLM45BGCCROP.yellowstone_intel.clm-irrigOn_reduceOutput
+ PET_P15x2_Ly3.f10_f10.ICLM45BGCCROP.yellowstone_pgi.clm-irrigOn_reduceOutput
+
+ The PET test passed, but the ERS test failed (see bug 2091)
+
+CLM tag used for the baseline comparisons: clm4_5_1_r097
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: ALL
+ - what platforms/compilers: yellowstone-intel
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ NOT INVESTIGATED
+
+ These diffs are presumably due to the yellowstone-intel compiler upgrade
+ to v15.
+
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r097
+Originator(s): mvertens mvertens (Mariana Vertenstein,UCAR/CSEG,303-497-1349)
+Date: Mon Nov 24 11:06:30 MST 2014
+One-line Summary: major refactorization to introduce new soilbiogeochem data
+ types and routines that are independent of either ED or CN datatypes
+
+Purpose of changes: Major refactorization to introduce new soilbiogeochem
+ data types and permit ED and CN vegetation to be independent of each other
+ AND both work with either the same soilbiogeochem or in the future
+ potentially different soilbiogeochem modules
+
+Requirements for tag: None
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: bld/configure modified to
+ accommodate new directory structure - introduction of soilbiogeochem/
+ directory
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: Since use_cn and use_ed
+ are now mutually exclusive, CN memory is not longer allocated when ED is run
+ and the memory reduction (for f19_g16) seems to be about 50%.
+
+Code reviewed by: mvertens, muszala, sacks
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts branch is used
+ https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/refactor_koven_tags/refactor_koven_n01_scripts4_141023
+
+List all files eliminated, added and modified: (see below)
+List all files added and what they do: (see below)
+List all existing files that have been modified, and describe the changes: (see below)
+
+These are all grouped together here - since splitting the items up simply did
+not make sense in this case
+
+---------------------------------------------------
+New module where all instances are now declared (moved from clm_initializeMod)
+- all calls to instance restarts are here as well - so restFileMod
+ is greatly simplified
+---------------------------------------------------
+A models/lnd/clm/src/main/clm_instMod.F90
+
+---------------------------------------------------
+New soilbiogeochem/ directory introduced (new modules and data types)
+---------------------------------------------------
+A models/lnd/clm/src/soilbiogeochem
+
+---------------------------------------------------
+CN state and flux types split into:
+SoilBiogeoChem[Carbon|Nitrogen][State|Flux]Type and SoilBiogeochemStateType
+CNVeg[Carbon|Nitrogen][State|Flux]Type and CNVegStateType
+---------------------------------------------------
+D models/lnd/clm/src/biogeochem/CNStateType.F90
+D models/lnd/clm/src/biogeochem/CNCarbonFluxType.F90
+D models/lnd/clm/src/biogeochem/CNCarbonStateType.F90
+D models/lnd/clm/src/biogeochem/CNNitrogenFluxType.F90
+D models/lnd/clm/src/biogeochem/CNNitrogenStateType.F90
+A models/lnd/clm/src/biogeochem/CNVegStateType.F90
+A models/lnd/clm/src/biogeochem/CNVegCarbonFluxType.F90
+A models/lnd/clm/src/biogeochem/CNVegCarbonStateType.F90
+A models/lnd/clm/src/biogeochem/CNVegNitrogenStateType.F90
+A models/lnd/clm/src/biogeochem/CNVegNitrogenFluxType.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemCarbonFluxType.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemCarbonStateType.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemStateType.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemNitrogenFluxType.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemNitrogenStateType.F90
+
+---------------------------------------------------
+New modules in soilbiogeochem/ directory that are independent of CNVeg or ED types
+---------------------------------------------------
+D models/lnd/clm/src/biogeochem/CNDecompCascadeConType.F90
+D models/lnd/clm/src/biogeochem/CNNitrifDenitrifMod.F90
+D models/lnd/clm/src/biogeochem/CNVerticalProfileMod.F90
+D models/lnd/clm/src/biogeochem/CNDecompMod.F90
+D models/lnd/clm/src/biogeochem/CNAllocationMod.F90
+D models/lnd/clm/src/biogeochem/CNDecompCascadeBGCMod.F90
+D models/lnd/clm/src/biogeochem/CNDecompCascadeCNMod.F90
+D models/lnd/clm/src/biogeochem/CNSoilLittVertTranspMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemDecompCascadeConType.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemNitrifDenitrifMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemNStateUpdate1Mod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemDecompCascadeBGCMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemNLeachingMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemVerticalProfileMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemCompetitionMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemDecompCascadeCNMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemPrecisionControlMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemNitrogenUptakeMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemPotentialMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemLittVertTranspMod.F90
+A models/lnd/clm/src/soilbiogeochem/SoilBiogeochemDecompMod.F90
+
+---------------------------------------------------
+Moved CNEcosystemDynMod to CNDriverMod
+---------------------------------------------------
+D models/lnd/clm/src/biogeochem/CNEcosystemDynMod.F90
+A models/lnd/clm/src/biogeochem/CNDriverMod.F90
+
+---------------------------------------------------
+Changes to modules in biogeochem/ directory to now use new datatypes (see above)
+---------------------------------------------------
+A models/lnd/clm/src/biogeochem/C14BombSpikeMod.F90
+M models/lnd/clm/src/biogeochem/CNCStateUpdate2Mod.F90
+M models/lnd/clm/src/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/biogeochem/CNGRespMod.F90
+M models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90
+M models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90
+M models/lnd/clm/src/biogeochem/CNNStateUpdate3Mod.F90
+M models/lnd/clm/src/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/biogeochem/CNDVDriverMod.F90
+M models/lnd/clm/src/biogeochem/CNMRespMod.F90
+M models/lnd/clm/src/biogeochem/MEGANFactorsMod.F90
+M models/lnd/clm/src/biogeochem/SatellitePhenologyMod.F90
+M models/lnd/clm/src/biogeochem/CNWoodProductsMod.F90
+M models/lnd/clm/src/biogeochem/CNPrecisionControlMod.F90
+M models/lnd/clm/src/biogeochem/CNCIsoFluxMod.F90
+M models/lnd/clm/src/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/biogeochem/CNDVLightMod.F90
+M models/lnd/clm/src/biogeochem/NutrientCompetitionMethodMod.F90
+M models/lnd/clm/src/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/biogeochem/ch4varcon.F90
+M models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90
+M models/lnd/clm/src/biogeochem/CNCStateUpdate3Mod.F90
+M models/lnd/clm/src/biogeochem/CNSharedParamsMod.F90
+M models/lnd/clm/src/biogeochem/CNDVType.F90
+M models/lnd/clm/src/biogeochem/NutrientCompetitionCLM45defaultMod.F90
+M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90
+M models/lnd/clm/src/biogeochem/CNAnnualUpdateMod.F90
+M models/lnd/clm/src/biogeochem/CNNStateUpdate2Mod.F90
+M models/lnd/clm/src/biogeochem/CropType.F90
+M models/lnd/clm/src/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90
+M models/lnd/clm/src/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/biogeochem/CNC14DecayMod.F90
+
+---------------------------------------------------
+Moved frictionvel_type to FrictionVelocityMod
+---------------------------------------------------
+D models/lnd/clm/src/biogeophys/FrictionVelocityType.F90
+
+---------------------------------------------------
+Moved aerosol_type to AerosolMod
+---------------------------------------------------
+D models/lnd/clm/src/biogeophys/AerosolType.F90
+
+---------------------------------------------------
+Moved photosyns_type to PhotosynthesisMod
+---------------------------------------------------
+D models/lnd/clm/src/biogeophys/PhotosynthesisType.F90
+
+---------------------------------------------------
+Moved soilstate cold start initialization to a new module
+---------------------------------------------------
+A models/lnd/clm/src/biogeophys/SoilStateInitTimeConstMod.F90
+
+---------------------------------------------------
+Moved soilhydrology time constant initialization to a new module
+---------------------------------------------------
+A models/lnd/clm/src/biogeophys/SoilHydrologyInitTimeConstMod.F90
+
+---------------------------------------------------
+ED Refactorization1
+(1) EDPhenologyMod changed to EDPhenologyType
+(2) EDBioType moved as a module type (ed_clm_type) in EDCLMLINKMod.F90
+(3) EDVecPatchType no longer needed (for now is_veg, is_bareground and wt_ed are in PatchType.F90)
+---------------------------------------------------
+D models/lnd/clm/src/ED/main/EDBioType.F90
+M models/lnd/clm/src/ED/main/EDCLMLinkMod.F90
+D models/lnd/clm/src/ED/main/EDVecPatchType.F90
+D models/lnd/clm/src/ED/biogeophys/EDPhenologyMod.F90
+A models/lnd/clm/src/ED/biogeochem/EDPhenologyType.F90
+A models/lnd/clm/src/ED/biogeochem/EDSharedParamsMod.F90
+
+---------------------------------------------------
+ED Refactorization2
+(1) Modified EDTypesMod.F90
+ Removed gridcell_edstate_type (array of pointers) and instance
+ gridcelledstate - now have the following ED types and instance
+ defined in clm_instMod.F90 and passed down in clm_initialize and clm_driver (top level)
+ type(ed_site_type), allocatable, target :: ed_allsites_inst(:)
+ type(ed_phenology_type) :: ed_phenology_inst
+ type(ed_clm_type) :: ed_clm_inst
+ so now have ed_allsites_inst which is an array of sites (at this point allocated at the
+ gridcell level - but that could easily be modified to be at some other level like the
+ column level
+(2) In EDTypesMod.F90 added method map_clmpatch_to_edpatch that
+ maps a CLM vector patch to an ED linked-list patch - there is still
+ a one to one correspondence between an ED patch and a CLM vector patch. The
+ call looks like the following
+ currentPatch => map_clmpatch_to_edpatch(ed_allsites_inst(g), p)
+
+(3) In EDTypesMod.F90 added a ED Patch type-bound method set_root_fraction that computes
+ the root fraction for an ED patch
+(4) In EDTypes.F90 eliminated the following components of userdata
+ type (site) , pointer :: firstsite_pnt => null() ! pointer to the first site in the system
+ type (cohort), pointer :: storesmallcohort => null() ! storage of the smallest cohort for insertion routine
+ type (cohort), pointer :: storebigcohort => null() ! storage of the largest cohort for insertion routine
+ These are no longer needed since the above pointers are now local variables
+ in EDCohortDynamics and EDPatchDynamics
+---------------------------------------------------
+M models/lnd/clm/src/ED/main/EDVecCohortType.F90
+M models/lnd/clm/src/ED/main/EDRestVectorMod.F90
+M models/lnd/clm/src/ED/main/EDInitMod.F90
+M models/lnd/clm/src/ED/main/EDMainMod.F90
+M models/lnd/clm/src/ED/main/EDTypesMod.F90
+M models/lnd/clm/src/ED/fire/SFMainMod.F90
+M models/lnd/clm/src/ED/biogeochem/EDGrowthFunctionsMod.F90
+M models/lnd/clm/src/ED/biogeochem/EDCohortDynamicsMod.F90
+M models/lnd/clm/src/ED/biogeochem/EDPhysiologyMod.F90
+M models/lnd/clm/src/ED/biogeochem/EDPatchDynamicsMod.F90
+M models/lnd/clm/src/ED/biogeochem/EDCanopyStructureMod.F90
+M models/lnd/clm/src/ED/biogeophys/EDAccumulateFluxesMod.F90
+M models/lnd/clm/src/ED/biogeophys/EDSurfaceAlbedoMod.F90
+M models/lnd/clm/src/ED/biogeophys/EDPhotosynthesisMod.F90
+M models/lnd/clm/src/ED/biogeophys/EDBtranMod.F90
+
+---------------------------------------------------
+The following changes are implemented below (and in the above routines)
+(1) _vars% changed to _inst%
+(2) pft% changed to patch%
+(3) merged ecophyscon and pftvarcon into single derived type pftcon (in pftconMod)
+ "use EcophysConType, only : ecophyscon" changed to "use pftconMod, only : pftcon"
+(4) module save statements removed in majority of routines
+---------------------------------------------------
+D models/lnd/clm/src/main/EcophysConType.F90
+D models/lnd/clm/src/main/pftvarcon.F90
+A models/lnd/clm/src/main/pftconMod.F90
+M models/lnd/clm/src/main/initInterp.F90
+M models/lnd/clm/src/main/clm_varpar.F90
+M models/lnd/clm/src/main/landunit_varcon.F90
+M models/lnd/clm/src/main/accumulMod.F90
+M models/lnd/clm/src/main/subgridWeightsMod.F90
+M models/lnd/clm/src/main/decompInitMod.F90
+M models/lnd/clm/src/main/clm_initializeMod.F90
+M models/lnd/clm/src/main/subgridRestMod.F90
+M models/lnd/clm/src/main/ColumnType.F90
+M models/lnd/clm/src/main/subgridMod.F90
+M models/lnd/clm/src/main/PatchType.F90
+M models/lnd/clm/src/main/ndepStreamMod.F90
+M models/lnd/clm/src/main/lnd2atmType.F90
+M models/lnd/clm/src/main/atm2lndType.F90
+M models/lnd/clm/src/main/histFileMod.F90
+M models/lnd/clm/src/main/restFileMod.F90
+M models/lnd/clm/src/main/clm_varsur.F90
+M models/lnd/clm/src/main/LandunitType.F90
+M models/lnd/clm/src/main/GetGlobalValuesMod.F90
+M models/lnd/clm/src/main/controlMod.F90
+M models/lnd/clm/src/main/initSubgridMod.F90
+M models/lnd/clm/src/main/filterMod.F90
+M models/lnd/clm/src/main/lnd2glcMod.F90
+M models/lnd/clm/src/main/clm_driver.F90
+M models/lnd/clm/src/main/clm_varctl.F90
+M models/lnd/clm/src/main/subgridAveMod.F90
+M models/lnd/clm/src/main/initGridCellsMod.F90
+M models/lnd/clm/src/main/atm2lndMod.F90
+M models/lnd/clm/src/main/lnd2atmMod.F90
+M models/lnd/clm/src/main/ncdio_pio.F90
+M models/lnd/clm/src/main/surfrdMod.F90
+M models/lnd/clm/src/main/decompMod.F90
+M models/lnd/clm/src/main/reweightMod.F90
+M models/lnd/clm/src/main/readParamsMod.F90
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/biogeophys/WaterfluxType.F90
+M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/biogeophys/SnowSnicarMod.F90
+M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90
+M models/lnd/clm/src/biogeophys/LakeTemperatureMod.F90
+M models/lnd/clm/src/biogeophys/FrictionVelocityMod.F90
+M models/lnd/clm/src/biogeophys/SoilFluxesMod.F90
+M models/lnd/clm/src/biogeophys/TemperatureType.F90
+M models/lnd/clm/src/biogeophys/HumanIndexMod.F90
+M models/lnd/clm/src/biogeophys/PhotosynthesisMod.F90
+M models/lnd/clm/src/biogeophys/LakeFluxesMod.F90
+M models/lnd/clm/src/biogeophys/AerosolMod.F90
+M models/lnd/clm/src/biogeophys/ActiveLayerMod.F90
+M models/lnd/clm/src/biogeophys/SurfaceResistanceMod.F90
+M models/lnd/clm/src/biogeophys/SoilStateType.F90
+M models/lnd/clm/src/biogeophys/SoilHydrologyType.F90
+M models/lnd/clm/src/biogeophys/HydrologyDrainageMod.F90
+M models/lnd/clm/src/biogeophys/UrbanAlbedoMod.F90
+M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/biogeophys/RootBiophysMod.F90
+M models/lnd/clm/src/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/biogeophys/SoilWaterMovementMod.F90
+M models/lnd/clm/src/biogeophys/SoilMoistStressMod.F90
+M models/lnd/clm/src/biogeophys/UrbBuildTempOleson2015Mod.F90
+M models/lnd/clm/src/biogeophys/CanopyHydrologyMod.F90
+M models/lnd/clm/src/biogeophys/EnergyFluxType.F90
+M models/lnd/clm/src/biogeophys/CanopyStateType.F90
+M models/lnd/clm/src/biogeophys/UrbanFluxesMod.F90
+M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/biogeophys/UrbanRadiationMod.F90
+M models/lnd/clm/src/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/biogeophys/IrrigationMod.F90
+M models/lnd/clm/src/biogeophys/CanopyTemperatureMod.F90
+M models/lnd/clm/src/biogeophys/HydrologyNoDrainageMod.F90
+M models/lnd/clm/src/biogeophys/LakeHydrologyMod.F90
+M models/lnd/clm/src/biogeophys/UrbanParamsType.F90
+M models/lnd/clm/src/dyn_subgrid/dynLandunitAreaMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynTimeInfoMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynSubgridDriverMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynFileMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynConsBiogeochemMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynPriorWeightsMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynVarMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynEDMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynVarTimeInterpMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynpftFileMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynCNDVMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynConsBiogeophysMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynVarMod.F90.in
+M models/lnd/clm/src/dyn_subgrid/dynVarTimeInterpMod.F90.in
+M models/lnd/clm/src/dyn_subgrid/dynInitColumnsMod.F90
+M models/lnd/clm/src/cpl/lnd_comp_esmf.F90
+M models/lnd/clm/src/cpl/lnd_import_export.F90
+M models/lnd/clm/src/cpl/lnd_comp_mct.F90
+M models/lnd/clm/src/utils/accumulMod.F90
+M models/lnd/clm/src/utils/domainMod.F90
+
+---------------------------------------------------
+Changes for Unit testing
+---------------------------------------------------
+R models/lnd/clm/src/ED/main/CMakeLists.txt
+M models/lnd/clm/src/unit_test_shr/unittestSubgridMod.F90
+M models/lnd/clm/src/CMakeLists.txt
+
+---------------------------------------------------
+Configuration changes for new soilbiogeochem/
+---------------------------------------------------
+M models/lnd/clm/bld/configure
+
+CLM testing:
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone - okay
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel - okay
+ yellowstone_pgi - okay
+ yellowstone_gnu - okay
+ goldbach_nag - okay
+
+ goldbach_intel (moved these to yellowstone_intel for future tests)
+
+CLM tag used for the baseline comparisons: clm4_5_1_r096
+
+Changes answers relative to baseline: NO (bit-for-bit)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r096
+Originator(s): erik (Erik)
+Date: Wed Nov 19 02:11:09 MST 2014
+One-line Summary: Several answer changing bug-fixes: snow grain size, lake hydrology, default settings, organic soil
+
+Purpose of changes:
+
+Bring in several bug-fixes most of which change answers.
+
+Snow grain size bug that Mark Flanner discovered under snow layer combination.
+Lake hydrology fix from Zack Subin that would rarely cause the code to abort. Snow depth fix from
+Sean Swenson. Use Prigent inversion as recommended by Charlie Koven. Correct population density for 2000 conditions
+from 1850 to properly be 2000. Modify all Carbon on spinup from Dave Lawrence. Add option to square or not square
+the organic fraction (default is to square for clm4_5 and to NOT for clm5_0). Bug with pervious road that Keith
+Oleson found. Simplify an if for urban to consistently use a double precision constant. Point to the new CLMNCEP_V5
+dataset.
+
+For clm4_0 rcp6 and rcp8.5 pftdyn datasets are updated for after 2005.
+
+Some fixes that don't change answers. Get the Prigent inversion and usephfact options working again.
+Fix a bug in interp_source option that Sean Swenson found. Split out test datasets for getregional script since
+the datasets all have to be at the same resolution as the domain file. Also read filelist rather than use env
+variables.
+
+Requirements for tag: Fix the bugs below
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 1934 -- snow grain size (both clm4_0 and clm4_5). Just fix on clm4_5 side.
+ 1717 -- lake hydrology fix (clm4_5 only)
+ 1941 -- snowdp fix from Sean (both clm4_0 and clm4_5) (fix in clm4_5_1_r087)
+ 1759 -- ngwh for clm4_0 datasets (apply cesm1_2_x_n10_clm4_5_10)
+ 1772 -- use Prigent inversion
+ 1838 -- pop dens is 1850 for 2000 compsets
+ 1774 -- modify all Carbon on spinup
+ 1765 -- remove duplicate setting of bd and tkdry
+ 1764 -- Bug with pervious road
+ 2066 -- getregional_datasets.pl bug for long lists of files
+ 2067 -- get Prigent inversion and usephfact options working.
+ 2081 -- point to new CRUNCEP_V5 version
+ 2061 -- make constant consistently double precision rather than have an if around it
+ 2089 -- bug in interp_source that Sean Swenson found
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Add organic_frac_squared=.false. logical as a clm5_0 default feature
+ The old behavior organic_frac_squared=.true. is on as before for clm4_5.
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, dlawren, swensosc, mflanner, subin
+
+List any svn externals directories updated (csm_share, mct, etc.): datm
+ update datm to datm8_141113; update CRUNCEP_V5 dataset version used
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+=========== Split out getregional lists (all files in list MUST be at same res as domain file)
+A models/lnd/clm/test/tools/nl_files/getregional_05popd
+A models/lnd/clm/test/tools/nl_files/getregional_T62
+A models/lnd/clm/test/tools/nl_files/getregional_ndep
+A models/lnd/clm/tools/shared/ncl_scripts/sample_inlist_0.5popd
+A models/lnd/clm/tools/shared/ncl_scripts/sample_inlist_ndep
+A models/lnd/clm/tools/shared/ncl_scripts/sample_outlist_0.5popd
+A models/lnd/clm/tools/shared/ncl_scripts/sample_outlist_ndep
+A models/lnd/clm/tools/shared/ncl_scripts/sample_inlist_T62
+A models/lnd/clm/tools/shared/ncl_scripts/sample_outlist_T62
+
+List all existing files that have been modified, and describe the changes:
+
+=========== Change getregional tests
+M models/lnd/clm/test/tools/input_tests_master
+M models/lnd/clm/test/tools/tests_posttag_nompi_regression
+
+=========== Bring in
+M models/lnd/clm/tools/shared/ncl_scripts/getregional_datasets.pl
+M models/lnd/clm/tools/shared/ncl_scripts/getregional_datasets.ncl
+M models/lnd/clm/tools/shared/ncl_scripts/sample_inlist
+M models/lnd/clm/tools/shared/ncl_scripts/sample_outlist
+
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl ------------ correct number of tests
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml ---- Update rcp6 and rcp8.5 pftdyn datasets
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml ---- Add organic_frac_squared,
+ set fin_use_fsat=.false. by default
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml -- Add organic_frac_squared
+M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml ------ Correct year for popd from 1850 to 2000
+M models/lnd/clm/bld/namelist_files/use_cases/2000_glacierMEC_control.xml - Correct year for popd from 1850 to 2000
+
+M models/lnd/clm/bld/CLMBuildNamelist.pm - Add organic_frac_squared and change setup_logic_more_vertlayers to
+ setup_logic_soilstate
+
+M models/lnd/clm/src/utils/CMakeLists.txt - Add namelist util to source list
+
+M models/lnd/clm/src/biogeochem/ch4Mod.F90 ------------- Pass fsurdat to initCold
+M models/lnd/clm/src/biogeochem/CNCarbonStateType.F90 -- Use nlevdecomp_full in place of nlevdecomp
+M models/lnd/clm/src/main/initInterp.F90 --------------- Change use of rbufsli to rbufslo
+M models/lnd/clm/src/main/clm_initializeMod.F90 -------- Pass nlfilename into soilstate_vars init
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90 ---- Add col%itype(c) == icol_road_perv to an if condition
+M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 - Use 1.0_r8 constant always, rather than integer 1
+ for clm4_5
+M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90 --- Use Mass-weighted combination of radius for combo
+M models/lnd/clm/src/biogeophys/SoilStateType.F90 ------ Add organic_frac_squared logical and namelist read
+ for it. Add two if's that determine if organic_frac
+ should be squared or not.
+M models/lnd/clm/src/biogeophys/LakeHydrologyMod.F90 --- Break apart if-condition for snl==-1
+
+CLM testing: regular
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone yes (although still fails)
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ yellowstone_gnu yes
+ goldbach_nag yes
+ goldbach_intel yes
+
+ short tests (aux_clm_short) (generally these are NOT used when making a tag):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ goldbach_nag yes
+
+ tools testing: (when tools modified or scripts updated (for PTCLM))
+
+ yellowstone interactive yes
+ PTCLM (models/lnd/clm/tools/shared/PTCLM/test) yellowstone yes
+
+CLM tag used for the baseline comparisons:
+
+Changes answers relative to baseline: Yes!
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: clm4_5 and clm5_0
+ - what platforms/compilers: all
+ - nature of change (similar climate, except new clm5_0 feature)
+
+ clm4_0 for rcp6 and rcp8.5 changes answers by using the new good wood harvest
+ datasets for after 2005.
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+
+ /home/erik/noorg_clm451r092_I1850CRUCLM45BGC -- clm4_5 default version
+ /home/erik/clm451r092_I1850CRUCLM45BGC -------- clm4_5 with organic_frac_squared=.false.
+ (clm5_0 default version)
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r095
+Originator(s): andre (Benjamin Andre,UCAR/CSEG,303-497-1391)
+Date: Mon Nov 10 17:54:18 MST 2014
+One-line Summary: refactoring N comp by Jinyun Tang (LBL) and transpiration sink isolation by Gautam Bisht (LBL)
+
+Purpose of changes: Bring in two refactorings:
+ Jinyun Tang (LBL) - isolation of the routines to do soil nutrient
+ competition dynamics into a module, and allow for different
+ implementations through runtime polymorphism.
+
+ Gautam Bisht (LBL) - new function to make transpiration sink
+ distribution independent of subsurface flow physics
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 2039
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: andre, cmt
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+Nutrient Competition
+ List all files added and what they do:
+ A clm/src/biogeochem/NutrientCompetitionFactoryMod.F90 - factory module to select soil nutrient competition method
+ A clm/src/biogeochem/NutrientCompetitionMethodMod.F90 - abstract base class for soil nutrient competition dynamics
+ A clm/src/biogeochem/NutrientCompetitionCLM45defaultMod.F90 - existing clm45 implementation of soil nutrient competition dynamics
+
+ List all existing files that have been modified, and describe the changes:
+ M clm/src/biogeochem/CNDecompMod.F90 - add nutrient competition method to function parameters
+ M clm/src/biogeochem/CropType.F90 - rename UpdateAccVars() to work around pgi compiler error, remove dependency on temperature_type
+ M clm/src/biogeochem/CNAllocationMod.F90 - move code into clm45 default nutrient competition module
+ M clm/src/biogeochem/CNEcosystemDynMod.F90 - add nutrient competition method to function parameters
+ MM clm/src/main/clm_initializeMod.F90 - add nutrient competition method to function parameters
+ M clm/src/main/clm_driver.F90 - add nutrient competition method to function parameters, call to renamed CropUpdateAccVars
+ M clm/src/main/readParamsMod.F90 - add nutrient competition method to function parameters
+
+Transpiration Sink:
+ List all existing files that have been modified, and describe the changes:
+M clm/src/biogeophys/SoilWaterMovementMod.F90 - move transpiration sink into separate function so it is independent of physics.
+
+
+CLM testing:
+
+ build-namelist tests: n/a
+
+ unit-tests (models/lnd/clm/src): no
+
+ yellowstone
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel - 4.0 ok; 4.5 ok
+ yellowstone_pgi - 4.0 ok; 4.5 ok
+ yellowstone_gnu - n/a; 4.5 ok
+ goldbach_nag - 4.0 ok; 4.5 ok
+ goldbach_intel - 4.0 ok; 4.5 ok
+
+ short tests (aux_clm_short) - no
+
+ tools testing: (when tools modified or scripts updated (for PTCLM)) - n/a
+
+CLM tag used for the baseline comparisons: clm4_5_1_r094
+
+Changes answers relative to baseline: no
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r094
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Fri Nov 7 13:43:38 MST 2014
+One-line Summary: misc. glacier-related updates
+
+Purpose of changes:
+
+(1) Add dlnd, satm and srof externals, so that TG compsets (CISM-only, forced by
+ dlnd) can be run from a CLM tag. This will facilitate CISM development and
+ testing.
+
+(2) Remove CLM's dependence on the CISM grid. Previously, CLM used the CISM grid
+ to determine which fglcmask file to use. But the differences between the
+ fglcmask files were inconsequential (all of them included the full area of
+ Greenland, which is what was important). I have created a new set of
+ fglcmask files that are independent of the CISM grid, and point CLM to these
+ new files. This will make it easier to add new CISM grids in the future,
+ because no changes will be needed in CLM for this purpose. However, note
+ that the use of these new files means that the number of virtual landunits &
+ columns changes for glcmec runs.
+
+(3) In subgridAveMod, fix c2l routines: change pft%wtlunit to col%wtlunit (bugz
+ 2077)
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+- 2077 (c2l references pft instead of col)
+- 2085 (listDefaultNamelist is broken)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: change fglcmask
+
+List any changes to the defaults for the boundary datasets: change fglcmask
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+========= Added new externals for the sake of running TG compsets (CISM-only)
+ from a CLM tag
++models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_131201
++models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/satm
++models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/srof
+
+========= Other externals updates
+-tools/unit_testing https://svn-ccsm-models.cgd.ucar.edu/unit_testing/trunk_tags/unit_testing_0_07
++tools/unit_testing https://svn-ccsm-models.cgd.ucar.edu/unit_testing/trunk_tags/unit_testing_0_08
+
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+========= Remove dependence on CISM grid
+M models/lnd/clm/bld/listDefaultNamelist.pl
+ - also fix bug 2085
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+M models/lnd/clm/bld/test_build_namelist/t/input/namelist_defaults_clm4_5_test.xml
+M models/lnd/clm/bld/test_build_namelist/t/input/namelist_definition_clm4_5_test.xml
+M models/lnd/clm/bld/clm.buildnml.csh
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl
+M models/lnd/clm/bld/user_nl_clm
+M models/lnd/clm/bld/CLMBuildNamelist.pm
+M models/lnd/clm/doc/UsersGuide/custom.xml
+M models/lnd/clm/src_clm40/main/controlMod.F90
+M models/lnd/clm/src_clm40/main/clm_varctl.F90
+M models/lnd/clm/src/main/controlMod.F90
+M models/lnd/clm/src/main/clm_varctl.F90
+
+========= Change pft%wtlunit to col%wtlunit in c2l routines (which currently
+ aren't called from anywhere in the code) (bugz 2077)
+M models/lnd/clm/src/main/subgridAveMod.F90
+
+========= fix numbers of build-namelist unit test failures, due to removal of a test
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+
+ expected failure in 23, due to change in fglcmask
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: still broken, due to internal compiler error
+ roo2 (mac laptop): ok
+
+ See notes in clm4_5_1_r090. Point (2) has been fixed, but point (1) remains.
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu: ok
+ goldbach_nag: ok
+ goldbach_intel: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r093
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: IG compsets - both CLM40 and CLM45
+ - what platforms/compilers: ALL
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ only changes diagnostic cpl hist fields - no change in the simulation
+
+ The changes arise from the new glcmask files, which exclude a few points
+ from the glcmask that used to be included. These points are all outside
+ of Greenland, so they are not important for coupling to CISM. However,
+ it means that a few virtual columns have been removed. This, in turn,
+ changes the values of some l2x topo, tsrf and qice fields sent to the
+ coupler. But this does NOT feed back on the simulation in any way.
+
+ Some tests also exhibit diffs in the CLM diagnostic fields PCT_GLC_MEC
+ and QICE_FORC. Again, these are due to changes in where we have virtual
+ columns, and do not affect the simulation.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r093
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Fri Nov 7 13:17:47 MST 2014
+One-line Summary: change cold-start snow initialization, update cism external
+
+Purpose of changes:
+
+(1) Change cold-start snow initialization logic. The original logic did
+different snow initialization depending on whether we are inside or outside the
+glcmask. That's a problem in that answers change depending on the glcmask. The
+new logic instead uses a latitude threshold for determining where to initialize
+a non-zero snow pack. Note that this will change answers for all cold-start
+cases, including non-glcmec cases.
+
+(2) Update CISM to version 2.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self; Dave Lawrence agreed with the change to snow initialization
+
+List any svn externals directories updated (csm_share, mct, etc.):
+-models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140916
++models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism2_0_02
+
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/main/clm_initializeMod.F90
+
+CLM testing:
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu: ok
+ goldbach_nag: ok
+ goldbach_intel: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r092
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ (1) any CLM4.5 case with cold start initialization, due to change in snow
+ initialization
+
+ (2) any case that includes CISM, due to answer changes in the CISM external
+
+ I carefully checked the yellowstone-intel clm4.5 tests to ensure that:
+ (a) FAILed compare_hist all had finidat = ' '
+ (b) PASSed compare_hist either had non-blank finidat OR were single-point
+
+ - what platforms/compilers: ALL
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ Not investigated, but expected to be larger than roundoff/same climate
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r092
+Originator(s): muszala (Stefan Muszala)
+Date: Tue Nov 4 06:10:16 MST 2014
+One-line Summary: bug fixes from santos that address valgrind problems. update rtm external
+
+Purpose of changes: Addresses issues found with Valgrind by Santos. Update RTM.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: santos, self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+-models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_39
++models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_40
+
+List all files eliminated: N/A
+
+List all files added and what they do:
+
+A + models/lnd/clm/src/main/dtypes.h
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+-- update failures
+
+M SVN_EXTERNAL_DIRECTORIES
+-- rtm update to 40
+
+M models/lnd/clm/src_clm40/main/ncdio_pio.F90
+M models/lnd/clm/src_clm40/main/ncdio_pio.F90.in
+M models/lnd/clm/src/main/ncdio_pio.F90
+M models/lnd/clm/src/main/ncdio_pio.F90.in
+-- example changes:
+- status = pio_inq_vardimid(ncid, vardesc , dids)
++ status = pio_inq_vardimid(ncid, vardesc , dids(1:ndims))
+
+M models/lnd/clm/src_clm40/biogeophys/FrictionVelocityMod.F90
+M models/lnd/clm/src/biogeophys/FrictionVelocityMod.F90
+-- change 1.0_8 to 1.0_r8
+
+CLM testing:
+
+ build-namelist tests: N/A
+
+ unit-tests (models/lnd/clm/src): N/A
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel 40- OK 45- OK
+ yellowstone_pgi 40- OK 45- OK
+ yellowstone_gnu 40- N/A 45- OK
+ goldbach_nag 40- OK 45- OK
+ goldbach_intel 40- OK 45- OK
+
+ tools testing: (when tools modified or scripts updated (for PTCLM)) N/A
+
+CLM tag used for the baseline comparisons: clm4_5_1_r091
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r091
+Originator(s): muszala (Stefan Muszala)
+Date: Mon Oct 27 09:48:56 MDT 2014
+One-line Summary: update externals. fix bug so CLM runs with Intel 14x.
+
+Purpose of changes: Update externals. Fix bug in VOCEmissionMod.F90 that prevented
+CLM from running with Intel 14x on yellowstone. Bring in workaround for bug 1730 from
+Sacks.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/ - see CLM test fail list
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, clm developers, particularly Bill Sacks.
+
+List any svn externals directories updated (csm_share, mct, etc.):
+-scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_141009
++scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_141023
+-scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_141001
++scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_141017a
+-models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_141003
++models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_141022
+-models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_140416
++models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_140925
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+-- reflect changes in new testlists
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+-- Sacks' workaround for bug 1730
+M models/lnd/clm/src/main/histFileMod.F90
+M models/lnd/clm/src/main/ncdio_pio.F90
+M models/lnd/clm/src/main/ncdio_pio.F90.in
+-- remove duplicate assignment of 0_r8 to meg_out(imeg)%flux_out
+M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90
+-- update externals
+M SVN_EXTERNAL_DIRECTORIES
+
+CLM testing:
+
+ Please view the CLM expected fail list for new test failures. They are matched
+ to bugzilla bug ids.
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel - 40 - OK 45 - OK
+ yellowstone_pgi - 40 - OK 45 - OK
+ yellowstone_gnu - 40 - N/A 45 - OK
+ goldbach_nag - 40 - OK 45 - OK
+ goldbach_intel - 40 - OK 45 - OK
+
+CLM tag used for the baseline comparisons: clm4_5_1_r090
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r090
+Originator(s): sacks (Bill Sacks)
+Date: Thu Oct 16 06:39:52 MDT 2014
+One-line Summary: modularize irrigation; do some unit test rework
+
+Purpose of changes:
+
+(1) Pull irrigation code out of CanopyFluxes and CanopyHydrology, into its
+ own module
+
+(2) Pull out the locally-created filters from CanopyFluxes and BareGroundFluxes
+ into filterMod, in order to support pulling irrigation out of
+ CanopyFluxes. This will also be needed to support pulling other hydrology
+ stuff out of CanopyFluxes.
+
+(3) Add unit tests for irrigation
+
+(4) Rework some irrigation infrastructure, and add some more unit test utility
+ routines
+
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 2063 (HumanIndexMod fails to compile with gfortran)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: muszala
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated:
+
+========= Renamed to unit_test_stubs
+D models/lnd/clm/src/unit_test_mocks/util_share/ncdio_var.F90
+D models/lnd/clm/src/unit_test_mocks/util_share/restUtilMod_mock.F90
+D models/lnd/clm/src/unit_test_mocks/util_share/GetGlobalValuesMod_mock.F90
+D models/lnd/clm/src/unit_test_mocks/util_share/ncdio_var.F90.in
+D models/lnd/clm/src/unit_test_mocks/util_share/restUtilMod_mock.F90.in
+D models/lnd/clm/src/unit_test_mocks/util_share/clm_time_manager_mock.F90
+D models/lnd/clm/src/unit_test_mocks/util_share/ncdio_pio_mock.F90
+D models/lnd/clm/src/unit_test_mocks/util_share/spmdMod_mock.F90
+D models/lnd/clm/src/unit_test_mocks/util_share/do_genf90
+D models/lnd/clm/src/unit_test_mocks/util_share/CMakeLists.txt
+D models/lnd/clm/src/unit_test_mocks/util_share/ncdio_pio_mock.F90.in
+D models/lnd/clm/src/unit_test_mocks/util_share
+D models/lnd/clm/src/unit_test_mocks/csm_share/shr_mpi_mod_mock.F90
+D models/lnd/clm/src/unit_test_mocks/csm_share/mct_mod_mock.F90
+D models/lnd/clm/src/unit_test_mocks/csm_share/CMakeLists.txt
+D models/lnd/clm/src/unit_test_mocks/csm_share
+D models/lnd/clm/src/unit_test_mocks/main/CMakeLists.txt
+D models/lnd/clm/src/unit_test_mocks/main/histFileMod_mock.F90
+D models/lnd/clm/src/unit_test_mocks/main
+D models/lnd/clm/src/unit_test_mocks/dyn_subgrid/dynFileMod_mock.F90
+D models/lnd/clm/src/unit_test_mocks/dyn_subgrid/CMakeLists.txt
+D models/lnd/clm/src/unit_test_mocks/dyn_subgrid
+D models/lnd/clm/src/unit_test_mocks/CMakeLists.txt
+D models/lnd/clm/src/unit_test_mocks
+
+========= Remove unnecessary files
+D models/lnd/clm/src/ED/CMakeLists.txt
+D models/lnd/clm/src/ED/biogeophys/CMakeLists.txt
+
+List all files added and what they do:
+
+========= Pull out irrigation code into its own module
+A models/lnd/clm/src/biogeophys/IrrigationMod.F90
+
+========= Add some unit test utility code (and some tests for the utility code)
+A models/lnd/clm/src/unit_test_shr/unittestFilterBuilderMod.F90
+A models/lnd/clm/src/unit_test_shr/unittestSimpleSubgridSetupsMod.F90
+A models/lnd/clm/src/unit_test_shr/test/unittestFilterBuilder_test/test_filterBuilder.pf
+A models/lnd/clm/src/unit_test_shr/test/unittestFilterBuilder_test/CMakeLists.txt
+A models/lnd/clm/src/unit_test_shr/test/unittestFilterBuilder_test
+A models/lnd/clm/src/unit_test_shr/test/CMakeLists.txt
+A models/lnd/clm/src/unit_test_shr/test
+
+========= Renamed from unit_test_mocks to unit_test_stubs; also renamed
+ individual files from mock to stub (or 'fake' for ncdio_pio, because
+ it does more than a stub); also, moved some stubs to match the current
+ organization of the main source tree
+A models/lnd/clm/src/unit_test_stubs/utils/restUtilMod_stub.F90
+A models/lnd/clm/src/unit_test_stubs/utils/do_genf90
+A models/lnd/clm/src/unit_test_stubs/utils/restUtilMod_stub.F90.in
+A models/lnd/clm/src/unit_test_stubs/utils/CMakeLists.txt
+A models/lnd/clm/src/unit_test_stubs/utils/clm_time_manager_stub.F90
+A models/lnd/clm/src/unit_test_stubs/utils/spmdMod_stub.F90
+A models/lnd/clm/src/unit_test_stubs/utils
+A models/lnd/clm/src/unit_test_stubs/csm_share/shr_mpi_mod_stub.F90
+A models/lnd/clm/src/unit_test_stubs/csm_share/mct_mod_stub.F90
+A models/lnd/clm/src/unit_test_stubs/csm_share/CMakeLists.txt
+A models/lnd/clm/src/unit_test_stubs/csm_share
+A models/lnd/clm/src/unit_test_stubs/main/histFileMod_stub.F90
+A models/lnd/clm/src/unit_test_stubs/main/ncdio_var.F90
+A models/lnd/clm/src/unit_test_stubs/main/ncdio_pio_fake.F90
+A models/lnd/clm/src/unit_test_stubs/main/ncdio_var.F90.in
+A models/lnd/clm/src/unit_test_stubs/main/GetGlobalValuesMod_stub.F90
+A models/lnd/clm/src/unit_test_stubs/main/do_genf90
+A models/lnd/clm/src/unit_test_stubs/main/ncdio_pio_fake.F90.in
+A models/lnd/clm/src/unit_test_stubs/main/CMakeLists.txt
+A models/lnd/clm/src/unit_test_stubs/main
+A models/lnd/clm/src/unit_test_stubs/dyn_subgrid/dynFileMod_stub.F90
+A models/lnd/clm/src/unit_test_stubs/dyn_subgrid/CMakeLists.txt
+A models/lnd/clm/src/unit_test_stubs/dyn_subgrid
+A models/lnd/clm/src/unit_test_stubs/CMakeLists.txt
+A models/lnd/clm/src/unit_test_stubs
+
+========= Add dependencies (direct & indirect) of IrrigationMod
+A models/lnd/clm/src/biogeochem/CMakeLists.txt
+
+========= Add unit tests for irrigation (see README file for some design notes)
+A models/lnd/clm/src/biogeophys/test/Irrigation_test/test_irrigation_deficit.pf
+A models/lnd/clm/src/biogeophys/test/Irrigation_test/test_irrigation_multipatch.pf
+A models/lnd/clm/src/biogeophys/test/Irrigation_test/IrrigationWrapperMod.F90
+A models/lnd/clm/src/biogeophys/test/Irrigation_test/test_irrigation_singlepatch.pf
+A models/lnd/clm/src/biogeophys/test/Irrigation_test/CMakeLists.txt
+A models/lnd/clm/src/biogeophys/test/Irrigation_test/README
+A models/lnd/clm/src/biogeophys/test/Irrigation_test
+
+List all existing files that have been modified, and describe the changes:
+
+========= Pull irrigation out of CanopyFluxes into its own routine, and also
+ pull out the filters that used to be created locally in CanopyFluxes
+ and BareGroundFluxes
+M models/lnd/clm/src/main/clm_driver.F90
+
+========= Pull out filters that used to be created locally in CanopyFluxes and
+ BareGroundFluxes, so that they can be reused in irrigation and
+ elsewhere
+M models/lnd/clm/src/main/filterMod.F90
+
+========= Add calls to irrigation init & restart routines
+MM models/lnd/clm/src/main/clm_initializeMod.F90
+M models/lnd/clm/src/main/restFileMod.F90
+
+========= Irrigation computations are now done in the irrigation module; also,
+ the relevant filter is now created outside CanopyFluxes
+MM models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90
+
+========= Irrigation computations are now done in the irrigation module
+M models/lnd/clm/src/biogeophys/CanopyHydrologyMod.F90
+M models/lnd/clm/src/biogeophys/LakeHydrologyMod.F90
+
+========= Irrigation variables are now defined in the irrigation module
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/biogeophys/WaterfluxType.F90
+M models/lnd/clm/src/biogeophys/HydrologyDrainageMod.F90
+
+========= Filter is now created outside BareGroundFluxes; also, moved some bare
+ ground initialization from CanopyFluxes (needed because the filters
+ are no longer created locally, so CanopyFluxes does not know what
+ points it would need to set for bare ground)
+MM models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90
+
+========= Remove some dependencies to aid unit testing (this includes combining
+ two branches of a conditional (allowlakeprod) that were doing the same
+          thing)
+M models/lnd/clm/src/biogeophys/SoilStateType.F90
+
+
+========= Add dependencies (direct & indirect) of IrrigationMod for unit testing
+M models/lnd/clm/src/utils/CMakeLists.txt
+M models/lnd/clm/src/main/CMakeLists.txt
+M models/lnd/clm/src/ED/main/CMakeLists.txt
+M models/lnd/clm/src/CMakeLists.txt
+M models/lnd/clm/src/biogeophys/CMakeLists.txt
+M models/lnd/clm/src/biogeophys/test/CMakeLists.txt
+
+========= Add new unit test utilities
+M models/lnd/clm/src/unit_test_shr/CMakeLists.txt
+
+========= Make newly-added subgrid units active by default
+M models/lnd/clm/src/unit_test_shr/unittestSubgridMod.F90
+
+========= Unrelated fix for gfortran
+MM models/lnd/clm/src/biogeophys/HumanIndexMod.F90
+
+========= Change whitespace
+M models/lnd/clm/src/dyn_subgrid/CMakeLists.txt
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: not run
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone: fail due to two issues:
+
+ (1) The unit tests currently won't build on yellowstone due to an ICE that
+ will probably be fixed when we remove dependencies of SoilStateType.
+
+ (2) In addition, even once that's fixed, the yellowstone unit tests either
+ need (a) a bump in the unit testing external (unit_testing_0_08) and
+ Machines external (Machines_141007) (I didn't do that for my tag because
+ it pulls in a bump in the intel compiler version to 14 rather than 13.1),
+ or (b) the following diffs:
+
+ Index: tools/unit_testing/python/machine_setup.py
+ ===================================================================
+ --- tools/unit_testing/python/machine_setup.py (revision 64421)
+ +++ tools/unit_testing/python/machine_setup.py (working copy)
+ @@ -52,7 +52,7 @@
+ mod.load("ncarenv/1.0")
+ mod.load("ncarbinlibs/1.0")
+ if compiler == "intel":
+ - mod.load("intel/13.1.2")
+ + mod.load("intel/14.0.2")
+ elif compiler == "pgi":
+ mod.load("pgi/13.9")
+ mod.load("ncarcompilers/1.0")
+ Index: scripts/ccsm_utils/Machines/config_compilers.xml
+ ===================================================================
+ --- scripts/ccsm_utils/Machines/config_compilers.xml (revision 64421)
+ +++ scripts/ccsm_utils/Machines/config_compilers.xml (working copy)
+ @@ -547,7 +547,7 @@
+ -xHost
+ -xHost
+ $(TRILINOS_PATH)
+ - /glade/u/home/santos/pFUnit/pFUnit_Intel
+ + /glade/u/home/sacks/pFUnit/pFUnit3.0.1_Intel14.0.2_Serial
+
+
+
+
+
+
+ However, I have run the unit tests on my mac, with gfortran, and they all
+ pass
+
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu: NOT RUN
+ goldbach_nag: ok
+ goldbach_intel: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r089
+
+Changes answers relative to baseline: YES (but only because of bug 1998)
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: irrigation
+ - what platforms/compilers: all
+ - nature of change: larger than roundoff, not investigated closely
+
+ The changes are entirely due to the btran bug (bug 1998): The old flow was:
+ - compute btran
+ - calculate irrigation (depends on btran)
+ - hack btran for soybeans
+
+ whereas the new flow is:
+ - call CanopyFluxes: computes btran and hacks btran for soybeans
+ - calculate irrigation (depends on btran)
+
+ I have confirmed that answers are bit-for-bit for both irrigation tests (for
+ both cpl and clm hist files), when I introduce the following diffs in both
+ the trunk and the branch:
+
+ Index: src/biogeophys/CanopyFluxesMod.F90
+ ===================================================================
+ --- src/biogeophys/CanopyFluxesMod.F90 (revision 64406)
+ +++ src/biogeophys/CanopyFluxesMod.F90 (working copy)
+ @@ -842,9 +842,6 @@
+ btran(p) = min(1._r8, btran(p) * 3.33_r8)
+ end if
+ end if
+ - if (pft%itype(p) == nsoybean .or. pft%itype(p) == nsoybeanirrig) then
+ - btran(p) = min(1._r8, btran(p) * 1.25_r8)
+ - end if
+ end do
+
+ if ( use_ed ) then
+ @@ -894,9 +891,6 @@
+ btran(p) = min(1._r8, btran(p) * 3.33_r8)
+ end if
+ end if
+ - if (pft%itype(p) == nsoybean .or. pft%itype(p) == nsoybeanirrig) then
+ - btran(p) = min(1._r8, btran(p) * 1.25_r8)
+ - end if
+ end do
+
+ call Photosynthesis (bounds, fn, filterp, &
+
+
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r089
+Originator(s): erik (Erik)
+Date: Mon Oct 13 13:46:43 MDT 2014
+One-line Summary: Bring new urban building temperature to trunk as a clm5.0 feature
+ as well as human-stress index calculations
+
+Purpose of changes:
+
+New prognostic internal building air temperature methodology for CLM5.0. Retain the older simpler method
+for CLM4.5. The namelist toggle to switch between them is: building_temp_method. By default for clm4_5 physics
+the older method is used and for clm5_0 the newer one is used. Also add in a package of human-stress index
+calculations. Again this is by default on for clm5_0 and off for clm4_5.
+
+The new building air temperature methodology solves the system of equations for internal: air, roof, floor,
+and wall (shade and sunlit) temperatures. It uses the LAPACK subroutine DGESV to solve the system. It also
+figures out the energy flux needed to either cool the building air temperature to a maximum allowed temperature
+or to heat it to the minimum allowed temperature.
+
+Add in the new load balancing tool and the PyReshaper tool (changes multi-variable
+monthly history files into single-variable time-series files).
+
+Remove the now unneeded clm4_5 interpinic as well as the mkmapgrids FORTRAN program.
+
+Requirements for tag: Bring in new clm5_0 building temperature as option, Fix scripts bugs, bug 2053/2032
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 2053 Problems with dry-deposition for fully coupled cases with CLM4.5 in cesm1_3_beta13
+ 2032 rtm.buildnml.csh kills cesm_setup if GET_REFCASE is FALSE
+ 1685 Drydeposition potentially using "rs" variable before it's defined (over water)
+ (was fixed but came back)
+ Fix bugs: 2024, 2035, 2037 in scripts SBN and namelistcompare issues
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: new namelists and namelist items
+ add: clmu_inparm and clm_humanindex_inparm namelists
+ move: urban_hac and urban_traffic to clmu_inparm
+ add: calc_human_stress_indices to clm_humanindex_inparm
+ building_temp_method to clmu_inparm
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes:
+ clm5_0 memory use is greater than clm4_5 due to human_stress_indices calculations
+ and output on history (I've seen it make up to a 20% difference)
+
+Code reviewed by: self, oleson, sacks, mvertens, andre
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, Machines, rtm, cism, csm_share
+ Update to cesm1_3_alpha13c externals.
+ Also add in load_balancing_tool and pyReshaper
+
+ scripts to scripts4_141009
+ Machines to Machines_141001
+ rtm to rtm1_0_39
+ cism to cism1_140916
+ csm_share to share3_141003
+ load_balancing_tool to load_balancing_tool_140818
+ pyReshaper to v0.9.1
+
+List all files eliminated:
+
+--------- Remove clm4_5 interpinic, online interpinic supersedes it
+D models/lnd/clm/tools/clm4_5/interpinic
+D models/lnd/clm/tools/clm4_5/interpinic/*
+D models/lnd/clm/tools/clm4_5/interpinic/src/*
+
+--------- Remove mkmapgrids program
+D models/lnd/clm/tools/shared/mkmapgrids/src
+D models/lnd/clm/tools/shared/mkmapgrids/src/*
+D models/lnd/clm/tools/shared/mkmapgrids/mkmapgrids.namelist
+D models/lnd/clm/tools/shared/mkmapgrids/mkmapgrids.csh
+
+--------- shr_sys_mod was mocked only because of its use of shr_mpi_mod
+--------- mock shr_mpi_mod instead and standard shr_sys_mod can be used
+D models/lnd/clm/src/unit_test_mocks/csm_share/shr_sys_mod_mock.F90
+
+List all files added and what they do:
+
+A models/lnd/clm/src/biogeophys/HumanIndexMod.F90 -- New module to
+ calculate a bunch of human stress index values.
+
+A models/lnd/clm/src/biogeophys/UrbBuildTempOleson2015Mod.F90 --- New module
+ for calculating the prognostic internal building air temperature.
+
+--------- New simple unit tester for humanstress indices module
+A models/lnd/clm/src/biogeophys/test/HumanStress_test/test_humanstress.pf
+A models/lnd/clm/src/biogeophys/test/HumanStress_test/CMakeLists.txt
+A models/lnd/clm/src/biogeophys/test/HumanStress_test
+
+A models/lnd/clm/src/unit_test_mocks/csm_share/shr_mpi_mod_mock.F90 -- shell
+ for most shr_mpi_ calls that do nothing (so assumes MPI is NOT being done)
+ shr_mpi_abort does a stop
+
+List all existing files that have been modified, and describe the changes:
+
+--------- remove the mkgriddata and clm4_5 interpinic tools from testing
+M models/lnd/clm/test/tools/input_tests_master
+M models/lnd/clm/test/tools/tests_posttag_yong
+M models/lnd/clm/test/tools/tests_posttag_nompi_regression
+M models/lnd/clm/test/tools/tests_pretag_yellowstone_nompi
+
+--------- remove documentation of mkgriddata and clm4_5 interpinic tools
+--------- but add documentation on ncl script
+M models/lnd/clm/tools/shared/mkmapgrids/README
+M models/lnd/clm/tools/README
+
+--------- Add new namelists: clmu_inparm and clm_humanindex_inparm
+M models/lnd/clm/bld/configure ---- use same configuration for clm4_5 AND clm5_0
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml --- Set
+ calc_human_stress_indices, and building_temp_method by clm4_5/clm5_0
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml - Move
+ urban_hac and urban_traffic to new clmu_inparm namelist and
+ add building_temp_method and add calc_human_stress_indices to
+ clm_humanindex_inparm namelist
+M models/lnd/clm/bld/CLMBuildNamelist.pm -------- Handle new namelists:
+ clmu_inparm and clm_humanindex_inparm
+
+--------- Get unit tests working again, and add a simple humanindex test
+M models/lnd/clm/src/utils/CMakeLists.txt
+M models/lnd/clm/src/ED/main/CMakeLists.txt
+M models/lnd/clm/src/ED/biogeophys/CMakeLists.txt
+M models/lnd/clm/src/README.unit_testing
+M models/lnd/clm/src/biogeophys/CMakeLists.txt
+M models/lnd/clm/src/biogeophys/test/CMakeLists.txt
+M models/lnd/clm/src/unit_test_mocks/util_share/spmdMod_mock.F90 - set mpicom
+M models/lnd/clm/src/unit_test_mocks/csm_share/CMakeLists.txt
+M models/lnd/clm/src/CMakeLists.txt
+
+--------- Fix so can work with drydeposition namelist and without megan namelist
+M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 - Don't initialize if
+ megan namelist is turned off (bug 2053)
+M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 - Make sure rs is set
+ before used (bug 1685)
+
+--------- Handle new building temperature options, add new constants
+MM models/lnd/clm/src/main/clm_varcon.F90 -------- Bunch of new constants
+ for urban-building (should be moved to modules that use them).
+ Also pass is_simple_buildtemp into init method
+MM models/lnd/clm/src/main/clm_initializeMod.F90 - Pass building temp type
+      down to relevant init methods, add initialization for humanindex_vars,
+ initialize drydepvel_vars (bug 2053)
+M models/lnd/clm/src/main/restFileMod.F90 ------- Pass building temp type
+ logicals down to energyflux_vars and temperature_vars restart methods
+M models/lnd/clm/src/main/LandunitType.F90 ------ Add documentation, correct
+ error in documentation
+MM models/lnd/clm/src/main/controlMod.F90 -------- Move urban namelist items to
+ UrbanReadNML, add HumanIndexReadNML
+M models/lnd/clm/src/main/clm_driver.F90 -------- Pass humanindex_vars down as needed
+
+--------- Change for new shr_cal_mod names
+M models/lnd/clm/src/ED/biogeophys/EDPhenologyMod.F90 - Use full name of month
+ "january" instead of "jan"
+
+--------- Add new building temperature module and add capability to do old clm4_5
+--------- method as well as new method, also do human_stress_indices calculations
+MM models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 --- Add BuildingHAC for
+      simple building temp method (should move to its own module), add if
+ statements for building_temp_method type, call BuildingTemperature
+ when prognostic method used
+M models/lnd/clm/src/biogeophys/SoilFluxesMod.F90 -------- Change name of
+ eflx_building_heat to eflx_building_heat_errsoi
+M models/lnd/clm/src/biogeophys/TemperatureType.F90 ------ Add building
+ temperature variables (should move to urbBuildTemp module), pass building_temp
+      method logical down for initialization, add documentation headers;
+      hist, cold, and restart init depend on building temp method logical
+M models/lnd/clm/src/biogeophys/LakeFluxesMod.F90 -------- calc_human_stress_indices
+ (should move to method in humanIndexMod)
+MM models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90 -- calc_human_stress_indices
+ (should move to method in humanIndexMod)
+MM models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90 ------ calc_human_stress_indices
+ (should move to method in humanIndexMod)
+M models/lnd/clm/src/biogeophys/EnergyFluxType.F90 ---- change name of eflx_building_heat_col
+ to eflx_building_heat_errsoi_col, add some new building temperature flux terms
+ add documentation, alloc, hist, restart and cold initialization depends on
+ building temperature method type (should move to urban building module)
+M models/lnd/clm/src/biogeophys/UrbanFluxesMod.F90 ---- Add private functions:
+ wasteheat, simple_wasteheatfromac, calc_simple_internal_building_temp
+ (should move to building_temp modules).
+ calc_human_stress_indices (should move to method in humanIndexMod)
+M models/lnd/clm/src/biogeophys/UrbanParamsType.F90 --- Add methods: UrbanReadNML,
+ IsSimpleBuildTemp, IsProgBuildTemp, add clmu_inparm namelist, and move urban_* items
+ there and add building_temp_method to it.
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ unit-tests (models/lnd/clm/src):
+
+ yellowstone yes
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ yellowstone_gnu yes
+ goldbach_nag yes
+ goldbach_intel yes
+
+ short tests (aux_clm_short):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ goldbach_nag yes
+
+ tools testing:
+
+ yellowstone interactive yes
+ PTCLM (models/lnd/clm/tools/shared/PTCLM/test) yellowstone yes
+
+CLM tag used for the baseline comparisons: clm4_5_1_r088
+
+Changes answers relative to baseline: No for CLM40 and CLM45
+ But, answers DO change for CLM50
+ (except scripts tag update changes history files for IG and irrigation compsets/tests)
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r088
+Originator(s): muszala (Stefan Muszala)
+Date: Wed Oct 1 09:24:43 MDT 2014
+One-line Summary: Pull out ED deps. in TemperatureTypeMod, can now compile with pgi 14.7
+
+Purpose of changes: Pull out the dependency on EDBioType in TemperatureType.F90. The ED
+variables related to phenology now reside in EDPhenologyMod.F90. This refactor also had
+the effect of getting past a PGI 14.7 ICE which looks like it was due to the use of EDbio_vars
+in TemperatureType.F90. When I pulled out lines 1227 and 1226 of biogeophys/TemperatureType.F90
+(in clm4_5_1_r087) and passed the two EDbio_vars variables through the argument list the ICE
+went away.
+
+This tag breaks ED restart tests. We went ahead with the tag because we had to fix a more
+general problem with the CESM and CAM builds and PGI 14.7. the ED v0.1.0 branch does not
+have these modifications and may be used as an alternative. A new clm tag will shortly
+follow that addresses any remaining problems.
+
+Requirements for tag: None
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+Temporary addition of a cal parameter type in a branch tag. Will be merged into csm_share trunk shortly.
+
+-models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140723
++models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/branch_tags/share_ece_tags/share_ece_01_140723
+
+List all files eliminated: N/A
+
+List all files added and what they do:
+
+! new home for ED phenology variables and type-bound procedures that
+! allow for accumulation of buffers
+A + models/lnd/clm/src/ED/biogeophys/EDPhenologyMod.F90
+! put some CMakeLists.txt in place for ED unittests
+A + models/lnd/clm/src/ED/main/CMakeLists.txt
+A + models/lnd/clm/src/ED/biogeophys/CMakeLists.txt
+A + models/lnd/clm/src/ED/CMakeLists.txt
+
+List all existing files that have been modified, and describe the changes:
+
+! pull out deps. on EDBioType
+M models/lnd/clm/src/biogeophys/TemperatureType.F90
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/main/clm_initializeMod.F90
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/main/restFileMod.F90
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/main/clm_driver.F90
+
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/ED/biogeochem/EDPhysiologyMod.F90
+! pull out ED_GDD_patch and phen_cd_status_patch
+M models/lnd/clm/src/ED/main/EDBioType.F90
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/ED/biogeochem/EDPhysiologyMod.F90
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/ED/main/EDMainMod.F90
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/ED/main/EDCLMLinkMod.F90
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/ED/main/EDRestVectorMod.F90
+! add code for new class instance, EDphenology_inst
+M models/lnd/clm/src/ED/main/EDInitMod.F90
+
+! for ED unit tests
+M models/lnd/clm/src/CMakeLists.txt
+
+! update CNED failures
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+CLM testing:
+
+ERS CNED tests are failing in this tag. It is expected. expectedClmTestFails.xml is updated to reflect this.
+
+ build-namelist tests: N/A
+
+ yellowstone
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel 40-OK, 45-OK
+ yellowstone_pgi 40-OK, 45-OK
+ goldbach_nag 40-OK, 45-OK
+ goldbach_intel 40-OK, 45-OK
+
+CLM tag used for the baseline comparisons: clm4_5_1_r087
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r087
+Originator(s): erik (Erik)
+Date: Tue Sep 30 12:07:10 MDT 2014
+One-line Summary: Fix two balance check errors, and turn abort for balance check back on to appropriate levels
+
+Purpose of changes:
+
+Fix two balance check errors that were causing problems for simulations. Also some of the balance check aborts
+were turned off in clm4_5_1_r082, so turn them back on again. Tighten water balance error from 1.e-4 to 1.e-5.
+Tighten LW, surface-flux and solar radiation balance errors from 1.e-3 to 1.e-5 and add warning for 1.e-7.
+Turn surface-flux balance and soil balance check errors abort back on. Soil balance tightened to 1.e-4 (from 1.e-3)
+with warnings shown at 1.e-6.
+
+Also bring in an update to PTCLM, and allow tools tester to be submitted to geyser or caldera. Just as an aside
+as something that was already done.
+
+Requirements for tag: Fix bugs: 2026 and 1941
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 2026 Soil balance error
+ 1941 snowdp balance error
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, swensosc
+
+List any svn externals directories updated (csm_share, mct, etc.): Update PTCLMmkdata version
+
+ Update PTCLM to PTCLM2_140816
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/test/tools/test_driver.sh ------ Allow to run in caldera and geyser as well
+
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90 ---- Add aborts back as well as warnings
+ and tighten some error conditions and warnings.
+M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 - Remove calculation and use of the
+ heat capacity of frozen h2osfc layer but use the heat capacity of the liquid layer
+      as balance check doesn't know about the frozen, and the discrepancy causes balance
+ check errors.
+M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90 --- snow includes dew.
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone YES
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel YES
+ yellowstone_pgi YES
+ yellowstone_gnu (optional) YES
+ goldbach_nag YES
+ goldbach_intel YES
+
+CLM tag used for the baseline comparisons: clm4_5_1_r086
+
+Changes answers relative to baseline: YES!
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM4.5
+ - what platforms/compilers: ALL
+ - nature of change: same climate (some shorter simulations are still exact)
+
+Simulations that Keith ran to test the snowdp change are:
+
+/glade/p/work/oleson/urb2dev_n00_clm4_5_52/scripts/urb2dev_n03_clm4_5_64_V2DomLam_I20TRCRUCLM45
+
+/glade/p/work/oleson/urb2dev_n00_clm4_5_52/scripts/urb2dev_n03_clm4_5_64_V2DomLam_IRCP85CRUCLM45
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+ For soil balance error...
+
+http://www.cgd.ucar.edu/staff/swensosc/public/diagnostics/ColdtestTRENDYspinupf091850CRU-ColdtestTRENDYspinupf091850CRU_control/setsIndex.html
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r086
+Originator(s): muszala (Stefan Muszala)
+Date: Thu Sep 25 09:04:08 MDT 2014
+One-line Summary: critical ED modifications from r fisher, fix bug 2043
+
+Purpose of changes: add modifications to ED, particularly for cold deciduous. add
+ fix for bug 2043. Consider these ED baselines as fixed (ie.
+ unless you are modifying ED science, these should now be BFB).
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 2043
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, r fisher
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/main/lnd2atmMod.F90
+-- change intent of waterstate_vars to inout (fixes bug 2043)
+
+M models/lnd/clm/src/main/clm_driver.F90
+-- add EDbio_vars to edmodel actual argument list
+
+M models/lnd/clm/src/ED/biogeochem/EDCohortDynamicsMod.F90
+-- change leaves_off_switch and laimemory handling
+
+M models/lnd/clm/src/ED/biogeochem/EDPhysiologyMod.F90
+-- rework cold deciduous and threshold code. add fragmentation_scaler routine
+
+M models/lnd/clm/src/ED/main/EDMainMod.F90
+-- change argument lists to include EDbio_vars for ecosystem_dynamics and phenology
+
+M models/lnd/clm/src/ED/biogeophys/EDSurfaceAlbedoMod.F90
+-- overhaul ED norman radiation code
+
+M models/lnd/clm/src/ED/biogeophys/EDPhotosynthesisMod.F90
+-- tweak calculation of jmax25top and tpu25top
+
+M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90
+-- clean up two comments
+
+CLM testing:
+
+ ED compsets change values.
+
+  For bug 2043. Confirmed that a gnu compile on yellowstone gets past the intent problem.
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel - 40 OK - 45 OK
+ yellowstone_pgi - 40 OK - 45 OK
+ goldbach_nag - 40 OK - 45 OK
+ goldbach_intel - 40 OK - 45 OK
+
+CLM tag used for the baseline comparisons: clm4_5_1_r085
+
+Changes answers relative to baseline: Only for ED compsets
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r085
+Originator(s): sacks (Bill Sacks)
+Date: Fri Sep 19 10:22:30 MDT 2014
+One-line Summary: replace conditionals with polymorphism for soil water retention curve
+
+Purpose of changes:
+
+The main motivation for this tag was the need to introduce a
+soil_suction_inverse routine, which will be used for irrigation. It is important
+that soil_suction_inverse remains consistent with soil_suction for every soil
+water retention curve method. In talking with Ben Andre and Erik, we felt the
+best way to ensure this consistency was to have a separate, small module for
+each soil retention curve method. We felt the best way to implement this was via
+polymorphism. Polymorphism is arguably overkill in this simple case, but we
+thought it would be good to convert it to polymorphism partly as an example that
+we and others can follow in more complex cases where it will provide greater
+benefit.
+
+To add a new soil retention curve method:
+
+ (1) Create a module similar to
+ SoilWaterRetentionCurveClappHornberg1978Mod.F90
+
+ (2) Modify the select case statement in SoilWaterRetentionCurveFactoryMod.F90
+ so that it is able to create an instance of your new type
+
+Note that this refactor also combines the soil_suction and soil_hk
+parameterization options into a single option. Dave Lawrence and Rosie Fisher
+felt that was preferable, and Jinyun Tang was okay with this.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: andre, muszala, Jinyun Tang
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated:
+
+========= Replaced with the 3 new files noted below
+D models/lnd/clm/src/biogeophys/SoiWatRetCurveParMod.F90
+
+List all files added and what they do:
+
+========= Replacement for SoiWatRetCurveParMod, implemented using
+ polymorphism. Note that I have also added a soil_suction_inverse
+ routine, which is not yet used or tested. I'll be using (and testing)
+ this in an upcoming tag, where I refactor the irrigation code to use this.
+A models/lnd/clm/src/biogeophys/SoilWaterRetentionCurveMod.F90
+A models/lnd/clm/src/biogeophys/SoilWaterRetentionCurveFactoryMod.F90
+A models/lnd/clm/src/biogeophys/SoilWaterRetentionCurveClappHornberg1978Mod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+========= Minor changes to accommodate the refactored code
+M models/lnd/clm/src/main/clm_initializeMod.F90
+M models/lnd/clm/src/main/init_hydrology.F90
+M models/lnd/clm/src/main/clm_driver.F90
+M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/biogeophys/SoilWaterMovementMod.F90
+M models/lnd/clm/src/biogeophys/SoilMoistStressMod.F90
+M models/lnd/clm/src/biogeophys/HydrologyNoDrainageMod.F90
+
+
+CLM testing:
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu: NOT RUN
+ goldbach_nag: ok
+ goldbach_intel: ok
+
+CLM tag used for the baseline comparisons: clm4_5_1_r084
+
+Changes answers relative to baseline: NO - bfb
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r084
+Originator(s): sacks (Bill Sacks)
+Date: Thu Sep 18 14:39:44 MDT 2014
+One-line Summary: make glc_dyn_runoff_routing spatially-varying, based on input from glc
+
+Purpose of changes:
+
+Dave Lawrence, Bill Lipscomb and Jeremy Fyke have pointed out that
+glc_dyn_runoff_routing needs to be spatially-varying: Even when we're coupling
+to CISM, we should continue to use the old scheme in regions that don't have an
+active icesheet model underneath (which currently includes Antarctica and all of
+the world's smaller glaciers - i.e., everything except Greenland). Furthermore,
+we have introduced a new ability into CISM to run in diagnostic mode, without
+sending calving/runoff fluxes to the coupler. In this case, too, CLM should
+revert to using the old scheme (glc_dyn_runoff_routing = .false.).
+
+To accomplish both of these things, I have introduced a new coupling field,
+through which GLC tells CLM which areas have an icesheet that is "active" in the
+sense of sending fluxes to the coupler. In this CLM tag, CLM sets a
+spatially-varying version of glc_dyn_runoff_routing based on this new coupler
+field - replacing the old, namelist-settable version of this flag.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist:
+
+ - removed glc_dyn_runoff_routing
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ drv: drvseq5_0_15 -> drvseq5_0_17
+ - fix for some multi-instance runs
+ - add icemask_coupled_fluxes field
+
+ cism: cism1_140602 -> cism1_140914
+ - Add zero_gcm_fluxes option; send icemask_coupled_fluxes field to coupler
+
+ scripts: scripts4_140916b -> scripts4_140916c
+ - Rename CLM_UPDATE_GLC_AREAS to GLC_TWO_WAY_COUPLING
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= Rename CLM_UPDATE_GLC_AREAS xml variable to GLC_TWO_WAY_COUPLING.
+ Remove glc_dyn_runoff_routing namelist variable (this is now a
+ spatially-varying field, tied more tightly to CISM).
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/CLMBuildNamelist.pm
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+M models/lnd/clm/bld/user_nl_clm
+M models/lnd/clm/bld/env_run.xml
+
+========= Receive icemask_coupled_fluxes from CISM
+M models/lnd/clm/src/cpl/lnd_import_export.F90
+M models/lnd/clm/src/cpl/clm_cpl_indices.F90
+
+========= Set spatially-varying glc_dyn_runoff_routing field based on
+ icemask_coupled_fluxes, and use this in place of the old scalar
+ glc_dyn_runoff_routing flag
+M models/lnd/clm/src/main/glc2lndMod.F90
+M models/lnd/clm/src/main/clm_driver.F90
+M models/lnd/clm/src/main/clm_varctl.F90
+M models/lnd/clm/src/main/controlMod.F90
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/biogeophys/HydrologyDrainageMod.F90
+M models/lnd/clm/src/dyn_subgrid/dynSubgridDriverMod.F90
+
+========= Add write statement to workaround a pgi compiler problem
+M models/lnd/clm/src/main/restFileMod.F90
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok (baseline comparisons fail for clm45 & clm50, as expected)
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ yellowstone_gnu: NOT RUN
+ goldbach_nag: ok
+ goldbach_intel: ok
+
+ Note: Most testing was run on glc_runoff_routing_n06_clm4_5_1_r083. After
+ that tag, I added the following write statement in restFileMod, as a
+ workaround for a PGI compiler bug:
+
+ write(iulog,*) 'about to call aerosol_vars%restart: ', ubound(waterstate_vars%h2osoi_ice_col)
+
+ After that addition, I just reran a subset of tests: 6 yellowstone-intel
+ tests, 10 yellowstone-pgi tests (including the 2 that had failed due to the
+ compiler bug), and 5 goldbach-nag tests.
+
+NOTE: Unit test build is currently failing due to a change in r082. Stefan is
+working on a fix.
+
+CLM tag used for the baseline comparisons: clm4_5_1_r083
+
+Changes answers relative to baseline: YES
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM45 with CISM (i.e., IG)
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ (1) Large changes in runoff from glaciers in IG compsets, due to setting
+ glc_dyn_runoff_routing to .false. outside of Greenland.
+
+ (2) Roundoff-level changes in icemask for some resolutions and compilers,
+ due to changes in the cism external.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? For icemask changes: examined cprnc RMS errors. For other
+ changes, diffs are greater than roundoff.
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r083
+Originator(s): muszala (Stefan Muszala)
+Date: Wed Sep 17 09:21:31 MDT 2014
+One-line Summary: only update scripts and run new baselines. this due to an error in yellowstone pgi test naming (clm_aux45 changed to aux_clm45)
+
+Purpose of changes: Update scripts due to an error in a previous scripts tag in which I named pgi tests as clm_aux45 instead
+of aux_clm45. These were for tests moved from goldbach to yellowstone.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts4_140910 -> scripts4_140916b
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes: N/A
+
+- update test list failures
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+CLM testing:
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ WJS: note: Stef told me that he actually ran all the yellowstone tests, and
+ all were okay
+
+ yellowstone_intel - 40 OK
+ yellowstone_pgi
+
+ goldbach_nag - 40 OK, 45 OK
+ goldbach_intel - 40 OK, 45 OK
+
+Note 1: Due to the fact that in older baselines component_gen_comp was failing due to a scripts error (now fixed)
+some older baselines don't have the clm history files. As one example:
+
+ BFAIL ERI_D.f10_f10.ICRUCLM50BGC.goldbach_intel.clm-reduceOutput.compare_hist.clm4_5_1_r082
+ - rerun. compare against clm4_5_1_r081, then it passes.
+ PASS ERI_D.f10_f10.ICRUCLM50BGC.goldbach_intel.clm-reduceOutput
+ PASS ERI_D.f10_f10.ICRUCLM50BGC.goldbach_intel.clm-reduceOutput.memleak
+ PASS ERI_D.f10_f10.ICRUCLM50BGC.goldbach_intel.clm-reduceOutput.compare_hist.clm4_5_1_r082_oldPgi
+ PASS ERI_D.f10_f10.ICRUCLM50BGC.goldbach_intel.clm-reduceOutput.memcomp.clm4_5_1_r082_oldPgi
+ PASS ERI_D.f10_f10.ICRUCLM50BGC.goldbach_intel.clm-reduceOutput.tputcomp.clm4_5_1_r082_oldPgi
+ PASS ERI_D.f10_f10.ICRUCLM50BGC.goldbach_intel.clm-reduceOutput.C.140917-082253.nlcomp
+
+ For these tests, I made sure that clm2 history files were in the clm4_5_1_r083 baseline dirs. They
+ should pass going forward.
+
+ ERI_D.f10_f10.ICRUCLM50BGC.goldbach_intel.clm-reduceOutput
+ PEM.f10_f10.ICLM45BGCCROP.goldbach_intel.clm-crop
+ SSP.f19_g16.I1850CLM45BGC.yellowstone_pgi.clm-default
+
+Note 2: For these N2 tests, there was a scripts problem with st_archiver in the previous tag. These should pass next time.
+ BFAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.compare_hist.clm4_5_1_r082
+ BFAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default.compare_hist.clm4_5_1_r082
+ BFAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default.clm2.h0.compare_hist.clm4_5_1_r082
+ BFAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default.clm2.h1.compare_hist.clm4_5_1_r082
+
+CLM tag used for the baseline comparisons: clm4_5_1_r082
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r082
+Originator(s): muszala (Stefan Muszala)
+Date: Thu Sep 11 14:07:58 MDT 2014
+One-line Summary: Merge in a number of ED changes to address science bugs and infrastructure (particularly restarts)
+
+Purpose of changes: Merge in ED changes. Most of these have to do with science changes from rfisher. There
+is also a refactor and added functionality for ED restarts.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): Bug 2041, 2042
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, rfisher
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ scripts4_140814a -> scripts4_140910
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+-- add failing N2 tests (due to scripts, see below).
+
+M models/lnd/clm/bld/build-namelist
+-- documentation at top of file should reference CLMBuildNamelist.pm
+
+M models/lnd/clm/src/main/clm_driver.F90
+-- add call to SurfaceAlbedo for use_ed logical branch, add EDBioVars as
+-- argument to temperature_vars%UpdateAccVars and add canopystate_vars as
+-- argument to BalanceCheck (for ED)
+M models/lnd/clm/src/main/decompMod.F90
+-- added openMP output...remove mods after getting ED working with openMP
+
+M models/lnd/clm/src/ED/biogeochem/EDCanopyStructureMod.F90
+-- change handling of CWD_AG and CWD_BG
+M models/lnd/clm/src/ED/biogeochem/EDGrowthFunctionsMod.F90
+-- added tree_sai function
+M models/lnd/clm/src/ED/biogeochem/EDCohortDynamicsMod.F90
+-- significant reworking of entire module
+M models/lnd/clm/src/ED/biogeochem/EDPhysiologyMod.F90
+-- minor code clean up
+M models/lnd/clm/src/ED/biogeochem/EDPatchDynamicsMod.F90
+-- change an error check to: if (( areatot - area ) > 0._r8 ) then
+
+M models/lnd/clm/src/ED/main/EDBioType.F90
+-- add infrastructure (define, allocate, etc...)
+M models/lnd/clm/src/ED/main/EDMainMod.F90
+-- major update for updating canopy biomass pools
+M models/lnd/clm/src/ED/main/EDCLMLinkMod.F90
+-- modify calls for history file output and error checking
+M models/lnd/clm/src/ED/main/EDRestVectorMod.F90
+-- add resp_clm as restart variable and use SHR_ASSERT instead of call assert. major refactor
+-- of createPatchCohortStructure to handle arbitrary number of cohorts and patches
+M models/lnd/clm/src/ED/main/EDInitMod.F90
+-- add logical to deal with different values of assignment dc%laimemory
+M models/lnd/clm/src/ED/main/EDTypesMod.F90
+-- add cohort_type and change parameters: numCohortsPerPatch, cohorts_per_gcell and fire_threshold
+
+M models/lnd/clm/src/ED/fire/SFMainMod.F90
+-- clean up write statements and a bug fix: change tau_b(dg_sf) -> tau_b(c)
+
+M models/lnd/clm/src/ED/biogeophys/EDPhotosynthesisMod.F90
+-- change tpu25top(FT) = 0.06_r8 * jmax25top(FT) to tpu25top(FT) = 0.167_r8 * jmax25top(FT), some cleanup
+
+M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90
+-- clean up a use statement
+M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90
+-- remove whitespaces after a statement
+M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90
+-- add a use_ed block to prevent some unassigned pointer errors
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+-- add canopystate_vars as argument for elai and esai for more verbose error
+-- reporting added by rfisher
+M models/lnd/clm/src/biogeophys/TemperatureType.F90
+-- add arg. for UpdateAccVars for new calculations, split out use_ed and use_crop
+M models/lnd/clm/src/biogeophys/SurfaceRadiationMod.F90
+-- add associate statement to tlai for error reporting
+
+M UpDateChangeLog.pl
+-- fix tiny typo
+
+M SVN_EXTERNAL_DIRECTORIES
+-- for scripts update
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone - N/A, no namelist changes made in this tag
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel - 45 OK , 40 OK
+
+ Changes due to update of scripts from scripts4_140814a -> scripts4_140910 (this change came with scripts4_140828)
+ FAIL namelist compare: user_nl_clm differs
+ These should pass next time.
+
+ NEW: flanduse_timeseries = '$DIN_LOC_ROOT/lnd/clm2/surfdata_map/surfdata.pftdyn_1x1_tropicAtl_TEST_simyr1939-1943_c140108.nc'
+ BASELINE: flanduse_timeseries = '/glade/p/cesmdata/cseg/inputdata/lnd/clm2/surfdata_map/surfdata.pftdyn_1x1_tropicAtl_TEST_simyr1939-1943_c140108.nc'
+
+ FAIL SMS_Ly3.1x1_tropicAtl.I20TRCLM45BGC.yellowstone_intel.clm-tropicAtl_subsetLate.GC.newPgi_45_intel.nlcomp
+ FAIL SMS_Ly5.1x1_tropicAtl.I20TRCLM45BGC.yellowstone_intel.clm-tropicAtl_subsetMid.GC.newPgi_45_intel.nlcomp
+ FAIL SMS_Ly8.1x1_tropicAtl.I20TRCLM45BGC.yellowstone_intel.clm-tropicAtl_subsetEarly.GC.newPgi_45_intel.nlcomp
+
+      These fail due to a bug in scripts4_140905c. Alice is aware of this and will provide a fix for a future CLM tag (bug 2041):
+ FAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel
+ FAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default
+
+ Failure that looks like a hardware problem and that Erik is looking into (bug 2042):
+ RUN ERI_D.ne30_g16.ICLM45BGC.yellowstone_intel.GC.newPgi_45_intel
+
+ yellowstone_pgi - 45 OK , 40 OK
+
+ New PGI tests brought over from goldbach. These are expected as there are no baselines for this on yellowstone
+
+ BFAIL ERI_D.f10_f10.ICLM45BGC.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERI_D.f10_f10.ICRUCLM50BGC.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERI_D.f19_g16.ICLM45BGC.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERI_D.f19_g16.ICLM45.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERI_D.f19_g16.ICRUCLM50BGC.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERI.f10_f10.ICLM45BGC.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERI.f10_f10.ICRUCLM50BGC.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERI.f19_g16.ICLM45BGC.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERI.f19_g16.ICRUCLM50BGC.yellowstone_pgi.clm-reduceOutput.compare_hist.clm4_5_1_r081
+ BFAIL ERS_D.f10_f10.I20TRCLM45BGC.yellowstone_pgi.clm-decStart.clm2.h0.compare_hist.clm4_5_1_r081
+ BFAIL ERS_D.f10_f10.I20TRCLM45BGC.yellowstone_pgi.clm-decStart.clm2.h1.compare_hist.clm4_5_1_r081
+ BFAIL ERS_D.f10_f10.I20TRCLM45BGC.yellowstone_pgi.clm-decStart.compare_hist.clm4_5_1_r081
+ BFAIL ERS_D.f10_f10.ICLM45BGC.yellowstone_pgi.clm-ciso.clm2.h0.compare_hist.clm4_5_1_r081
+ BFAIL ERS_D.f10_f10.ICLM45BGC.yellowstone_pgi.clm-ciso.clm2.h1.compare_hist.clm4_5_1_r081
+ BFAIL ERS_D.f10_f10.ICLM45BGC.yellowstone_pgi.clm-ciso.compare_hist.clm4_5_1_r081
+ BFAIL ERS.f10_f10.I1850CLM45BGC.yellowstone_pgi.clm-default.clm2.h0.compare_hist.clm4_5_1_r081
+ BFAIL ERS.f10_f10.I1850CLM45BGC.yellowstone_pgi.clm-default.clm2.h1.compare_hist.clm4_5_1_r081
+ BFAIL ERS.f10_f10.I1850CLM45BGC.yellowstone_pgi.clm-default.compare_hist.clm4_5_1_r081
+ BFAIL ERS_Lm3.1x1_smallvilleIA.ICLM45BGCCROP.yellowstone_pgi.clm2.h0.compare_hist.clm4_5_1_r081
+ BFAIL ERS_Lm3.1x1_smallvilleIA.ICLM45BGCCROP.yellowstone_pgi.compare_hist.clm4_5_1_r081
+ BFAIL SMS_D.1x1_vancouverCAN.ICLM45.yellowstone_pgi.clm-default.clm2.h0.compare_hist.clm4_5_1_r081
+ BFAIL SMS_D.1x1_vancouverCAN.ICLM45.yellowstone_pgi.clm-default.clm2.h1.compare_hist.clm4_5_1_r081
+ BFAIL SMS_D.1x1_vancouverCAN.ICLM45.yellowstone_pgi.clm-default.compare_hist.clm4_5_1_r081
+ BFAIL SMS.f45_f45.ICLM45.yellowstone_pgi.clm-ptsRLB.clm2.h0.compare_hist.clm4_5_1_r081
+ BFAIL SMS.f45_f45.ICLM45.yellowstone_pgi.clm-ptsRLB.clm2.h1.compare_hist.clm4_5_1_r081
+ BFAIL SMS.f45_f45.ICLM45.yellowstone_pgi.clm-ptsRLB.compare_hist.clm4_5_1_r081
+ BFAIL SMS.f45_f45.ICLM45.yellowstone_pgi.clm-ptsROA.compare_hist.clm4_5_1_r081
+ BFAIL SMS_Ld5.f19_g16.IRCP45CLM45BGC.yellowstone_pgi.clm-decStart.clm2.h0.compare_hist.clm4_5_1_r081
+ BFAIL SMS_Ld5.f19_g16.IRCP45CLM45BGC.yellowstone_pgi.clm-decStart.clm2.h1.compare_hist.clm4_5_1_r081
+ BFAIL SMS_Ld5.f19_g16.IRCP45CLM45BGC.yellowstone_pgi.clm-decStart.compare_hist.clm4_5_1_r081
+
+ goldbach_nag - 45 OK , 40 OK
+
+ Baseline missing from previous tag:
+ ERI_D.f19_g16.ICLM45BGC.goldbach_nag.clm-reduceOutput-
+        file /fs/cgd/csm/ccsm_baselines/clm4_5_1_r081/ERI_D.f19_g16.ICLM45BGC.goldbach_nag.clm-reduceOutput/cpl.hi.nc does not exist
+ SMS.f09_g16.ICRUCLM45.goldbach_nag.clm-af_bias_v5
+ file /fs/cgd/csm/ccsm_baselines/clm4_5_1_r081/SMS.f09_g16.ICRUCLM45.goldbach_nag.clm-af_bias_v5/cpl.hi.nc does not exist
+ SMS_Ly1.f19_g16.ICLM45BGCCROP.goldbach_nag.clm-reduceOutput
+        file /fs/cgd/csm/ccsm_baselines/clm4_5_1_r081/SMS_Ly1.f19_g16.ICLM45BGCCROP.goldbach_nag.clm-reduceOutput/cpl.hi.nc does not exist
+ SMS_Ly1.f19_g16.ICLM45BGCCROP.goldbach_nag.clm-reduceOutput
+        file /fs/cgd/csm/ccsm_baselines/clm4_5_1_r081/SMS_Ly1.f19_g16.ICLM45BGCCROP.goldbach_nag.clm-reduceOutput/cpl.hi.nc does not exist
+
+ goldbach_intel - 45 OK , 40 OK
+
+   goldbach_pgi (These have been moved to yellowstone due to PGI 14.1 throwing an ICE on goldbach)
+
+CLM tag used for the baseline comparisons: clm4_5_1_r081
+
+Changes answers relative to baseline: Only for ED compsets
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r081
+Originator(s): mvertens (Mariana Vertenstein)
+Date: Sun Aug 24 19:39:50 MDT 2014
+One-line Summary: major infrastructure changes and directory reorganization under src
+
+Purpose of changes:
+
+ Overview of previous code design
+ ==========================================
+ - data structures arranged by subgrid type (pps, cps, lps, grc)
+ - all functional categorization lumped in that one subgrid type which led to
+ - centralization rather than modularization of all data
+ - definition and instantiation in ONE big module (clmtype.F90)
+ - allocation and initialization in ONE big module (clmtypeInitMod.F90)
+ - history variables all in ONE big module (histFldsMod.F90)
+ - restart variables in effectively TWO big modules (biogeophysicsRestMod.F90 and CNRestMod.F90)
+ - time constant initialization in ONE complex module (initTimeConst.F90)
+ - time varying cold start initialization in ONE module (initColdMod.F90)
+ - accumulation variables in ONE module (accumulMod.F90)
+
+ Overview of new code design
+ ==========================================
+ data structures arranged by scientific functional categories
+ (e.g. temperature_type, waterstate_type, energyflux_type, etc)
+   - a given data structure now contains ALL subgrid levels -
+ and variables in the data structure are now appended with a unique suffix to
+ indicate their subgrid levels (new suffixes: _patch, _col, _lun, _grc)
+   - this does NOT affect the science code base, ONLY the associate statements
+   - there are separate modules for each data type definition
+ (e.g. TemperatureType.F90, WaterstateType.F90, EnergyFluxType.F90, etc)
+ - each data type has associated methods for
+ - Allocation:
+ variables now initialized as NaNs upon allocation
+ - Cold Start Initialization:
+ cold start initialization of variables is now ALWAYS done
+ and overwritten if finidat is read in as spun up dataset (also now
+        have online interpolation of initial conditions as part of this refactor as well)
+ - History initialization of variables
+ All history fields now initialized as spval
+ - Restart initialization of variables
+ - Accumulation Initialization
+ initialization and accumulation update of variables
+ - Instantiation of datatypes is now separate from their declaration
+ (for now in clm_initialize.F90 - will be moved in the future)
+
+ Centralized routines that no longer exist:
+ ==========================================
+ Data types : clmtype.F90, clmtypeInitMod.F90
+ Initialization : initTimeConst.F90, initCold.F90
+ History : histFldsMod.F90
+ Accumulation : accumulMod.F90
+ Restart : biogeophysRestMod.F90, CNRestMod.F90
+ Biogeochemistry: CNSetValue.F90
+
+ New Type modules that now replace clmtype.F90
+ ==========================================
+ main/atm2lndType.F90
+ main/lnd2atmType.F90
+ main/ColumnType.F90
+ main/EcophysConType.F90
+ main/GridcellType.F90
+ main/LandunitType.F90
+ main/PatchType.F90
+
+ biogeochem/CNCarbonFluxType.F90
+ biogeochem/CNCarbonStateType.F90
+ biogeochem/CNDecompCascadeConType.F90
+ biogeochem/CNDVType.F90
+ biogeochem/CNNitrogenFluxType.F90
+ biogeochem/CNNitrogenStateType.F90
+ biogeochem/CNStateType.F90
+ biogeochem/CropType.F90
+
+ biogeophys/AerosolType.F90
+ biogeophys/CanopyStateType.F90
+ biogeophys/EnergyFluxType.F90
+ biogeophys/FrictionVelocityType.F90
+ biogeophys/LakeStateType.F90
+ biogeophys/PhotosynthesisType.F90
+ biogeophys/SoilHydrologyType.F90
+ biogeophys/SoilStateType.F90
+ biogeophys/SolarAbsorbedType.F90
+ biogeophys/SurfaceAlbedoType.F90
+ biogeophys/TemperatureType.F90
+ biogeophys/UrbanParamsType.F90
+ biogeophys/WaterfluxType.F90
+ biogeophys/WaterStateType.F90
+
+ ED/main/EDBioType.F90
+ ED/main/EDEcophysConType.F90
+ ED/main/EDVecCohortType.F90
+ ED/main/EDVecPatchType.F90
+
+
+ Instantiation of Types
+
+2) Public Types:
+
+ - the following are public types that can BE PASSED AS ARGUMENTS
+ - the type instances FOR NOW are clm_initialized and then used by the driver
+ - this will be generalized in the future
+
+ type(ch4_type) :: ch4_vars
+ type(carbonstate_type) :: carbonstate_vars
+ type(carbonstate_type) :: c13_carbonstate_vars
+ type(carbonstate_type) :: c14_carbonstate_vars
+ type(carbonflux_type) :: carbonflux_vars
+ type(carbonflux_type) :: c13_carbonflux_vars
+ type(carbonflux_type) :: c14_carbonflux_vars
+ type(nitrogenstate_type) :: nitrogenstate_vars
+ type(nitrogenflux_type) :: nitrogenflux_vars
+ type(dgvs_type) :: dgvs_vars
+ type(crop_type) :: crop_vars
+ type(cnstate_type) :: cnstate_vars
+ type(dust_type) :: dust_vars
+ type(vocemis_type) :: vocemis_vars
+ type(drydepvel_type) :: drydepvel_vars
+ type(aerosol_type) :: aerosol_vars
+ type(canopystate_type) :: canopystate_vars
+ type(energyflux_type) :: energyflux_vars
+ type(frictionvel_type) :: frictionvel_vars
+ type(lakestate_type) :: lakestate_vars
+ type(photosyns_type) :: photosyns_vars
+ type(soilstate_type) :: soilstate_vars
+ type(soilhydrology_type) :: soilhydrology_vars
+ type(solarabs_type) :: solarabs_vars
+ type(surfalb_type) :: surfalb_vars
+ type(surfrad_type) :: surfrad_vars
+ type(temperature_type) :: temperature_vars
+ type(urbanparams_type) :: urbanparams_vars
+ type(waterflux_type) :: waterflux_vars
+ type(waterstate_type) :: waterstate_vars
+ type(atm2lnd_type) :: atm2lnd_vars
+ type(glc2lnd_type) :: glc2lnd_vars
+ type(lnd2atm_type) :: lnd2atm_vars
+ type(lnd2glc_type) :: lnd2glc_vars
+ type(glc_diagnostics_type) :: glc_diagnostics_vars
+ type(EDbio_type) :: EDbio_vars
+
+  - private Types (now some modules have their own PRIVATE types)
+
+ DUSTMod.F90 : type(dust_type)
+ VOCEmissionMod.F90: type(vocemis_type)
+ ch4Mod.F90 : type(ch4_type)
+
+ API Changes:
+ ==========================================
+ Original APIs:
+ clmtype was in effect a global common block and all routines had use statements into it
+ difficult to track any intent or flow through system
+     difficult to set up functional unit testing (e.g. CanopyFluxesMod.F90, etc.)
+
+ Refactorized APIs:
+ all new datatype instances are passed as arguments
+ science code is effectively the same since only the associate statements have been modified
+
+ New Directory Structure under clm/
+ ==========================================
+ bld/
+ doc/
+ src/biogeochem/
+ src/biogeophys/
+ src/cpl/
+ src/dyn_subgrid/
+ src/ED/
+ src/ED/biogeochem
+ src/ED/biogeophys
+ src/ED/fire
+ src/ED/main
+ src/main/
+ src/unit_test_mocks/
+ src/unit_test_shr/
+ src/utils/
+ src_clm4_0/
+ test/
+ tools/
+
+ Advantages of refactorization:
+ ==========================================
+ - Lets compiler enforce intent attributes
+ - Makes functional unit testing easier since module drivers can be
+ constructed with relevant mock data more easily
+ - Makes more sense scientifically since now easier to extend code logic as
+ to where you want to introduce new variables
+ - Easier to maintain code since code flow is easier to follow and to modify
+ - Easy to move variables around from one data type to another since now
+ know everything that is logically connected to that variable that
+     needs to be moved. Offers new modularity for trading in and out new
+ formulations of targeted functionality
+
+Requirements for tag: N.A.
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N.A.
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+ directory restructuring has been reflected in changes to configure in setting up the Filepath
+
+Describe any changes made to the namelist:
+ clm_hydrology1_inparm changed to clm_canopyhydrology_inparm
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: myself, Bill Sacks
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated:
+
+D clm/src/clm4_5
+D clm/src/clm4_5/biogeochem
+D clm/src/clm4_5/biogeochem/CNCStateUpdate2Mod.F90
+D clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+D clm/src/clm4_5/biogeochem/CNRestMod.F90
+D clm/src/clm4_5/biogeochem/CropRestMod.F90
+D clm/src/clm4_5/biogeochem/CNGRespMod.F90
+D clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+D clm/src/clm4_5/biogeochem/CNNStateUpdate1Mod.F90
+D clm/src/clm4_5/biogeochem/CNBalanceCheckMod.F90
+D clm/src/clm4_5/biogeochem/CNNStateUpdate3Mod.F90
+D clm/src/clm4_5/biogeochem/CNFireMod.F90
+D clm/src/clm4_5/biogeochem/CNMRespMod.F90
+D clm/src/clm4_5/biogeochem/MEGANFactorsMod.F90
+D clm/src/clm4_5/biogeochem/SatellitePhenologyMod.F90
+D clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+D clm/src/clm4_5/biogeochem/ch4RestMod.F90
+D clm/src/clm4_5/biogeochem/CNPrecisionControlMod.F90
+D clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+D clm/src/clm4_5/biogeochem/CNWoodProductsMod.F90
+D clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+D clm/src/clm4_5/biogeochem/ch4Mod.F90
+D clm/src/clm4_5/biogeochem/DUSTMod.F90
+D clm/src/clm4_5/biogeochem/CNDVLightMod.F90
+D clm/src/clm4_5/biogeochem/CNInitMod.F90
+D clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+D clm/src/clm4_5/biogeochem/ch4varcon.F90
+D clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+D clm/src/clm4_5/biogeochem/CNDecompMod.F90
+D clm/src/clm4_5/biogeochem/CNCStateUpdate3Mod.F90
+D clm/src/clm4_5/biogeochem/CNDVMod.F90
+D clm/src/clm4_5/biogeochem/ED
+D clm/src/clm4_5/biogeochem/ED/EDCanopyStructureMod.F90
+D clm/src/clm4_5/biogeochem/ED/EDSetValuesMod.F90
+D clm/src/clm4_5/biogeochem/ED/EDGrowthFunctionsMod.F90
+D clm/src/clm4_5/biogeochem/ED/EDPhysiologyMod.F90
+D clm/src/clm4_5/biogeochem/ED/EDCohortDynamicsMod.F90
+D clm/src/clm4_5/biogeochem/ED/EDPatchDynamicsMod.F90
+D clm/src/clm4_5/biogeochem/CNSharedParamsMod.F90
+D clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+D clm/src/clm4_5/biogeochem/ch4InitMod.F90
+D clm/src/clm4_5/biogeochem/CNAnnualUpdateMod.F90
+D clm/src/clm4_5/biogeochem/CNNStateUpdate2Mod.F90
+D clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+D clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+D clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+D clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+D clm/src/clm4_5/biogeochem/CNDecompCascadeBGCMod.F90
+D clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+D clm/src/clm4_5/biogeochem/CNVegStructUpdateMod.F90
+D clm/src/clm4_5/biogeochem/CNDecompCascadeCNMod.F90
+D clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+D clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+D clm/src/clm4_5/biogeochem/CNDVInitMod.F90
+D clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+
+D clm/src/clm4_5/main
+D clm/src/clm4_5/main/clm_varcon.F90
+D clm/src/clm4_5/main/initInterp.F90
+D clm/src/clm4_5/main/clm_varpar.F90
+D clm/src/clm4_5/main/landunit_varcon.F90
+D clm/src/clm4_5/main/initTimeConstMod.F90
+D clm/src/clm4_5/main/subgridWeightsMod.F90
+D clm/src/clm4_5/main/decompInitMod.F90
+D clm/src/clm4_5/main/clm_initializeMod.F90
+D clm/src/clm4_5/main/subgridRestMod.F90
+D clm/src/clm4_5/main/clm_glclnd.F90
+D clm/src/clm4_5/main/paramUtilMod.F90
+D clm/src/clm4_5/main/accFldsMod.F90
+D clm/src/clm4_5/main/subgridMod.F90
+D clm/src/clm4_5/main/clmtypeInitMod.F90
+D clm/src/clm4_5/main/ndepStreamMod.F90
+D clm/src/clm4_5/main/init_hydrology.F90
+D clm/src/clm4_5/main/initColdMod.F90
+D clm/src/clm4_5/main/column_varcon.F90
+D clm/src/clm4_5/main/histFileMod.F90
+D clm/src/clm4_5/main/pft2colMod.F90
+D clm/src/clm4_5/main/clm_atmlnd.F90
+D clm/src/clm4_5/main/findHistFields.pl
+D clm/src/clm4_5/main/clm_varsur.F90
+D clm/src/clm4_5/main/restFileMod.F90
+D clm/src/clm4_5/main/CMakeLists.txt
+D clm/src/clm4_5/main/controlMod.F90
+D clm/src/clm4_5/main/spitfireSF
+D clm/src/clm4_5/main/spitfireSF/SFParamsMod.F90
+D clm/src/clm4_5/main/spitfireSF/SFMainMod.F90
+D clm/src/clm4_5/main/test
+D clm/src/clm4_5/main/test/subgridWeights_test
+D clm/src/clm4_5/main/test/subgridWeights_test/test_subgridWeights.pf
+D clm/src/clm4_5/main/test/subgridWeights_test/CMakeLists.txt
+D clm/src/clm4_5/main/test/clm_glclnd_test
+D clm/src/clm4_5/main/test/clm_glclnd_test/test_clm_glclnd.pf
+D clm/src/clm4_5/main/test/clm_glclnd_test/CMakeLists.txt
+D clm/src/clm4_5/main/test/CMakeLists.txt
+D clm/src/clm4_5/main/initSubgridMod.F90
+D clm/src/clm4_5/main/filterMod.F90
+D clm/src/clm4_5/main/clm_varctl.F90
+D clm/src/clm4_5/main/clm_driver.F90
+D clm/src/clm4_5/main/surfrdUtilsMod.F90
+D clm/src/clm4_5/main/ED
+D clm/src/clm4_5/main/ED/EDInitTimeConst.F90
+D clm/src/clm4_5/main/ED/EDCLMLinkMod.F90
+D clm/src/clm4_5/main/ED/EDClmType.F90
+D clm/src/clm4_5/main/ED/EDRestVectorMod.F90
+D clm/src/clm4_5/main/ED/EDHistFldsMod.F90
+D clm/src/clm4_5/main/ED/EDClmTypeInitMod.F90
+D clm/src/clm4_5/main/ED/EDPftvarcon.F90
+D clm/src/clm4_5/main/ED/EDParamsMod.F90
+D clm/src/clm4_5/main/ED/EDInitMod.F90
+D clm/src/clm4_5/main/ED/EDTypesMod.F90
+D clm/src/clm4_5/main/ED/EDMainMod.F90
+D clm/src/clm4_5/main/subgridAveMod.F90
+D clm/src/clm4_5/main/initGridCellsMod.F90
+D clm/src/clm4_5/main/initSoilParVICMod.F90
+D clm/src/clm4_5/main/pftvarcon.F90
+D clm/src/clm4_5/main/surfrdMod.F90
+D clm/src/clm4_5/main/decompMod.F90
+D clm/src/clm4_5/main/FuncPedotransferMod.F90
+D clm/src/clm4_5/main/clmtype.F90
+D clm/src/clm4_5/main/reweightMod.F90
+D clm/src/clm4_5/main/readParamsMod.F90
+D clm/src/clm4_5/main/histFldsMod.F90
+
+D clm/src/clm4_5/biogeophys
+D clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+D clm/src/clm4_5/biogeophys/SLakeInitMod.F90
+D clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+D clm/src/clm4_5/biogeophys/SLakeFluxesMod.F90
+D clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+D clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+D clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+D clm/src/clm4_5/biogeophys/FrictionVelocityMod.F90
+D clm/src/clm4_5/biogeophys/TridiagonalMod.F90
+D clm/src/clm4_5/biogeophys/SLakeHydrologyMod.F90
+D clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+D clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+D clm/src/clm4_5/biogeophys/UrbanMod.F90
+D clm/src/clm4_5/biogeophys/QSatMod.F90
+D clm/src/clm4_5/biogeophys/ActiveLayerMod.F90
+D clm/src/clm4_5/biogeophys/SurfaceResistanceMod.F90
+D clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+D clm/src/clm4_5/biogeophys/CLMVICMapMod.F90
+D clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+D clm/src/clm4_5/biogeophys/SNICARMod.F90
+D clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+D clm/src/clm4_5/biogeophys/CMakeLists.txt
+D clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+D clm/src/clm4_5/biogeophys/RootBiophysMod.F90
+D clm/src/clm4_5/biogeophys/test
+D clm/src/clm4_5/biogeophys/test/CMakeLists.txt
+D clm/src/clm4_5/biogeophys/test/Daylength_test
+D clm/src/clm4_5/biogeophys/test/Daylength_test/test_daylength.pf
+D clm/src/clm4_5/biogeophys/test/Daylength_test/CMakeLists.txt
+D clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+D clm/src/clm4_5/biogeophys/SoilWaterMovementMod.F90
+D clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+D clm/src/clm4_5/biogeophys/SoilMoistStressMod.F90
+D clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+D clm/src/clm4_5/biogeophys/SoiWatRetCurveParMod.F90
+D clm/src/clm4_5/biogeophys/ED
+D clm/src/clm4_5/biogeophys/ED/EDAccumulateFluxesMod.F90
+D clm/src/clm4_5/biogeophys/ED/EDSurfaceAlbedoMod.F90
+D clm/src/clm4_5/biogeophys/ED/EDPhotosynthesisMod.F90
+D clm/src/clm4_5/biogeophys/ED/EDBtranMod.F90
+D clm/src/clm4_5/biogeophys/FracWetMod.F90
+D clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+D clm/src/clm4_5/biogeophys/SLakeRestMod.F90
+D clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+D clm/src/clm4_5/biogeophys/SLakeCon.F90
+D clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+D clm/src/clm4_5/biogeophys/BandDiagonalMod.F90
+D clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+D clm/src/clm4_5/biogeophys/DaylengthMod.F90
+
+D clm/src/clm4_5/dyn_subgrid
+D clm/src/clm4_5/dyn_subgrid/test
+D clm/src/clm4_5/dyn_subgrid/test/dynLandunitArea_test
+D clm/src/clm4_5/dyn_subgrid/test/dynLandunitArea_test/test_update_landunit_weights_one_gcell.pf
+D clm/src/clm4_5/dyn_subgrid/test/dynLandunitArea_test/test_update_landunit_weights.pf
+D clm/src/clm4_5/dyn_subgrid/test/dynLandunitArea_test/CMakeLists.txt
+D clm/src/clm4_5/dyn_subgrid/test/dynVar_test
+D clm/src/clm4_5/dyn_subgrid/test/dynVar_test/test_dynVarShared.F90
+D clm/src/clm4_5/dyn_subgrid/test/dynVar_test/test_dynVarTimeInterp.pf
+D clm/src/clm4_5/dyn_subgrid/test/dynVar_test/test_dynVarTimeUninterp.pf
+D clm/src/clm4_5/dyn_subgrid/test/dynVar_test/CMakeLists.txt
+D clm/src/clm4_5/dyn_subgrid/test/dynTimeInfo_test
+D clm/src/clm4_5/dyn_subgrid/test/dynTimeInfo_test/test_dynTimeInfo.pf
+D clm/src/clm4_5/dyn_subgrid/test/dynTimeInfo_test/CMakeLists.txt
+D clm/src/clm4_5/dyn_subgrid/test/CMakeLists.txt
+D clm/src/clm4_5/dyn_subgrid/test/dynInitColumns_test
+D clm/src/clm4_5/dyn_subgrid/test/dynInitColumns_test/test_init_columns.pf
+D clm/src/clm4_5/dyn_subgrid/test/dynInitColumns_test/CMakeLists.txt
+D clm/src/clm4_5/dyn_subgrid/dynLandunitAreaMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynTimeInfoMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynSubgridDriverMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynFileMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynConsBiogeochemMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynPriorWeightsMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynVarMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynEDMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynpftFileMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynCNDVMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynConsBiogeophysMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynVarTimeUninterpMod.F90
+D clm/src/clm4_5/dyn_subgrid/dynVarMod.F90.in
+D clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90.in
+D clm/src/clm4_5/dyn_subgrid/dynVarTimeUninterpMod.F90.in
+D clm/src/clm4_5/dyn_subgrid/do_genf90
+D clm/src/clm4_5/dyn_subgrid/dynInitColumnsMod.F90
+D clm/src/clm4_5/dyn_subgrid/CMakeLists.txt
+
+D clm/src/util_share
+D clm/src/util_share/organicFileMod.F90
+D clm/src/util_share/spmdGathScatMod.F90
+D clm/src/util_share/clm_time_manager.F90
+D clm/src/util_share/clm_nlUtilsMod.F90
+D clm/src/util_share/clm_varorb.F90
+D clm/src/util_share/abortutils.F90
+D clm/src/util_share/accumulMod.F90
+D clm/src/util_share/getdatetime.F90
+D clm/src/util_share/fileutils.F90
+D clm/src/util_share/dtypes.h
+D clm/src/util_share/ncdio_pio.F90
+D clm/src/util_share/SimpleMathMod.F90
+D clm/src/util_share/spmdMod.F90
+D clm/src/util_share/domainMod.F90
+D clm/src/util_share/ncdio_pio.F90.in
+D clm/src/util_share/restUtilMod.F90
+D clm/src/util_share/quadraticMod.F90
+D clm/src/util_share/restUtilMod.F90.in
+D clm/src/util_share/CMakeLists.txt
+D clm/src/util_share/GetGlobalValuesMod.F90
+
+D clm/src/clm4_0
+D clm/src/clm4_0/biogeochem
+D clm/src/clm4_0/biogeochem/CNCStateUpdate2Mod.F90
+D clm/src/clm4_0/biogeochem/CNC13StateUpdate2Mod.F90
+D clm/src/clm4_0/biogeochem/CNGapMortalityMod.F90
+D clm/src/clm4_0/biogeochem/CropRestMod.F90
+D clm/src/clm4_0/biogeochem/CNGRespMod.F90
+D clm/src/clm4_0/biogeochem/CNNStateUpdate1Mod.F90
+D clm/src/clm4_0/biogeochem/CNBalanceCheckMod.F90
+D clm/src/clm4_0/biogeochem/CNNStateUpdate3Mod.F90
+D clm/src/clm4_0/biogeochem/CNFireMod.F90
+D clm/src/clm4_0/biogeochem/CNMRespMod.F90
+D clm/src/clm4_0/biogeochem/MEGANFactorsMod.F90
+D clm/src/clm4_0/biogeochem/CNPrecisionControlMod.F90
+D clm/src/clm4_0/biogeochem/CNWoodProductsMod.F90
+D clm/src/clm4_0/biogeochem/CNSummaryMod.F90
+D clm/src/clm4_0/biogeochem/DUSTMod.F90
+D clm/src/clm4_0/biogeochem/CNDVLightMod.F90
+D clm/src/clm4_0/biogeochem/CNPhenologyMod.F90
+D clm/src/clm4_0/biogeochem/CNCStateUpdate1Mod.F90
+D clm/src/clm4_0/biogeochem/CNDecompMod.F90
+D clm/src/clm4_0/biogeochem/STATICEcosysDynMod.F90
+D clm/src/clm4_0/biogeochem/CNCStateUpdate3Mod.F90
+D clm/src/clm4_0/biogeochem/CNDVMod.F90
+D clm/src/clm4_0/biogeochem/CNC13StateUpdate1Mod.F90
+D clm/src/clm4_0/biogeochem/CNrestMod.F90
+D clm/src/clm4_0/biogeochem/CNC13StateUpdate3Mod.F90
+D clm/src/clm4_0/biogeochem/VOCEmissionMod.F90
+D clm/src/clm4_0/biogeochem/CNDVEcosystemDynIniMod.F90
+D clm/src/clm4_0/biogeochem/CNAnnualUpdateMod.F90
+D clm/src/clm4_0/biogeochem/CNNStateUpdate2Mod.F90
+D clm/src/clm4_0/biogeochem/C13SummaryMod.F90
+D clm/src/clm4_0/biogeochem/DryDepVelocity.F90
+D clm/src/clm4_0/biogeochem/CNC13FluxMod.F90
+D clm/src/clm4_0/biogeochem/CNAllocationMod.F90
+D clm/src/clm4_0/biogeochem/CNNDynamicsMod.F90
+D clm/src/clm4_0/biogeochem/CNEcosystemDynMod.F90
+D clm/src/clm4_0/biogeochem/CNSetValueMod.F90
+D clm/src/clm4_0/biogeochem/CNVegStructUpdateMod.F90
+D clm/src/clm4_0/biogeochem/CNDVEstablishmentMod.F90
+
+D clm/src/clm4_0/main
+D clm/src/clm4_0/main/clm_varcon.F90
+D clm/src/clm4_0/main/clm_varpar.F90
+D clm/src/clm4_0/main/CNiniTimeVar.F90
+D clm/src/clm4_0/main/dynlandMod.F90
+D clm/src/clm4_0/main/decompInitMod.F90
+D clm/src/clm4_0/main/clm_initializeMod.F90
+D clm/src/clm4_0/main/subgridRestMod.F90
+D clm/src/clm4_0/main/clm_glclnd.F90
+D clm/src/clm4_0/main/accFldsMod.F90
+D clm/src/clm4_0/main/subgridMod.F90
+D clm/src/clm4_0/main/clmtypeInitMod.F90
+D clm/src/clm4_0/main/ndepStreamMod.F90
+D clm/src/clm4_0/main/pftdynMod.F90
+D clm/src/clm4_0/main/iniTimeConst.F90
+D clm/src/clm4_0/main/histFileMod.F90
+D clm/src/clm4_0/main/pft2colMod.F90
+D clm/src/clm4_0/main/clm_atmlnd.F90
+D clm/src/clm4_0/main/findHistFields.pl
+D clm/src/clm4_0/main/restFileMod.F90
+D clm/src/clm4_0/main/clm_varsur.F90
+D clm/src/clm4_0/main/controlMod.F90
+D clm/src/clm4_0/main/initSurfAlbMod.F90
+D clm/src/clm4_0/main/filterMod.F90
+D clm/src/clm4_0/main/clm_driver.F90
+D clm/src/clm4_0/main/clm_varctl.F90
+D clm/src/clm4_0/main/subgridAveMod.F90
+D clm/src/clm4_0/main/initGridCellsMod.F90
+D clm/src/clm4_0/main/CNiniSpecial.F90
+D clm/src/clm4_0/main/pftvarcon.F90
+D clm/src/clm4_0/main/surfrdMod.F90
+D clm/src/clm4_0/main/decompMod.F90
+D clm/src/clm4_0/main/clmtype.F90
+D clm/src/clm4_0/main/histFldsMod.F90
+D clm/src/clm4_0/main/mkarbinitMod.F90
+D clm/src/clm4_0/biogeophys
+D clm/src/clm4_0/biogeophys/BalanceCheckMod.F90
+D clm/src/clm4_0/biogeophys/SurfaceRadiationMod.F90
+D clm/src/clm4_0/biogeophys/SoilTemperatureMod.F90
+D clm/src/clm4_0/biogeophys/SnowHydrologyMod.F90
+D clm/src/clm4_0/biogeophys/UrbanInputMod.F90
+D clm/src/clm4_0/biogeophys/Biogeophysics1Mod.F90
+D clm/src/clm4_0/biogeophys/Biogeophysics2Mod.F90
+D clm/src/clm4_0/biogeophys/FracWetMod.F90
+D clm/src/clm4_0/biogeophys/UrbanInitMod.F90
+D clm/src/clm4_0/biogeophys/FrictionVelocityMod.F90
+D clm/src/clm4_0/biogeophys/TridiagonalMod.F90
+D clm/src/clm4_0/biogeophys/SurfaceAlbedoMod.F90
+D clm/src/clm4_0/biogeophys/Hydrology1Mod.F90
+D clm/src/clm4_0/biogeophys/Hydrology2Mod.F90
+D clm/src/clm4_0/biogeophys/BiogeophysicsLakeMod.F90
+D clm/src/clm4_0/biogeophys/BiogeophysRestMod.F90
+D clm/src/clm4_0/biogeophys/UrbanMod.F90
+D clm/src/clm4_0/biogeophys/SoilHydrologyMod.F90
+D clm/src/clm4_0/biogeophys/QSatMod.F90
+D clm/src/clm4_0/biogeophys/clm_driverInitMod.F90
+D clm/src/clm4_0/biogeophys/HydrologyLakeMod.F90
+D clm/src/clm4_0/biogeophys/BareGroundFluxesMod.F90
+D clm/src/clm4_0/biogeophys/SNICARMod.F90
+D clm/src/clm4_0/biogeophys/CanopyFluxesMod.F90
+
+List all files added and what they do:
+
+A clm/src_clm40
+A clm/src_clm40/biogeochem
+A clm/src_clm40/biogeochem/CNCStateUpdate2Mod.F90
+A clm/src_clm40/biogeochem/CNC13StateUpdate2Mod.F90
+A clm/src_clm40/biogeochem/CNGRespMod.F90
+A clm/src_clm40/biogeochem/CNBalanceCheckMod.F90
+A clm/src_clm40/biogeochem/CNNStateUpdate3Mod.F90
+A clm/src_clm40/biogeochem/CNSummaryMod.F90
+A clm/src_clm40/biogeochem/CNPhenologyMod.F90
+A clm/src_clm40/biogeochem/STATICEcosysDynMod.F90
+A clm/src_clm40/biogeochem/CNCStateUpdate1Mod.F90
+A clm/src_clm40/biogeochem/CNC13StateUpdate1Mod.F90
+A clm/src_clm40/biogeochem/CNrestMod.F90
+A clm/src_clm40/biogeochem/VOCEmissionMod.F90
+A clm/src_clm40/biogeochem/CNAnnualUpdateMod.F90
+A clm/src_clm40/biogeochem/CNNStateUpdate2Mod.F90
+A clm/src_clm40/biogeochem/C13SummaryMod.F90
+A clm/src_clm40/biogeochem/CNAllocationMod.F90
+A clm/src_clm40/biogeochem/DryDepVelocity.F90
+A clm/src_clm40/biogeochem/CNNDynamicsMod.F90
+A clm/src_clm40/biogeochem/CNSetValueMod.F90
+A clm/src_clm40/biogeochem/CNGapMortalityMod.F90
+A clm/src_clm40/biogeochem/CropRestMod.F90
+A clm/src_clm40/biogeochem/CNNStateUpdate1Mod.F90
+A clm/src_clm40/biogeochem/CNFireMod.F90
+A clm/src_clm40/biogeochem/CNMRespMod.F90
+A clm/src_clm40/biogeochem/MEGANFactorsMod.F90
+A clm/src_clm40/biogeochem/CNWoodProductsMod.F90
+A clm/src_clm40/biogeochem/CNPrecisionControlMod.F90
+A clm/src_clm40/biogeochem/DUSTMod.F90
+A clm/src_clm40/biogeochem/CNDVLightMod.F90
+A clm/src_clm40/biogeochem/CNDecompMod.F90
+A clm/src_clm40/biogeochem/CNDVMod.F90
+A clm/src_clm40/biogeochem/CNCStateUpdate3Mod.F90
+A clm/src_clm40/biogeochem/CNC13StateUpdate3Mod.F90
+A clm/src_clm40/biogeochem/CNDVEcosystemDynIniMod.F90
+A clm/src_clm40/biogeochem/CNC13FluxMod.F90
+A clm/src_clm40/biogeochem/CNEcosystemDynMod.F90
+A clm/src_clm40/biogeochem/CNVegStructUpdateMod.F90
+A clm/src_clm40/biogeochem/CNDVEstablishmentMod.F90
+
+A clm/src_clm40/main
+A clm/src_clm40/main/spmdGathScatMod.F90
+A clm/src_clm40/main/organicFileMod.F90
+A clm/src_clm40/main/clm_varcon.F90
+A clm/src_clm40/main/clm_varpar.F90
+A clm/src_clm40/main/CNiniTimeVar.F90
+A clm/src_clm40/main/abortutils.F90
+A clm/src_clm40/main/accumulMod.F90
+A clm/src_clm40/main/decompInitMod.F90
+A clm/src_clm40/main/clm_glclnd.F90
+A clm/src_clm40/main/accFldsMod.F90
+A clm/src_clm40/main/subgridMod.F90
+A clm/src_clm40/main/pftdynMod.F90
+A clm/src_clm40/main/pft2colMod.F90
+A clm/src_clm40/main/clm_atmlnd.F90
+A clm/src_clm40/main/quadraticMod.F90
+A clm/src_clm40/main/GetGlobalValuesMod.F90
+A clm/src_clm40/main/clm_time_manager.F90
+A clm/src_clm40/main/filterMod.F90
+A clm/src_clm40/main/clm_varctl.F90
+A clm/src_clm40/main/subgridAveMod.F90
+A clm/src_clm40/main/dtypes.h
+A clm/src_clm40/main/CNiniSpecial.F90
+A clm/src_clm40/main/surfrdMod.F90
+A clm/src_clm40/main/domainMod.F90
+A clm/src_clm40/main/lnd_import_export.F90
+A clm/src_clm40/main/restUtilMod.F90
+A clm/src_clm40/main/clmtype.F90
+A clm/src_clm40/main/mkarbinitMod.F90
+A clm/src_clm40/main/restUtilMod.F90.in
+A clm/src_clm40/main/dynlandMod.F90
+A clm/src_clm40/main/getdatetime.F90
+A clm/src_clm40/main/clm_initializeMod.F90
+A clm/src_clm40/main/subgridRestMod.F90
+A clm/src_clm40/main/fileutils.F90
+A clm/src_clm40/main/clmtypeInitMod.F90
+A clm/src_clm40/main/ndepStreamMod.F90
+A clm/src_clm40/main/SimpleMathMod.F90
+A clm/src_clm40/main/iniTimeConst.F90
+A clm/src_clm40/main/lnd_comp_esmf.F90
+A clm/src_clm40/main/histFileMod.F90
+A clm/src_clm40/main/clm_cpl_indices.F90
+A clm/src_clm40/main/findHistFields.pl
+A clm/src_clm40/main/restFileMod.F90
+A clm/src_clm40/main/clm_varsur.F90
+A clm/src_clm40/main/controlMod.F90
+A clm/src_clm40/main/CMakeLists.txt
+A clm/src_clm40/main/initSurfAlbMod.F90
+A clm/src_clm40/main/clm_nlUtilsMod.F90
+A clm/src_clm40/main/clm_driver.F90
+A clm/src_clm40/main/clm_varorb.F90
+A clm/src_clm40/main/initGridCellsMod.F90
+A clm/src_clm40/main/lnd_comp_mct.F90
+A clm/src_clm40/main/pftvarcon.F90
+A clm/src_clm40/main/ncdio_pio.F90
+A clm/src_clm40/main/spmdMod.F90
+A clm/src_clm40/main/decompMod.F90
+A clm/src_clm40/main/ncdio_pio.F90.in
+A clm/src_clm40/main/histFldsMod.F90
+
+A clm/src_clm40/biogeophys
+A clm/src_clm40/biogeophys/BalanceCheckMod.F90
+A clm/src_clm40/biogeophys/SoilTemperatureMod.F90
+A clm/src_clm40/biogeophys/UrbanInputMod.F90
+A clm/src_clm40/biogeophys/SnowHydrologyMod.F90
+A clm/src_clm40/biogeophys/Biogeophysics1Mod.F90
+A clm/src_clm40/biogeophys/FrictionVelocityMod.F90
+A clm/src_clm40/biogeophys/TridiagonalMod.F90
+A clm/src_clm40/biogeophys/Hydrology1Mod.F90
+A clm/src_clm40/biogeophys/BiogeophysRestMod.F90
+A clm/src_clm40/biogeophys/UrbanMod.F90
+A clm/src_clm40/biogeophys/QSatMod.F90
+A clm/src_clm40/biogeophys/clm_driverInitMod.F90
+A clm/src_clm40/biogeophys/HydrologyLakeMod.F90
+A clm/src_clm40/biogeophys/BareGroundFluxesMod.F90
+A clm/src_clm40/biogeophys/SNICARMod.F90
+A clm/src_clm40/biogeophys/CanopyFluxesMod.F90
+A clm/src_clm40/biogeophys/SurfaceRadiationMod.F90
+A clm/src_clm40/biogeophys/Biogeophysics2Mod.F90
+A clm/src_clm40/biogeophys/UrbanInitMod.F90
+A clm/src_clm40/biogeophys/FracWetMod.F90
+A clm/src_clm40/biogeophys/SurfaceAlbedoMod.F90
+A clm/src_clm40/biogeophys/Hydrology2Mod.F90
+A clm/src_clm40/biogeophys/BiogeophysicsLakeMod.F90
+A clm/src_clm40/biogeophys/SoilHydrologyMod.F90
+
+A clm/src/main
+A clm/src/main/organicFileMod.F90
+A clm/src/main/clm_varcon.F90
+A clm/src/main/initInterp.F90
+A clm/src/main/landunit_varcon.F90
+A clm/src/main/clm_varpar.F90
+A clm/src/main/abortutils.F90
+A clm/src/main/accumulMod.F90
+A clm/src/main/subgridWeightsMod.F90
+A clm/src/main/decompInitMod.F90
+A clm/src/main/subgridMod.F90
+A clm/src/main/atm2lndType.F90
+A clm/src/main/lnd2atmType.F90
+A clm/src/main/column_varcon.F90
+A clm/src/main/EcophysConType.F90
+A clm/src/main/GetGlobalValuesMod.F90
+A clm/src/main/initSubgridMod.F90
+A clm/src/main/lnd2glcMod.F90
+A clm/src/main/glc2lndMod.F90
+A clm/src/main/filterMod.F90
+A clm/src/main/surfrdUtilsMod.F90
+A clm/src/main/clm_varctl.F90
+A clm/src/main/subgridAveMod.F90
+A clm/src/main/initVerticalMod.F90
+A clm/src/main/glcDiagnosticsMod.F90
+A clm/src/main/lnd2atmMod.F90
+A clm/src/main/atm2lndMod.F90
+A clm/src/main/surfrdMod.F90
+A clm/src/main/FuncPedotransferMod.F90
+A clm/src/main/readParamsMod.F90
+A clm/src/main/clm_initializeMod.F90
+A clm/src/main/subgridRestMod.F90
+A clm/src/main/paramUtilMod.F90
+A clm/src/main/ColumnType.F90
+A clm/src/main/PatchType.F90
+A clm/src/main/ndepStreamMod.F90
+A clm/src/main/init_hydrology.F90
+A clm/src/main/histFileMod.F90
+A clm/src/main/findHistFields.pl
+A clm/src/main/restFileMod.F90
+A clm/src/main/clm_varsur.F90
+A clm/src/main/controlMod.F90
+A clm/src/main/LandunitType.F90
+A clm/src/main/CMakeLists.txt
+A clm/src/main/test
+A clm/src/main/test/subgridWeights_test
+A clm/src/main/test/subgridWeights_test/test_subgridWeights.pf
+A clm/src/main/test/subgridWeights_test/CMakeLists.txt
+A clm/src/main/test/clm_glclnd_test
+A clm/src/main/test/clm_glclnd_test/test_clm_glclnd.pf
+A clm/src/main/test/clm_glclnd_test/CMakeLists.txt
+A clm/src/main/test/CMakeLists.txt
+A clm/src/main/clm_driver.F90
+A clm/src/main/GridcellType.F90
+A clm/src/main/initGridCellsMod.F90
+A clm/src/main/pftvarcon.F90
+A clm/src/main/ncdio_pio.F90
+A clm/src/main/decompMod.F90
+A clm/src/main/ncdio_pio.F90.in
+A clm/src/main/reweightMod.F90
+
+A clm/src/ED
+A clm/src/ED/biogeochem
+A clm/src/ED/biogeochem/EDCanopyStructureMod.F90
+A clm/src/ED/biogeochem/EDGrowthFunctionsMod.F90
+A clm/src/ED/biogeochem/EDPhysiologyMod.F90
+A clm/src/ED/biogeochem/EDCohortDynamicsMod.F90
+A clm/src/ED/biogeochem/EDPatchDynamicsMod.F90
+A clm/src/ED/main
+A clm/src/ED/main/EDBioType.F90
+A clm/src/ED/main/EDEcophysConType.F90
+A clm/src/ED/main/EDParamsMod.F90
+A clm/src/ED/main/EDMainMod.F90
+A clm/src/ED/main/EDCLMLinkMod.F90
+A clm/src/ED/main/EDVecCohortType.F90
+A clm/src/ED/main/EDVecPatchType.F90
+A clm/src/ED/main/EDRestVectorMod.F90
+A clm/src/ED/main/EDPftvarcon.F90
+A clm/src/ED/main/EDInitMod.F90
+A clm/src/ED/main/EDTypesMod.F90
+A clm/src/ED/fire
+A clm/src/ED/fire/SFParamsMod.F90
+A clm/src/ED/fire/SFMainMod.F90
+A clm/src/ED/biogeophys
+A clm/src/ED/biogeophys/EDAccumulateFluxesMod.F90
+A clm/src/ED/biogeophys/EDSurfaceAlbedoMod.F90
+A clm/src/ED/biogeophys/EDPhotosynthesisMod.F90
+A clm/src/ED/biogeophys/EDBtranMod.F90
+A clm/src/Notes
+M clm/src/unit_test_shr/unittestSubgridMod.F90
+
+A clm/src/utils
+A clm/src/utils/spmdGathScatMod.F90
+A clm/src/utils/clm_time_manager.F90
+A clm/src/utils/clm_nlUtilsMod.F90
+A clm/src/utils/clm_varorb.F90
+A clm/src/utils/accumulMod.F90
+A clm/src/utils/getdatetime.F90
+A clm/src/utils/fileutils.F90
+A clm/src/utils/dtypes.h
+A clm/src/utils/spmdMod.F90
+A clm/src/utils/SimpleMathMod.F90
+A clm/src/utils/domainMod.F90
+A clm/src/utils/restUtilMod.F90
+A clm/src/utils/quadraticMod.F90
+A clm/src/utils/CMakeLists.txt
+A clm/src/utils/restUtilMod.F90.in
+
+A clm/src/biogeochem
+A clm/src/biogeochem/CNCStateUpdate2Mod.F90
+A clm/src/biogeochem/CNDecompCascadeConType.F90
+A clm/src/biogeochem/CNNitrifDenitrifMod.F90
+A clm/src/biogeochem/CNGRespMod.F90
+A clm/src/biogeochem/CNBalanceCheckMod.F90
+A clm/src/biogeochem/CNNStateUpdate3Mod.F90
+A clm/src/biogeochem/CNDVDriverMod.F90
+A clm/src/biogeochem/SatellitePhenologyMod.F90
+A clm/src/biogeochem/CNPhenologyMod.F90
+A clm/src/biogeochem/CNCarbonFluxType.F90
+A clm/src/biogeochem/CNCarbonStateType.F90
+A clm/src/biogeochem/CNCStateUpdate1Mod.F90
+A clm/src/biogeochem/VOCEmissionMod.F90
+A clm/src/biogeochem/CNAnnualUpdateMod.F90
+A clm/src/biogeochem/CNNStateUpdate2Mod.F90
+A clm/src/biogeochem/CropType.F90
+A clm/src/biogeochem/CNAllocationMod.F90
+A clm/src/biogeochem/CNNDynamicsMod.F90
+A clm/src/biogeochem/DryDepVelocity.F90
+A clm/src/biogeochem/CNDecompCascadeBGCMod.F90
+A clm/src/biogeochem/CNSoilLittVertTranspMod.F90
+A clm/src/biogeochem/CNDecompCascadeCNMod.F90
+A clm/src/biogeochem/CNC14DecayMod.F90
+A clm/src/biogeochem/CNGapMortalityMod.F90
+A clm/src/biogeochem/CNNStateUpdate1Mod.F90
+A clm/src/biogeochem/CNFireMod.F90
+A clm/src/biogeochem/CNNitrogenFluxType.F90
+A clm/src/biogeochem/CNMRespMod.F90
+A clm/src/biogeochem/MEGANFactorsMod.F90
+A clm/src/biogeochem/CNVerticalProfileMod.F90
+A clm/src/biogeochem/CNCIsoFluxMod.F90
+A clm/src/biogeochem/CNWoodProductsMod.F90
+A clm/src/biogeochem/CNPrecisionControlMod.F90
+A clm/src/biogeochem/ch4Mod.F90
+A clm/src/biogeochem/DUSTMod.F90
+A clm/src/biogeochem/CNDVLightMod.F90
+A clm/src/biogeochem/ch4varcon.F90
+A clm/src/biogeochem/CNDecompMod.F90
+A clm/src/biogeochem/CNCStateUpdate3Mod.F90
+A clm/src/biogeochem/CNSharedParamsMod.F90
+A clm/src/biogeochem/CNDVType.F90
+A clm/src/biogeochem/CNStateType.F90
+A clm/src/biogeochem/CNEcosystemDynMod.F90
+A clm/src/biogeochem/CNNitrogenStateType.F90
+A clm/src/biogeochem/CNVegStructUpdateMod.F90
+A clm/src/biogeochem/CNDVEstablishmentMod.F90
+
+A clm/src/biogeophys
+A clm/src/biogeophys/SnowSnicarMod.F90
+A clm/src/biogeophys/SnowHydrologyMod.F90
+A clm/src/biogeophys/TridiagonalMod.F90
+A clm/src/biogeophys/FrictionVelocityType.F90
+A clm/src/biogeophys/LakeFluxesMod.F90
+A clm/src/biogeophys/PhotosynthesisMod.F90
+A clm/src/biogeophys/AerosolType.F90
+A clm/src/biogeophys/ActiveLayerMod.F90
+A clm/src/biogeophys/QSatMod.F90
+A clm/src/biogeophys/SoilHydrologyType.F90
+A clm/src/biogeophys/HydrologyDrainageMod.F90
+A clm/src/biogeophys/LakeStateType.F90
+A clm/src/biogeophys/BareGroundFluxesMod.F90
+A clm/src/biogeophys/SolarAbsorbedType.F90
+A clm/src/biogeophys/CanopyHydrologyMod.F90
+A clm/src/biogeophys/UrbanFluxesMod.F90
+A clm/src/biogeophys/SurfaceAlbedoMod.F90
+A clm/src/biogeophys/UrbanRadiationMod.F90
+A clm/src/biogeophys/PhotosynthesisType.F90
+A clm/src/biogeophys/CanopyTemperatureMod.F90
+A clm/src/biogeophys/HydrologyNoDrainageMod.F90
+A clm/src/biogeophys/DaylengthMod.F90
+A clm/src/biogeophys/WaterfluxType.F90
+A clm/src/biogeophys/BalanceCheckMod.F90
+A clm/src/biogeophys/SoilTemperatureMod.F90
+A clm/src/biogeophys/WaterStateType.F90
+A clm/src/biogeophys/LakeTemperatureMod.F90
+A clm/src/biogeophys/FrictionVelocityMod.F90
+A clm/src/biogeophys/SoilFluxesMod.F90
+A clm/src/biogeophys/TemperatureType.F90
+A clm/src/biogeophys/SurfaceAlbedoType.F90
+A clm/src/biogeophys/AerosolMod.F90
+A clm/src/biogeophys/SoilStateType.F90
+A clm/src/biogeophys/SurfaceResistanceMod.F90
+A clm/src/biogeophys/UrbanAlbedoMod.F90
+A clm/src/biogeophys/CanopyFluxesMod.F90
+A clm/src/biogeophys/CMakeLists.txt
+A clm/src/biogeophys/RootBiophysMod.F90
+A clm/src/biogeophys/test
+A clm/src/biogeophys/test/CMakeLists.txt
+A clm/src/biogeophys/test/Daylength_test
+A clm/src/biogeophys/test/Daylength_test/test_daylength.pf
+A clm/src/biogeophys/test/Daylength_test/CMakeLists.txt
+A clm/src/biogeophys/SurfaceRadiationMod.F90
+A clm/src/biogeophys/SoilWaterMovementMod.F90
+A clm/src/biogeophys/SoilMoistStressMod.F90
+A clm/src/biogeophys/SoiWatRetCurveParMod.F90
+A clm/src/biogeophys/EnergyFluxType.F90
+A clm/src/biogeophys/CanopyStateType.F90
+A clm/src/biogeophys/BandDiagonalMod.F90
+A clm/src/biogeophys/SoilHydrologyMod.F90
+A clm/src/biogeophys/LakeCon.F90
+A clm/src/biogeophys/LakeHydrologyMod.F90
+A clm/src/biogeophys/UrbanParamsType.F90
+A clm/src/dyn_subgrid
+A clm/src/dyn_subgrid/dynLandunitAreaMod.F90
+A clm/src/dyn_subgrid/dynTimeInfoMod.F90
+A clm/src/dyn_subgrid/dynSubgridDriverMod.F90
+A clm/src/dyn_subgrid/dynFileMod.F90
+A clm/src/dyn_subgrid/dynConsBiogeochemMod.F90
+A clm/src/dyn_subgrid/dynEDMod.F90
+A clm/src/dyn_subgrid/dynVarMod.F90
+A clm/src/dyn_subgrid/dynVarTimeUninterpMod.F90
+A clm/src/dyn_subgrid/dynVarMod.F90.in
+A clm/src/dyn_subgrid/dynVarTimeInterpMod.F90.in
+A clm/src/dyn_subgrid/dynVarTimeUninterpMod.F90.in
+A clm/src/dyn_subgrid/do_genf90
+A clm/src/dyn_subgrid/CMakeLists.txt
+A clm/src/dyn_subgrid/test
+A clm/src/dyn_subgrid/test/dynLandunitArea_test
+A clm/src/dyn_subgrid/test/dynLandunitArea_test/test_update_landunit_weights_one_gcell.pf
+A clm/src/dyn_subgrid/test/dynLandunitArea_test/CMakeLists.txt
+A clm/src/dyn_subgrid/test/dynLandunitArea_test/test_update_landunit_weights.pf
+A clm/src/dyn_subgrid/test/dynVar_test
+A clm/src/dyn_subgrid/test/dynVar_test/test_dynVarShared.F90
+A clm/src/dyn_subgrid/test/dynVar_test/test_dynVarTimeInterp.pf
+A clm/src/dyn_subgrid/test/dynVar_test/test_dynVarTimeUninterp.pf
+A clm/src/dyn_subgrid/test/dynVar_test/CMakeLists.txt
+A clm/src/dyn_subgrid/test/dynTimeInfo_test
+A clm/src/dyn_subgrid/test/dynTimeInfo_test/test_dynTimeInfo.pf
+A clm/src/dyn_subgrid/test/dynTimeInfo_test/CMakeLists.txt
+A clm/src/dyn_subgrid/test/CMakeLists.txt
+A clm/src/dyn_subgrid/test/dynInitColumns_test
+A clm/src/dyn_subgrid/test/dynInitColumns_test/test_init_columns.pf
+A clm/src/dyn_subgrid/test/dynInitColumns_test/CMakeLists.txt
+A clm/src/dyn_subgrid/dynHarvestMod.F90
+A clm/src/dyn_subgrid/dynPriorWeightsMod.F90
+A clm/src/dyn_subgrid/dynpftFileMod.F90
+A clm/src/dyn_subgrid/dynVarTimeInterpMod.F90
+A clm/src/dyn_subgrid/dynCNDVMod.F90
+A clm/src/dyn_subgrid/dynConsBiogeophysMod.F90
+A clm/src/dyn_subgrid/dynInitColumnsMod.F90
+A clm/src/unit_test_mocks/util_share/restUtilMod_mock.F90
+A clm/src/unit_test_mocks/util_share/restUtilMod_mock.F90.in
+
+List all existing files that have been modified, and describe the changes:
+ ALL files have been modified - see the general description for an
+ overview of what was done - the following files have not had their directories
+ changed - so the summary is below
+
+M clm/bld/configure
+ - needed to account for change in filepath
+
+M clm/bld/CLMBuildNamelist.pm
+M clm/bld/namelist_files/namelist_definition_clm4_5.xml
+ - see namelist changes mentioned above
+
+M clm/src/unit_test_mocks/util_share/clm_time_manager_mock.F90
+M clm/src/unit_test_mocks/util_share/ncdio_var.F90
+M clm/src/unit_test_mocks/util_share/ncdio_pio_mock.F90
+M clm/src/unit_test_mocks/util_share/do_genf90
+M clm/src/unit_test_mocks/util_share/CMakeLists.txt
+M clm/src/unit_test_mocks/util_share/ncdio_pio_mock.F90.in
+M clm/src/unit_test_mocks/main/histFileMod_mock.F90
+M clm/src/CMakeLists.txt
+ - unit test changes needed to account for introduction of new data types and
+ directory structure
+
+M clm/src/cpl/lnd_comp_esmf.F90
+M clm/src/cpl/lnd_import_export.F90
+M clm/src/cpl/lnd_comp_mct.F90
+ - coupling interface changes needed to account for introduction of new data types
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone yes
+ goldbach yes
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ yellowstone_gnu yes
+ goldbach_nag yes
+ goldbach_intel yes
+ goldbach_pgi yes
+
+ NOTE for goldbach_nag - four ED compare_hist tests fail with small bit-for-bit differences.
+ In fact - looking more closely, these tests are also run for pgi and intel on goldbach - and
+ values for LITTER_IN and LITTER_OUT are 0. for those compilers but non-zero for nag.
+ With the refactoring code - those fields are again 0. for intel and pgi - but totally different
+ and non-zero for nag. Apparently, this is a known problem that will get resolved when new ED changes
+ are brought in.
+
+ ERS_D_Mmpi-serial.1x1_brazil.ICLM45CNED.goldbach_nag.clm-edTest
+ ERS_D_Mmpi-serial.1x1_brazil.ICLM45CNED.goldbach_nag.clm-edTest
+ SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_nag.clm-edTest
+ SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_nag.clm-edTest
+
+
+ short tests (aux_clm_short) (generally these are NOT used when making a tag): N/A
+
+ tools testing: N/A
+
+CLM tag used for the baseline comparisons: clm4_5_1_r080
+
+Changes answers relative to baseline: No - BFB
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r080
+Originator(s): erik (Erik)
+Date: Sat Aug 16 15:01:35 MDT 2014
+One-line Summary: Update externals to CESM trunk versions, allow eighth degree as a valid resolution
+
+Purpose of changes:
+
+Update all the externals to the very latest CESM trunk versions (based off of current
+cesm1_3_alpha13a).
+
+Requirements for tag: Get working with trunk externals
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+ 2012 Problem with mksurfdata_map for eighth degree grid...
+ Scripts issues as follows:
+ 2024 nlcompareonly option to create_test not working correctly for reporting
+ 2019 ERH tests don't save the base env_run.xml, so have trouble when resubmitted...
+ 2018 Failed tests in cesm1_3_beta11 needed for CLM
+ 2005 Remove untested named compsets and grids
+ 1999 T85_g16 has inconsistent land domain and surface datasets
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+ Archiving updated in support of time series generation
+ Running test suite now builds some shared libraries built only once
+
+Describe any changes made to the namelist:
+ Resolve env and xml vars used in user_nl_*
+
+ Default for drv_in profile_timer changed from 4 to 1
+ Some PE layouts change: f10_f10 change from 15x1 to 30x1
+
+List any changes to the defaults for the boundary datasets: Add 0.125x0.125 mapping files
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): Update to cesm1_3_alpha13a externals
+
+ scripts to scripts4_140814a
+ scripts/doc to doc_140519
+ Machines to Machines_140811
+ CMake to CMake_Fortran_utils_140715
+ drv to drvseq5_0_15
+ cism to cism1_140602
+ timing to timing_140416
+ pio to pio1_8_12
+ cprnc to cprnc_140625
+ mapping to mapping_140702b (note: gen_domain changes answers)
+ unit_testing to unit_testing_0_07
+
+ PTCLM to PTCLM2_140816
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkgridmapMod.F90 -- decrease tolerance
+ M models/lnd/clm/tools/shared/ncl_scripts/getregional_datasets.ncl - Loop over variables
+ and then cat the files together at the end. This makes the process possible for
+ high resolution and speeds up lower resolution sub-setting as well.
+ M models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkgridmapMod.F90 -- decrease tolerance
+ M models/lnd/clm/tools/shared/mkmapdata/README --------------------- Fix/update documentation
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl ----------- Correct test count
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml --- Add 0.125x0.125
+ mapping files
+ M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml - Add 0.125x0.125
+ as a valid resolution
+ M models/lnd/clm/bld/namelist_files/createMapEntry.pl --- Correct path, get working again
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ yellowstone_gnu yes
+ goldbach_nag yes
+ goldbach_intel yes
+ goldbach_pgi yes
+
+ short tests (aux_clm_short) (generally these are NOT used when making a tag):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ goldbach_nag yes
+
+ tools testing:
+
+ yellowstone interactive yes
+
+CLM tag used for the baseline comparisons: clm4_5_1_r079
+
+Changes answers relative to baseline: Yes! (PE layouts that change)
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: non single-point configurations, where PE layout
+ changes (f10_f10)
+ - what platforms/compilers: all
+ - nature of change (roundoff)
+ x2l_Flrr_volr changes to roundoff
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r079
+Originator(s): andre (Benjamin Andre,LAWRENCE BERKELEY NATIONAL LABORATORY,510-486-4617)
+Date: Thu Jul 31 17:09:57 MDT 2014
+One-line Summary: G. Bisht (LBL) soil temperature refactor; machines update for goldbach-intel
+
+Purpose of changes: Refactor soil temperature module to break the construction of the linear system LHS matrix and RHS vector into small physics based routines. Update machines external to fix compiling with goldbach-intel.
+
+Requirements for tag: regular
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: gbisht, self, muszala
+
+List any svn externals directories updated (csm_share, mct, etc.): Machines
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90 - break creation of linear system into small physics based routines.
+
+
+CLM testing: regular
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel - 4.0 ok, 4.5 ok
+ yellowstone_pgi - 4.0 ok, 4.5 ok
+ goldbach_nag - 4.0 ok, 4.5 ok (see note below)
+ goldbach_intel - 4.0 ok, 4.5 ok
+ goldbach_pgi - 4.0 ok, 4.5 ok
+
+ NOTE for goldbach_nag - four ED compare_hist tests fail with small non bit for bit differences. This is the same issue described in clm4_5_1_r078 tag notes.
+
+ ERS_D_Mmpi-serial.1x1_brazil.ICLM45CNED.goldbach_nag.clm-edTest
+ ERS_D_Mmpi-serial.1x1_brazil.ICLM45CNED.goldbach_nag.clm-edTest
+ SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_nag.clm-edTest
+ SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_nag.clm-edTest
+
+ tools testing: N/A
+
+CLM tag used for the baseline comparisons: clm4_5_1_r078
+
+Changes answers relative to baseline: No, bit for bit
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r078
+Originator(s): muszala (Stefan Muszala)
+Date: Wed Jul 23 20:42:00 MDT 2014
+One-line Summary: Add lai stream capability and the ability to run with V5 cruncep data. Code written by swenson,
+modified and tested by muszala.
+
+Purpose of changes: Add lai stream capability with use_lai_streams namelist variable. Also add a datm_mode option
+s.t. we can use cruncep V5 data if a user wants. Move anomaly forcing code out of CLM and into DATM.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: add setup_logic_lai_streams which controls use_lai_streams namelist variable
+
+List any changes to the defaults for the boundary datasets: added option to use V5 cruncep data sets. V4 is default.
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: kluzek, swenson, self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+-scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/addclm50_tags/addclm50_n06_ED_scripts_015_140305_rev
++scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/addclm50_tags/addclm50_n09_ED_scripts_015_140305_rev
+
+-models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_140312
++models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_140723
+
+-models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140418
++models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140723
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+! add setup_logic_lai_streams function
+M models/lnd/clm/bld/CLMBuildNamelist.pm
+! add entries for: stream_year_first_lai, stream_year_last_lai, model_year_align_lai, stream_fldfilename_lai, lai_mapalgo
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+! add default values for items added in namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+! add use_lai_streams code. lai_init, lai_interp
+M models/lnd/clm/src/clm4_5/biogeochem/SatellitePhenologyMod.F90
+! some comment clean up. add use_lai_streams logical
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+! add use_lai_streams namelist handling and mpi_bcast call
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+! remove anomaly forcing streams since they are now in the datm
+M models/lnd/clm/src/cpl/clm_cpl_indices.F90
+! remove anomaly forcing code since it is now in datm
+M models/lnd/clm/src/cpl/lnd_import_export.F90
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone - failed 20 tests of 537. This is expected due to the addition of the use_lai_streams namelist variable.
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ -- nlcomp tests for 45 will fail --
+
+ yellowstone_intel - 40 OK - 45 OK
+ yellowstone_pgi - 40 OK - 45 OK
+
+ goldbach_nag - 40 OK - 45 OK
+ goldbach_intel - 40 OK - 45 OK
+ goldbach_pgi - 40 OK - 45 OK
+
+Both NAG tests on goldbach for ED compsets failed BFB. All other compilers and machines pass. Error probably related to a non-BFB error that I (spm) see with varying numbers of
+time-steps on an ERS test and am currently trying to fix.
+
+FAIL ERS_D_Mmpi-serial.1x1_brazil.ICLM45CNED.goldbach_nag.clm-edTest.GC.again_45_nag.compare_hist.clm4_5_1_r077_redo
+FAIL ERS_D_Mmpi-serial.1x1_brazil.ICLM45CNED.goldbach_nag.clm-edTest.GC.again_45_nag.clm2.h0.compare_hist.clm4_5_1_r077_redo
+FAIL SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_nag.clm-edTest.GC.again_45_nag.compare_hist.clm4_5_1_r077_redo
+FAIL SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_nag.clm-edTest.GC.again_45_nag.clm2.h0.compare_hist.clm4_5_1_r077_redo
+CLM tag used for the baseline comparisons: clm4_5_1_r077
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r077
+Originator(s): andre (Benjamin Andre,LAWRENCE BERKELEY NATIONAL LABORATORY,510-486-4617)
+Date: Thu Jul 10 21:55:11 MDT 2014
+One-line Summary: Refactor from Jinyun Tang (LBL) to make hydrology more modular and eventually allow runtime selection of different physics implementations.
+
+Purpose of changes: Refactor a number of routines in clm45 hydrology to move duplicate code into reusable routines, make the code more modular for eventual unit testing and run time selection of different physics.
+
+Requirements for tag: bit for bit, regular testing
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self, clm-cmt
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do:
+
+ models/lnd/clm/src/util_share/SimpleMathMod.F90 - reusable array functions
+ models/lnd/clm/src/clm4_5/main/init_hydrology.F90 - initialize different hydrology submodules.
+ models/lnd/clm/src/clm4_5/main/FuncPedotransferMod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/SurfaceResistanceMod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/RootBiophysMod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/SoilWaterMovementMod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/SoilMoistStressMod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/SoiWatRetCurveParMod.F90 - modularize
+
+
+List all existing files that have been modified, and describe the changes:
+
+ models/lnd/clm/bld/query-xFail - check python version and provide an error message if it is too old.
+ models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90 - remove unused min/max variables that conflict with intrinsics with gfortran.
+
+
+ models/lnd/clm/src/clm4_5/main/initTimeConstMod.F90 - modularize
+ models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90 - allocate new variable, fix bounds on porosity
+ models/lnd/clm/src/clm4_5/main/controlMod.F90 - initialize new hydrology modules
+ models/lnd/clm/src/clm4_5/main/clmtype.F90 - add new variable
+ models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90 - modularize
+ models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90 - modularize
+
+CLM testing: regular
+
+ build-namelist tests:
+
+ yellowstone - n/a
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel - OK clm40, OK clm45
+ yellowstone_pgi - OK clm40, OK clm45
+ goldbach_nag - OK clm40, OK clm45
+ goldbach_intel - OK clm40, OK clm45
+ goldbach_pgi - OK clm40, OK clm45
+
+ short tests (aux_clm_short) (generally these are NOT used when making a tag):
+
+ yellowstone_intel - n/a
+ yellowstone_pgi - n/a
+ goldbach_nag - n/a
+
+ tools testing:
+
+ yellowstone interactive - n/a
+ goldbach interactive - n/a
+
+CLM tag used for the baseline comparisons: clm4_5_1_r076
+
+Changes answers relative to baseline: No, bit for bit
+
+ Note: SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_nag.clm-edTest is not bit for bit. This is the same test Stef had problems with in clm4_5_75. He has looked at it and given the ok make the tag as is.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_1_r076
+Originator(s): erik (Erik)
+Date: Mon Jul 7 14:24:07 MDT 2014
+Orig Date: Wed Jun 25 13:49:49 MDT 2014 (Date of what was tagged as clm4_6_0, before we changed naming convention)
+One-line Summary: Answer changes for fire code from Fang Li
+
+Purpose of changes:
+
+Several changes to CN Fire model. Some fixes for non-transient, as well as limiting of fire for high tropical
+forest coverage. Change some units from per time-step to per second. Change Lightning input dataset from just
+cloud to ground to total lightning. Some fire parameters were also changed and re-tuned for Qian forcing.
+Some more documentation on fire fields was added.
+
+When -ed_mode is sent to CLM build-namelist, a particular ED params dataset is used over the default. Make
+a simple change that allows ED to run when CN is off. Add a 1850 and transient 20thC and rcp=8.5 datasets
+for 1x1_brazil.
+
+Requirements for tag: Fix bugs 1805 and 1719, lower fire amount in amazon
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 1996 -- change cloud to ground lightning dataset to total lightning
+ 1995 -- change units from per time-step to per second
+ 1805 -- fire fix for non-transient
+ 1719 -- remove double counting of baf in fire area
+ 1992 -- allow ED to run when use_cn=.false.
+ 1988 -- Add ED params dataset.
+ 1991 -- transient datasets for 1x1_brazil
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ specific rparams file for ED
+ add 1850 and 20thC, rcp8.5 datasets for 1x1_brazil
+ use lightning file that is total lightning not just cloud-to-ground
+
+Describe any substantial timing or memory changes:
+ The test SMS.f19_g16.IRCP45CN.yellowstone_pgi showed a memory increase
+
+Code reviewed by: self, lifang
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+ scripts to addclm50_n06_ED_scripts_015_140305_rev
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl --------- Add some ED tests
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml - Different
+ params file for ED, and add 1850 and 20thC, rcp8.5 datasets for 1x1_brazil
+ and use lightning file that is total lightning not just cloud-to-ground
+ M models/lnd/clm/bld/CLMBuildNamelist.pm ------------------------- Pass use_ed
+ when getting paramfile
+ M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90 ------------- Revisions from
+ Fang Li (2014), change parameters, add documentation, tropical forests will
+ only burn if > 60% coverage, change some fields units to per second rather than
+ per time-step,
+ M models/lnd/clm/src/clm4_5/biogeochem/CNInitMod.F90 ------------- change units
+ for nfire, and farea_burned
+ M models/lnd/clm/src/clm4_5/main/clmtype.F90 --------------------- Change units
+ for nfire, lfc, lfc2, baf_crop, baf_peatf, fbac, fbac1, farea_burned
+ M models/lnd/clm/src/clm4_5/main/histFldsMod.F90 ----------------- Change units for:
+ LFC2, NFIRE, FAREA_BURNED, BAF_CROP, BAF_PEATF
+ M models/lnd/clm/src/clm4_5/biogeophys/ED/EDPhotosynthesisMod.F90 Allow to work
+ when use_cn is .false., use c3psn+1 in finding index for dr array.
+
+CLM testing:
+
+ build-namelist tests: yes
+
+ NOTE: 191 of the 537 compare tests fail, compared to clm4_5_75, because of changes in the namelist.
+
+ yellowstone yes
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ goldbach_nag yes
+ goldbach_intel yes
+ goldbach_pgi yes
+
+CLM tag used for the baseline comparisons: clm4_5_75
+
+Changes answers relative to baseline:
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CN and BGC with CLM4.5 physics
+ CLM4.0 for all modes, and CLM4.5 with SP or ED should be identical
+ - what platforms/compilers: All
+ - nature of change: new climate
+
+ Fang Li, ran simulations with Qian forcing on yellowstone and tuned fire parameters to that forcing.
+ However, her simulations had a minor bug in the conversion of total lightning to just cloud-to-ground
+ (latitude in degrees was used for a cosine, rather than latitude in radians -- see bug 1996).
+
+===============================================================
+===============================================================
+Tag name: clm4_5_75
+Originator(s): muszala (Stefan Muszala,UCAR/TSS,303-497-1320)
+Date: Fri May 30 11:18:35 MDT 2014
+One-line Summary: update externals to rtm1_0_38 and esmf_wrf_timemgr_140529
+
+Purpose of changes: update externals to rtm1_0_38 and esmf_wrf_timemgr_140529. These modifications
+are based on valgrind errors that orginated in src/riverroute. Tested in clm4_5_72 to make sure
+everything was still BFB (at least w.r.t. CLM testing). Retested (results below) against clm4_5_73.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+-models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_37
++models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_38
+
+-models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_130213
++models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_140529
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes: N/A
+
+CLM testing:
+
+in addition to other clm tests I updated the rtm and esmf externals in cesm1_3_alpha09c and
+ran two B cases.
+
+Note: There is one ED test (SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_nag.clm-edTestGb) that failed for me in my
+testing but passed for Bill with the same checkout. We will keep an eye on this, but it's not super-critical at the moment.
+Differences are very small in cpl. voc fields (largest RMS difference is 1e-13).
+
+>>more TestStatus
+PASS ERS_PT.T31_g37.B1850CN.yellowstone_gnu
+PASS ERS_PT.T31_g37.B1850CN.yellowstone_gnu.memleak
+
+>>more TestStatus
+PASS ERS.ne30_g16.B1850C5CN.yellowstone_intel
+PASS ERS.ne30_g16.B1850C5CN.yellowstone_intel.memleak
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel - 40 OK - 45 OK - component gen comp - OK
+ yellowstone_pgi - 40 OK - 45 OK - component gen comp - OK
+
+ goldbach_nag - 40 OK - 45 OK
+ goldbach_intel - 40 OK - 45 OK
+ goldbach_pgi - 40 OK - 45 OK
+
+CLM tag used for the baseline comparisons: clm4_5_73
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_74
+Originator(s): sacks (sacks)
+Date: Wed May 28 16:05:36 MDT 2014
+One-line Summary: misc. bfb changes - see detailed summary below
+
+Purpose of changes:
+
+ (1) Rename fpftdyn to flanduse_timeseries, and make related changes to names
+ throughout the code. This rename is in preparation for an upcoming tag
+ where this file will take on more general uses (e.g., transient crop
+ areas).
+
+ (2) Decrease thresholds for water, snow and energy balance checks (these were
+ too permissive)
+
+ (3) Move stuff out of clm_varcon into landunit_varcon (for constants specific
+ to CLM's landunits) and column_varcon (for constants specific to CLM's
+ columns) - analogous to the existing pftvarcon
+
+ (4) Move some routines out of initGridCellsMod into a new initSubgridMod
+
+ (5) Make time_info a public member of dyn_file_type, which allows removing a
+ bunch of delegation methods. And rename some things in time_info_type for
+ clarity.
+
+ (6) Rework metadata for the description of landunit, column and pft types
+ on the history and restart files, to centralize these descriptions to the
+ appropriate place in the code.
+
+ (7) Add general-purpose functionality for setting up subgrid structure for
+ unit tests
+
+ (8) Move unit tests into source tree, rather than being in
+ test/unit_testers. Now the top-level script is in models/lnd/clm/src.
+
+ (9) Fix baseline comparisons for PTCLM tests
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ - 1928 (create landunit_varcon.F90 and column_varcon.F90 from parts of clm_varcon.F90)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: fpftdyn renamed to flanduse_timeseries
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts: addclm50_n03_ED_scripts_015_140305_rev -> addclm50_n04_ED_scripts_015_140305_rev
+ - Rename CLM's fpftdyn to flanduse_timeseries in tests; update perl5lib
+
+ tools/unit_testing: unit_testing_0_04 -> unit_testing_0_05
+ - the major change here is allowing rebuilds with intel without needing to
+ specify --clean
+
+ models/lnd/clm/tools/PTCLM: PTCLM2_140423 -> PTCLM2_140521
+ - rename fpftdyn -> flanduse_timeseries, and other related renames
+
+List all files eliminated:
+
+========= Renamed
+D models/lnd/clm/tools/clm4_5/mksurfdata_map/pftdyn_hist_simyr1850-2005.txt
+D models/lnd/clm/tools/clm4_0/mksurfdata_map/pftdyn_hist_simyr1850-2005.txt
+
+========= Move unit tests into source tree
+D models/lnd/clm/test/unit_testers/clm4_5/biogeophys/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/biogeophys/Daylength_test/test_daylength.pf
+D models/lnd/clm/test/unit_testers/clm4_5/biogeophys/Daylength_test/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/biogeophys/Daylength_test
+D models/lnd/clm/test/unit_testers/clm4_5/biogeophys
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test/test_update_landunit_weights_one_gcell.pf
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test/test_update_landunit_weights.pf
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/test_dynVarShared.F90
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/test_dynVarTimeInterp.pf
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/test_dynVarTimeUninterp.pf
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynTimeInfo_test/test_dynTimeInfo.pf
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynTimeInfo_test/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynTimeInfo_test
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynInitColumns_test/test_init_columns.pf
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynInitColumns_test/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynInitColumns_test
+D models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_var.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/GetGlobalValuesMod_mock.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_var.F90.in
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/clm_time_manager_mock.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_pio_mock.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/spmdMod_mock.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/do_genf90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_pio_mock.F90.in
+D models/lnd/clm/test/unit_testers/clm4_5/mock/util_share
+D models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share/shr_sys_mod_mock.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share/mct_mod_mock.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share
+D models/lnd/clm/test/unit_testers/clm4_5/mock/main/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/mock/main/histFileMod_mock.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/main
+D models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid/dynFileMod_mock.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid
+D models/lnd/clm/test/unit_testers/clm4_5/mock/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/mock
+D models/lnd/clm/test/unit_testers/clm4_5/CMakeLists.txt
+D models/lnd/clm/test/unit_testers/clm4_5/README
+D models/lnd/clm/test/unit_testers/clm4_5
+D models/lnd/clm/test/unit_testers
+
+
+List all files added and what they do:
+
+========= Renamed
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/landuse_timeseries_hist_simyr1850-2005.txt
+A models/lnd/clm/tools/clm4_0/mksurfdata_map/landuse_timeseries_hist_simyr1850-2005.txt
+
+========= Move stuff out of clm_varcon into landunit_varcon (for constants
+ specific to CLM's landunits) and column_varcon (for constants specific
+ to CLM's columns) - analogous to the existing pftvarcon.
+A models/lnd/clm/src/clm4_5/main/landunit_varcon.F90
+A models/lnd/clm/src/clm4_5/main/column_varcon.F90
+
+
+========= Move some routines out of initGridCellsMod - these are lower-level
+ routines that can also be used by unit test code. So initGridCellsMod
+ contains higher-level stuff that is specific to how the subgrid
+ structure is set up in a production run; and initSubgridMod contains
+ lower-level stuff that doesn't know or care how things are actually
+ set up, conceptually.
+A models/lnd/clm/src/clm4_5/main/initSubgridMod.F90
+
+========= Add general-purpose functionality for setting up subgrid structure for unit tests
+A models/lnd/clm/src/unit_test_shr/unittestSubgridMod.F90
+A models/lnd/clm/src/unit_test_shr/CMakeLists.txt
+A models/lnd/clm/src/unit_test_shr
+
+
+========= Move unit tests into source tree; also modify some unit tests to take
+ advantage of the new unittestSubgridMod; also add tests of
+ subgridWeightsMod and clm_glclnd
+A models/lnd/clm/src/clm4_5/main/test/subgridWeights_test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/main/test/subgridWeights_test/test_subgridWeights.pf
+A models/lnd/clm/src/clm4_5/main/test/subgridWeights_test
+A models/lnd/clm/src/clm4_5/main/test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/main/test/clm_glclnd_test/test_clm_glclnd.pf
+A models/lnd/clm/src/clm4_5/main/test/clm_glclnd_test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/main/test/clm_glclnd_test
+A models/lnd/clm/src/clm4_5/main/test
+A models/lnd/clm/src/clm4_5/biogeophys/test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/biogeophys/test/Daylength_test/test_daylength.pf
+A models/lnd/clm/src/clm4_5/biogeophys/test/Daylength_test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/biogeophys/test/Daylength_test
+A models/lnd/clm/src/clm4_5/biogeophys/test
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynLandunitArea_test/test_update_landunit_weights_one_gcell.pf
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynLandunitArea_test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynLandunitArea_test/test_update_landunit_weights.pf
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynLandunitArea_test
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynVar_test/test_dynVarShared.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynVar_test/test_dynVarTimeInterp.pf
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynVar_test/test_dynVarTimeUninterp.pf
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynVar_test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynVar_test
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynTimeInfo_test/test_dynTimeInfo.pf
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynTimeInfo_test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynTimeInfo_test
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynInitColumns_test/test_init_columns.pf
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynInitColumns_test/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test/dynInitColumns_test
+A models/lnd/clm/src/clm4_5/dyn_subgrid/test
+A models/lnd/clm/src/README.unit_testing
+A models/lnd/clm/src/unit_test_mocks/util_share/ncdio_var.F90
+A models/lnd/clm/src/unit_test_mocks/util_share/GetGlobalValuesMod_mock.F90
+A models/lnd/clm/src/unit_test_mocks/util_share/ncdio_var.F90.in
+A models/lnd/clm/src/unit_test_mocks/util_share/clm_time_manager_mock.F90
+ - also add a routine to this mock, needed because of refactor of
+ dyn_file_type / dyn_time_info
+A models/lnd/clm/src/unit_test_mocks/util_share/ncdio_pio_mock.F90
+A models/lnd/clm/src/unit_test_mocks/util_share/spmdMod_mock.F90
+A models/lnd/clm/src/unit_test_mocks/util_share/do_genf90
+A models/lnd/clm/src/unit_test_mocks/util_share/CMakeLists.txt
+A models/lnd/clm/src/unit_test_mocks/util_share/ncdio_pio_mock.F90.in
+A models/lnd/clm/src/unit_test_mocks/util_share
+A models/lnd/clm/src/unit_test_mocks/csm_share/shr_sys_mod_mock.F90
+A models/lnd/clm/src/unit_test_mocks/csm_share/mct_mod_mock.F90
+A models/lnd/clm/src/unit_test_mocks/csm_share/CMakeLists.txt
+A models/lnd/clm/src/unit_test_mocks/csm_share
+A models/lnd/clm/src/unit_test_mocks/main/CMakeLists.txt
+A models/lnd/clm/src/unit_test_mocks/main/histFileMod_mock.F90
+A models/lnd/clm/src/unit_test_mocks/main
+A models/lnd/clm/src/unit_test_mocks/dyn_subgrid/dynFileMod_mock.F90
+A models/lnd/clm/src/unit_test_mocks/dyn_subgrid/CMakeLists.txt
+A models/lnd/clm/src/unit_test_mocks/dyn_subgrid
+A models/lnd/clm/src/unit_test_mocks/CMakeLists.txt
+A models/lnd/clm/src/unit_test_mocks
+A models/lnd/clm/src/CMakeLists.txt
+
+List all existing files that have been modified, and describe the changes:
+
+========= Renamed fpftdyn -> flanduse_timeseries, and other related changes to
+ variable names
+ (NOTE: Some source files are listed both here and elsewhere in the
+ ChangeLog entry)
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata_map.namelist
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/README
+M models/lnd/clm/tools/shared/ncl_scripts/sample_inlist
+M models/lnd/clm/tools/shared/ncl_scripts/sample_outlist
+M models/lnd/clm/tools/clm4_0/mksurfdata_map/src/clm_varctl.F90
+M models/lnd/clm/tools/clm4_0/mksurfdata_map/mksurfdata_map.namelist
+M models/lnd/clm/tools/clm4_0/mksurfdata_map/mksurfdata.pl
+M models/lnd/clm/tools/clm4_0/mksurfdata_map/README
+M models/lnd/clm/bld/CLMBuildNamelist.pm
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+M models/lnd/clm/bld/test_build_namelist/t/input/namelist_defaults_clm4_5_test.xml
+M models/lnd/clm/bld/test_build_namelist/t/input/namelist_definition_clm4_5_test.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml
+M models/lnd/clm/doc/UsersGuide/trouble_shooting.xml
+M models/lnd/clm/doc/UsersGuide/single_point.xml
+M models/lnd/clm/doc/UsersGuide/tools.xml
+M models/lnd/clm/doc/UsersGuide/adding_files.xml
+M models/lnd/clm/doc/UsersGuide/custom.xml
+M models/lnd/clm/doc/UsersGuide/ptclm.xml
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+M models/lnd/clm/src/clm4_5/main/restFileMod.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/surfrdUtilsMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynSubgridDriverMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynpftFileMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_0/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_0/main/pftdynMod.F90
+M models/lnd/clm/src/clm4_0/main/controlMod.F90
+M models/lnd/clm/src/clm4_0/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_0/main/clm_driver.F90
+M models/lnd/clm/src/clm4_0/biogeophys/BiogeophysRestMod.F90
+
+========= Updated PTCLM external to rename fpftdyn -> flanduse_timeseries
+M models/lnd/clm/tools/SVN_EXTERNAL_DIRECTORIES
+
+========= Decrease threshold for water & snow balance checks by 3 orders of
+ magnitude; decrease threshold for energy balance checks by 2 orders of
+ magnitude
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+
+========= Separate clm_varcon into clm_varcon, column_varcon and landunit_varcon
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+M models/lnd/clm/src/clm4_5/main/CMakeLists.txt
+
+========= Move some routines out of initGridCellsMod, into a new initSubgridMod
+ (see detailed notes above)
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+
+========= Make time_info a public member of dyn_file_type. This allows us to
+ remove all methods from dyn_file_type (which were just delegating
+ responsibility to time_info_type). Also rename some methods and
+ variables in time_info_type.
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynFileMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynTimeInfoMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynpftFileMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarMod.F90.in
+ - also change intent(in) to intent(inout), fixing a gfortran problem
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeUninterpMod.F90.in
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeUninterpMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90.in
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90
+
+========= Rework metadata for the description of landunit, column and pft types
+ on the history and restart files. Point is to centralize the
+ definition of these different types as much as possible (rather than,
+ e.g., having restFileMod know about the translation between landunit
+ indices and names). For the history file, I am removing the metadata
+ from the PCT_LANDUNIT long name, instead putting it in global
+ metadata, as is done for the restart file.
+M models/lnd/clm/src/clm4_5/main/subgridWeightsMod.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/restFileMod.F90
+M models/lnd/clm/src/clm4_5/main/pftvarcon.F90
+
+========= Change 'use' statements based on my split of clm_varcon into
+ clm_varcon, landunit_varcon and column_varcon
+M models/lnd/clm/src/clm4_5/biogeochem/CNWoodProductsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4InitMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/clm4_5/main/initTimeConstMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_5/main/subgridMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/initColdMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/ED/EDCLMLinkMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+M models/lnd/clm/src/clm4_5/main/surfrdMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/TridiagonalMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynLandunitAreaMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynConsBiogeochemMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynEDMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynCNDVMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynConsBiogeophysMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynInitColumnsMod.F90
+
+========= Fix baseline comparisons for PTCLM tests
+M models/lnd/clm/test/tools/TSMscript_tools.sh
+M models/lnd/clm/test/tools/TBLscript_tools.sh
+
+========= Just changes in whitespace
+M models/lnd/clm/src/clm4_5/dyn_subgrid/CMakeLists.txt
+M models/lnd/clm/src/clm4_5/biogeophys/CMakeLists.txt
+
+
+CLM testing:
+
+ Most testing done on dynlu_crops_n01_addclm50bld_n06_clm4_5_72; PTCLM and
+ tools testing done on dynlu_crops_n03_addclm50bld_n06_clm4_5_72
+
+ Note that the branch was up-to-date with addclm50bld_n06_clm4_5_72; this is
+ identical to clm4_5_73 except for a fix to the build-namelist tests (see below)
+
+ build-namelist tests:
+
+ yellowstone: ok
+ compared against addclm50bld_n06_clm4_5_72 (essentially clm4_5_73)
+ expected diffs for transient cases
+
+ The following tests also failed when comparing the baseline against
+ itself (NOTE: this is apparently fixed in clm4_5_73):
+
+ 466/497 < FAIL>
+ 467/497 < FAIL>
+ 496/497 < FAIL>
+ 497/497 < FAIL>
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ goldbach_nag: ok
+ goldbach_intel: ok
+ goldbach_pgi: ok
+
+ Most comparisons were done against clm4_5_72.
+
+ These comparisons failed due to a problem with component_gen_comp and the
+ SSP test; manual comparisons show these to be identical to Erik's tests (for
+ clm4_5_73):
+
+ FAIL SSP.f19_g16.I1850CLM45BGC.yellowstone_pgi.clm-default.GC.0520-2021.45.p.clm2.h0.compare_hist.clm4_5_72
+ FAIL SSP.f19_g16.I1850CLM45BGC.yellowstone_pgi.clm-default.GC.0520-2021.45.p.clm2.h1.compare_hist.clm4_5_72
+
+ For new tests added in Erik's upcoming tag (clm4_5_73), I did manual
+ comparisons against Erik's baselines (cpl & clm hist for the yellowstone
+ tests, just cpl for goldbach tests) - all PASS.
+
+ Note that CLM hist files were NOT compared for any goldbach tests, because
+ there were no CLM hist file baselines for clm4_5_72.
+
+ tools testing:
+
+ yellowstone interactive: ok
+
+ Compared against addclm50bld_n06_clm4_5_72 (essentially clm4_5_73)
+
+ Failures in the following baseline comparisons, due to changed name of output
+ file (surfdata.pftdyn -> landuse.timeseries). Manual comparisons showed the
+ output files to be identical in all cases:
+
+ 010 bl754 TBLtools.sh clm4_0 mksurfdata_map tools__s namelist ...................................\c
+ rc=7 FAIL
+ 012 blg54 TBLtools.sh clm4_5 mksurfdata_map tools__s namelist ...................................\c
+ rc=7 FAIL
+ 018 bl974 TBLscript_tools.sh clm4_0 mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools_\c
+ rc=7 FAIL
+ 030 bli74 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools_\c
+ rc=7 FAIL
+ 040 blfg4 TBLscript_tools.sh shared PTCLM PTCLMmkdata PTCLM_USUMB_Global_clm4_5^buildtools ......\c
+ rc=7 FAIL
+
+ Other than that, all tests & baseline comparisons passed
+
+CLM tag used for the baseline comparisons: clm4_5_72, except where noted above
+
+Changes answers relative to baseline: NO - bfb
+
+===============================================================
+===============================================================
+Tag name: clm4_5_73
+Originator(s): erik (Erik Kluzek,UCAR/TSS,303-497-1326)
+Date: Wed May 28 15:33:10 MDT 2014
+One-line Summary: Add the stub ability for clm5_0 physics to CLM build system
+
+Purpose of changes:
+
+Add phys=clm5_0 as an option to the build. Currently, nothing is different in the code, this just
+adds the capability to do clm5_0 as a separate configuration. The one thing that is different between
+clm4_5 and clm5_0 is the setting of urban_hac.
+
+Fix several issues needed for CAM: problem in DryDeposition (recurrence of bug 1883, that was fixed and then
+unfixed in clm4_5_48), fix for internal compiler errors. CAM has been using the branch version of this
+since: cam5_3_29.
+
+Work on updates for PTCLM. Add some new sites for Rosie and Jinyun. Correct the call to mkmapdata.sh.
+Have CLM1PT forcing directory to use DIN_LOC_ROOT_CLMFORC so you can point it to a location separate
+from DIN_LOC_ROOT. Add a new support script to PTCLM to submit a list of sites to batch: PTCLMsublist.
+Also allow release_tags in version find. Get buildtools to work on edison/hopper.
+
+Get tools to work on hopper and edison, and update mapping to use ESMF6.3.0.
+
+Fix various bugs: internal compiler error on janus, trigger an error if user_datm.streams.txt file is
+ readonly (rather than hang). Use DIN_LOC_ROOT_CLMFORC for CLM1PT.
+
+Requirements for tag: Fix bug 1883 and 1985 for Cheryl and CAM, fix PTCLM, add clm5_0, tools on hopper/edison
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+ 1985 Internal compiler error on yellowstone with CLM in CAM standalone build
+ 1965 Internal compiler error on janus with CLM on janus
+ 1938 Upgrade mkmapdata to ESMF6.3.0
+ 1937 Using a read-only user_datm.streams.txt file causes cesm_setup to hang
+ 1936 CLM1PT forcing directory needs to use DIN_LOC_ROOT_CLMFORC
+ 1935 Changes needed to get tools to build on hopper...
+ 1933 Correct call to mkmapdata.sh in PTCLM
+ 1925 Add more sites to PTCLM
+ 1904 check for LSF_PJL_TYPE in regridbatch.sh doesn't work correctly
+ 1883 uninitialized variable in DryDepVelocity.F90
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: Add clm5_0 as a new supported physics type
+
+Describe any changes made to the namelist: Set urban_hac according to physics
+ clm5_0=ON_WASTEHEAT, and clm4_5=ON
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self (clm_phys perl object reviewed by team: bandre, muszala, sacks)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, datm
+ scripts to addclm50_n03_ED_scripts_015_140305_rev
+ datm to datm8_140312
+ csm_share to share3_140418 Use trunk version rather than branch
+ tools/mapping to mapping_131217a
+ PTCLM to PTCLM2_140423
+
+List all files eliminated:
+
+ D models/lnd/clm/bld/unit_testers/env_run.xml -- This file is now built dynamically when the tester is run.
+
+List all files added and what they do:
+
+ A models/lnd/clm/bld/query-xFail --- Add a script from Ben Andre to read and report on expected fails.
+ A models/lnd/clm/bld/env_run.xml --- envxml_dir option is now required, so this provides a env_*.xml
+ file that can be read by default, when build-namelist is called outside of CESM for testing.
+ A models/lnd/clm/bld/config_files/clm_phys_vers.pm - Enter physics version as a string i.e.: clm4_0
+ and then have the ability to interpret it as different types so you can do logical operations
+ on physics versions
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/tools/shared/mkmapdata/regridbatch.sh ------- Add ability to run on hopper
+ M models/lnd/clm/tools/shared/mkmapdata/mkmapdata.sh --------- Add ability to run on hopper/edison
+ remove jaguarpf, and upgrade to ESMF6.3.0
+ M models/lnd/clm/tools/shared/ncl_scripts/getregional_datasets.ncl - New version of NCL requires
+ load before "begin" statement
+
+ M models/lnd/clm/bld/configure ------------- Allow phys=clm5_0 and add in new clm_phys_vers object
+ M models/lnd/clm/bld/README ---------------- Update info on files
+ M models/lnd/clm/bld/CLMBuildNamelist.pm --- Put list of required options at top of help, and make envxml_dir
+ a required option. Add in use of clm_phys_vers object. Make sure use_ed_spitfire is only on if use_ed is
+ on. If CLM_UPDATE_GLC_AREAS=TRUE and phys=clm4_0 trigger an error
+ M models/lnd/clm/bld/config_files/config_definition_clm4_5.xml - Add all three physics version options
+ M models/lnd/clm/bld/config_files/config_definition_clm4_0.xml - Add info. about clm4_5/clm5_0 options
+
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl - Add use of clm_phys_vers, and create env_run.xml
+ on the fly. Also add new tests for glacier update areas, and ED: usespitfireButNOTED, useEDclm40, useEDContradict2
+ useEDContradict, clm40andUpdateGlc, clm40andUpdateGlc, UpdateGlcContradict, UpdateGlcNoGLCMe, and tests
+ for clm5_0
+
+ M models/lnd/clm/bld/test_build_namelist/t/test_vichydro.pm ----- Needs to use clm_phys_vers object
+ M models/lnd/clm/bld/test_build_namelist/test_build_namelist.pl - Needs to use clm_phys_vers object
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml - urban_hac depends on clm4_5/clm5_0
+
+-------------- Add phys=clm5_0 for all use-cases that test on phys
+ M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/20thC_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/2000_glacierMEC_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850_glacierMEC_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_glacierMEC_transient.xml
+
+------------- Fix bugs 1883 (rs over lake) and 1985 (CAM internal compiler error) and 1965
+------------- (janus internal compiler error)
+ M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90 ----- Set rs over lake
+ M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90 -------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_5/main/restFileMod.F90 -------------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_5/main/surfrdMod.F90 ---------------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90 ------ Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90 -- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90 ----- Add use only for clmtype and dynVarTimeUninterpMod
+ fixes internal compiler error on janus (1965)
+ M models/lnd/clm/src/clm4_0/biogeochem/DryDepVelocity.F90 ----- Set rs over lake
+ M models/lnd/clm/src/clm4_0/main/clm_initializeMod.F90 -------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_0/main/subgridRestMod.F90 ----------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_0/main/pftdynMod.F90 ---------------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_0/main/iniTimeConst.F90 ------------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_0/main/restFileMod.F90 -------------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_0/main/surfrdMod.F90 ---------------- Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_0/biogeophys/UrbanInputMod.F90 ------ Add use only for ncd_pio
+ M models/lnd/clm/src/clm4_0/biogeophys/BiogeophysRestMod.F90 -- Add use only for ncd_pio
+
+CLM testing:
+
+ a) regular
+ b) build_namelist
+ c) tools
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ regular tests (aux_clm40, aux_clm45, with '-model_gen_comp clm2'):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ goldbach_nag yes
+ goldbach_intel yes
+ goldbach_pgi yes
+
+ tools testing:
+
+ yellowstone interactive yes
+
+CLM tag used for the baseline comparisons: clm4_5_72
+
+Changes answers relative to baseline: No (bit-for-bit)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_72
+Originator(s): muszala (Stefan Muszala)
+Date: Mon May 5 17:47:52 MDT 2014
+One-line Summary: Introduce code for Ecosystem Demography (CLM(ED)) Model
+
+Purpose of changes: Introduce code for Ecosystem Demography (CLM(ED)) Model - first functional tag.
+
+A large chunk of this code was written and re-written by Rosie Fisher.
+
+"Introduce code for Ecosystem Demography (CLM(ED)) Model. Adds capability to allow plant functional
+types to compete for light, to represent recovery from disturbance, and to allow disturbances
+(i.e. fire) to only afflict some fraction of the canopy, and to represent vegetation at the scale
+of cohorts of trees. Note that this is a large change and includes:
+
+1. Significant alterations to canopy albedo and surface radiation calculations
+2. New photosynthesis scheme, based on existing science but to allow for more complex canopy structure
+3. Introduction of a new allocation and growth scheme (not consistent with that in CLM(CN))
+4. Removal, for now, of Nitrogen limitation capabilities
+5. Introduction of the SPITFIRE fire model, which interacts with ED via its representation of
+ size-structured mortality and removal of litter pools.
+6. Introduction of a simple seed bank model to allow persistence of vegetation through fire events.
+7. For ED compsets there exists a cohort dimension on the restart files.
+
+Cold starts and restarts work for the following. The 1x1_brazil is the most heavily tested case both from
+science and SE standpoint:
+
+1x1_brazil.ICLM45CNED.yellowstone_[intel | pgi]
+5x5_amazon.ICLM45CNED.yellowstone_[intel | pgi]
+1x1_brazil.ICLM45CNED.goldbach_[nag | intel | pgi ]
+5x5_amazon.ICLM45CNED.goldbach_[nag | intel | pgi ]
+
+Cold starts work for:
+
+f10_f10.ICLM45CNED.yellowstone_[intel | pgi].
+f19_g16.ICLM45CNED.yellowstone_[intel | pgi]
+
+Code Origins:
+
+The ED code in CLM is originally based on code by Moorcroft (www.oeb.harvard.edu/faculty/moorcroft/code_and_data/index.html)
+and has been heavily modified in regards to both scientific implementation and assumptions. From a software engineering
+perspective, ED was rewritten from C into F2003 and the structure of the code has been significantly altered to fit into
+the CESM/CLM framework.
+
+The SPITFIRE code is based on (http://www.biogeosciences.net/7/1991/2010/bg-7-1991-2010.pdf) and has been significantly
+altered and extended to fit into the CESM/CLM framework.
+
+Other points:
+ 1. removed many unused variables and module uses as reported by nag
+ 2. changing text wrapping on comments so they end at 139 characters
+ 3. changed many text based logical operators (.ne., .lt., .ge.) to their mathematical equivalents (/=, <, >=)
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: ED functionality brought in. Does not change existing
+configurations. For ED, there are now a CNED and BGCED compset. Use CNED until BGC issues are worked
+out.
+
+Describe any changes made to the namelist: ED functionality brought in. Does not change existing
+configurations. For ED compsets, there are two new namelist variables. They are:
+
+use_ed = .true.
+use_ed_spit_fire = .true.
+
+use_ed_spit_fire is set to true by default if use_ed is on. Unless you are running our ED tests, you will
+have to change your user_nl_clm to something like:
+
+paramfile ='/glade/p/cesmdata/cseg/inputdata/lnd/clm2/edParams/CLMPARAMS_ED_011514.nc'
+finidat = ''
+hist_mfilt = 365
+hist_nhtfrq = -24
+
+hist_empty_htapes = .true.
+
+hist_fincl1='NPP','GPP','BTRAN','TOTVEGC','H2OSOI','TLAI','LITTER_IN','LITTER_OUT',
+'STORVEGC','FIRE_AREA','SCORCH_HEIGHT','FIRE_INTENSITY','FIRE_TFC_ROS','fire_fuel_mef',
+'LITTERC','fire_fuel_bulkd','fire_fuel_sav','FIRE_NESTEROV_INDEX','PFTbiomass',
+'PFTleafbiomass','FIRE_ROS','WIND','TFC_ROS','DISPVEGC','AREA_TREES','AREA_PLANT'
+
+If on goldbach, use:
+
+paramfile ='/fs/cgd/csm/inputdata/lnd/clm2/edParams/CLMPARAMS_ED_011514.nc'
+
+List any changes to the defaults for the boundary datasets: N/A.
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by: Stefan Muszala and Rosie Fisher. Detailed code review by Bill Sacks, Mariana Vertenstein,
+Ben Andre, and Erik Kluzek. Discussion of code review included Dave Lawrence, Forrest Hoffmann
+and Ryan Knox.
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+Changed externals to a branch_tag that supports ED compsets. We are using branch_tags because trunk
+scripts does not work with all of the current CLM tests:
+
+-scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/add_dynlu_tests_tags/add_dynlu_tests_n03_scripts4_140305
++scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/ED_scripts_tags/ED_scripts_015_140305_rev
+
+List all files eliminated: N/A
+
+List all files added and what they do:
+
+### SPITFIRE code
+# new SPITFIRE directory
+A + models/lnd/clm/src/clm4_5/main/spitfireSF
+# handles SPITFIRE parameters
+A + models/lnd/clm/src/clm4_5/main/spitfireSF/SFParamsMod.F90
+# main SPITFIRE code
+A + models/lnd/clm/src/clm4_5/main/spitfireSF/SFMainMod.F90
+
+# pull out, move to a shared location, place in own module
+A + models/lnd/clm/src/util_share/quadraticMod.F90
+
+### new source and directories for ED
+## ED code required for biogeophysics
+# ED directory in biogeophys
+A + models/lnd/clm/src/clm4_5/biogeophys/ED
+# Calculates daily carbon flux drivers from hourly calculations.
+A + models/lnd/clm/src/clm4_5/biogeophys/ED/EDAccumulateFluxesMod.F90
+# Calculates absorbed, reflected and transmitted radiation in diffuse and direct streams for
+# each of the canopy layer x PFT x leaf layer three-dimensional matrix. Uses iterative Norman
+# radiation transfer scheme.
+A + models/lnd/clm/src/clm4_5/biogeophys/ED/EDSurfaceAlbedoMod.F90
+# Main photosynthesis model. Calculates leaf level fluxes on a canopy layer x PFT x leaf layer
+# three-dimensional matrix. Sums to canopy to produce overall canopy conductance. Unpacks leaf-
+# level fluxes into cohort level fluxes. Uses same scientific assumptions as CLM4.5.
+A + models/lnd/clm/src/clm4_5/biogeophys/ED/EDPhotosynthesisMod.F90
+# Generates PFT specific BTRAN vector for each ED patch. Includes option for SPA-like calculations.
+A + models/lnd/clm/src/clm4_5/biogeophys/ED/EDBtranMod.F90
+
+## ED code required for biogeochemistry
+# ED directory in biogeochem
+A + models/lnd/clm/src/clm4_5/biogeochem/ED
+# Determines which cohorts are in the upper and lower canopy layers. Sets leaf area index inputs to biogeophysics calculations.
+A + models/lnd/clm/src/clm4_5/biogeochem/ED/EDCanopyStructureMod.F90
+# Initializes some ED-specific variables to zero at startup.
+A + models/lnd/clm/src/clm4_5/biogeochem/ED/EDSetValuesMod.F90
+# Contains allometric relationships between vegetation properties (height, dbh, LAI, dead biomass, live biomass, crown area).
+A + models/lnd/clm/src/clm4_5/biogeochem/ED/EDGrowthFunctionsMod.F90
+# Creates, fuses, terminates, sorts, counts and copies cohort structures.
+A + models/lnd/clm/src/clm4_5/biogeochem/ED/EDCohortDynamicsMod.F90
+# Contains all calculations of derivatives of biomass, litter and seed pools. Also includes phenology model, seed and litter production and decay models, and canopy optimization model.
+A + models/lnd/clm/src/clm4_5/biogeochem/ED/EDPhysiologyMod.F90
+# Creates, fuses, terminates, sorts, counts and copies patch structures.
+A + models/lnd/clm/src/clm4_5/biogeochem/ED/EDPatchDynamicsMod.F90
+
+## ED core functionality and types that interact with CLM (generally not science)
+# ED directory in main
+A + models/lnd/clm/src/clm4_5/main/ED
+# Transmits required information for CLM (tlai, htop, tile weights). Updates ED-specific history field variables.
+A + models/lnd/clm/src/clm4_5/main/ED/EDCLMLinkMod.F90
+# Initializes ED PFT parameter structure.
+A + models/lnd/clm/src/clm4_5/main/ED/EDInitTimeConst.F90
+# Contains ED-specific variables for CLM
+A + models/lnd/clm/src/clm4_5/main/ED/EDClmType.F90
+# Adds history field variables specific to ED to history file.
+A + models/lnd/clm/src/clm4_5/main/ED/EDHistFldsMod.F90
+# Prints out and reads in ED state vector to/from history files.
+A + models/lnd/clm/src/clm4_5/main/ED/EDRestVectorMod.F90
+# Initializes ED-specific variables for CLM
+A + models/lnd/clm/src/clm4_5/main/ED/EDClmTypeInitMod.F90
+# Allocates ED PFT specific variables.
+A + models/lnd/clm/src/clm4_5/main/ED/EDPftvarcon.F90
+# Initializes ED site, patch and cohort structures, either to restarting or bare ground values.
+A + models/lnd/clm/src/clm4_5/main/ED/EDInitMod.F90
+# Allocates and initializes ED parameters (that are not PFT specific).
+A + models/lnd/clm/src/clm4_5/main/ED/EDParamsMod.F90
+# Main ED model routine. Calls all other daily ED dynamics, integrates variables, checks carbon balance.
+A + models/lnd/clm/src/clm4_5/main/ED/EDMainMod.F90
+# Contains ED type structures (cohort, site, patch) and static values.
+A + models/lnd/clm/src/clm4_5/main/ED/EDTypesMod.F90
+
+# utility routine to help in reading parameter files
+A + models/lnd/clm/src/clm4_5/main/paramUtilMod.F90
+# transfers weights calculated internally by ED into wtcol.
+A + models/lnd/clm/src/clm4_5/dyn_subgrid/dynEDMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+### build modifications
+# add ED source directories for build
+M models/lnd/clm/bld/configure
+# build namelist additions for ED
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+# setup_cmdl_ed_mode addition. sets namelist vars
+M models/lnd/clm/bld/CLMBuildNamelist.pm
+
+### util_share modifications
+# add function is_beg_curr_day()
+M models/lnd/clm/src/util_share/clm_time_manager.F90
+# modify get_proc_bounds to include beg, end cohort
+M models/lnd/clm/src/util_share/accumulMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/util_share/ncdio_pio.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/util_share/ncdio_pio.F90.in
+
+### 4_5 Modifications
+# change text based logical with math style (.gt. to >, .ne. to /=)
+# modify get_proc_bounds to include beg, end cohort
+M models/lnd/clm/src/clm4_5/biogeochem/CNRestMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate1Mod.F90
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/biogeochem/CNBalanceCheckMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/biogeochem/CNMRespMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNPrecisionControlMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNInitMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNAnnualUpdateMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+# move some variables from stack to heap.
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeBGCMod.F90
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+# change spacing, text wrapping
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeCNMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/main/initInterp.F90
+# change spacing, text wrapping
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+# add call to EDInitTimeConst
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/main/initTimeConstMod.F90
+# add decomposition for cohort dimension
+M models/lnd/clm/src/clm4_5/main/decompInitMod.F90
+# add call for ed_init
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+# add code for cohort dimension
+M models/lnd/clm/src/clm4_5/main/subgridMod.F90
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/main/initColdMod.F90
+# change get_proc_global to support cohort dimension
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+# add use_ed logical to support cohort dimension
+M models/lnd/clm/src/clm4_5/main/restFileMod.F90
+# broadcast ed namelist variables
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+# use_ed logical to call edmodel
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+# change spacing, text wrapping
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+# clean up unused variables from nag compiler warnings
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+# add routine set_cohort_decomp
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/main/initSoilParVICMod.F90
+# change spacing, text wrapping
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/main/pftvarcon.F90
+# add support for cohort dimension
+M models/lnd/clm/src/clm4_5/main/decompMod.F90
+# change spacing, text wrapping
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+# add call to read ED and SPITFIRE params
+R + models/lnd/clm/src/clm4_5/main/readParamsMod.F90
+# add routine set_cohort_decomp
+# add use_ed logical(s)
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeInitMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeophys/FrictionVelocityMod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+# change get_proc_global to support cohort dimension
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+# change get_proc_global to support cohort dimension
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+# change get_proc_global to support cohort dimension
+M models/lnd/clm/src/clm4_5/biogeophys/ActiveLayerMod.F90
+# change get_proc_global to support cohort dimension
+M models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+# change spacing, text wrapping
+# add use_ed logical(s)
+# calculate ed root fractionation
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+# add use_ed logical(s) for forc_solai and parsun
+# use_ed reporting
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+# add use_ed logical(s) for norman_radiation
+# change spacing
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+# remove unused variable
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeCon.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+# change text based logical with math style (.gt. to >, .ne. to /=)
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+# add use_ed logical(s) for call dyn_ED
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynSubgridDriverMod.F90
+
+### 4_0 Modifications
+# add optional cohort argument for new ED dimension to get_proc_global and
+# get_proc_bounds_old for 40 backward compatibility
+M models/lnd/clm/src/clm4_0/main/decompMod.F90
+
+CLM testing:
+
+--SNICARFRC - moved ERI_D.T31_g37.ICLM45.goldbach_nag.clm-SNICARFRC to goldbach and nag. This is a BFAIL.
+
+--Testing for new ED compsets. All compare hist portions were BFAIL's since this is the first time
+the tests are being put in place.
+
+--ED and yellowstone [ intel | pgi ]
+ PASS ERS_D_Mmpi-serial.1x1_brazil.ICLM45CNED.yellowstone_[intel | pgi].clm-edTest
+ PASS SMS.f10_f10.ICLM45CNED.yellowstone_[intel | pgi].clm-edTest
+ PASS SMS.f19_g16.ICLM45CNED.yellowstone_[intel | pgi].clm-edTest
+ PASS SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.yellowstone_[intel | pgi].clm-edTest
+
+--ED and goldbach [nag | intel | pgi ]
+ PASS ERS_D_Mmpi-serial.1x1_brazil.ICLM45CNED.goldbach_[nag | intel | pgi ].clm-edTestGb
+ PASS SMS_D_Mmpi-serial.5x5_amazon.ICLM45CNED.goldbach_[nag | intel | pgi ].clm-edTestGb
+
+--CLM history file comparison:
+
+yellowstone [intel | pgi] - OK
+
+I ran without the -model_gen_comp option, but ran component_gen_comp and summarize_cprnc_diffs
+by hand for both yellowstone_intel and yellowstone_pgi. These are both OK
+
+--Regular tests (aux_clm testlist)
+
+ yellowstone_intel - OK
+ yellowstone_pgi - OK
+ goldbach_nag - OK
+ goldbach_intel - OK
+ goldbach_pgi - OK
+
+CLM tag used for the baseline comparisons: clm4_5_71
+
+Changes answers relative to baseline: No. Existing compsets do not change.
+If you run with an *ED* compset, then results will differ, but that is expected.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_71
+Originator(s): Bill Sacks & Jeremy Fyke
+Date: Fri May 2 13:00:10 MDT 2014
+One-line Summary: 2-way feedbacks for glacier, veg columns compute glacier SMB, and related changes
+
+Purpose of changes:
+
+ (1) Bring in two-way feedbacks for glacier when coupled to CISM, via dynamic
+ landunits, so that CLM's glacier area remains consistent with CISM's
+ glacier area. Also update CLM's glacier topography to be consistent with
+ CISM.
+
+ (2) Add an elevation class "0", which provides surface mass balance over the
+ vegetated portion of the grid cell. This is used to achieve glacial
+ inception in CISM. Along with this change, also (a) set the topographic
+ height of non-glacier areas based on bare land topography from CISM, and
+ (b) change the downscaling of atmospheric fields so that they are also
+ downscaled over vegetated columns within CISM's ice mask, to achieve
+ greater consistency between what's happening in the glacier and vegetated
+ portions of CISM's domain. (Note that, because longwave radiation is
+ normalized, downscaling it over the vegetated column also changes answers
+ over glacier columns.) These changes were primarily from Jeremy Fyke.
+
+ (3) Rework some consistency checks to play nicely with dynamic landunits.
+
+ (4) Rework unit test build to use libraries for the clm source and csm_share source
+
+ (5) Misc. other changes, as noted below.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ - 1969 (incorrect values for QSNWCPICE_NODYNLNDUSE)
+ - 1929 (dynFileMod breaks with gfortran 4.8)
+ - 1832 (logic for weights error check differs between clm4.0 and clm4.5)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist:
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by:
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+========= Add tests
+-scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140305
++scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/add_dynlu_tests_tags/add_dynlu_tests_n03_scripts4_140305
+
+========= Pull in Machines_140318, needed for goldbach
+-scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/arfs_tags/arfs_01_mach140218
++scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/arfs_tags/arfs_02_mach140218
+
+========= Changes needed for elevation class 0, etc.
+-models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_07
++models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_10
+-models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140416
++models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140501
+
+
+List all files eliminated:
+
+========= Point to real decompMod rather than mock
+D models/lnd/clm/test/unit_testers/clm4_5/mock/main/decompMod_boundsTypeDecl.F90
+
+List all files added and what they do:
+
+========= Most of reweightMod.F90 moved here; also includes the following changes:
+ (1) renames some subroutines
+ (2) adds some diagnostic fields that are written to the history file
+ (3) adds some utility routines such as get_landunit_weight
+A models/lnd/clm/src/clm4_5/main/subgridWeightsMod.F90
+
+========= Add code to initialize newly-active columns
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynInitColumnsMod.F90
+
+========= New unit tests
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynInitColumns_test/test_init_columns.pf
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynInitColumns_test/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynInitColumns_test
+
+========= Need new mocks and new real files now that we use the real decompMod, and also because of endrun calls
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/GetGlobalValuesMod_mock.F90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share/mct_mod_mock.F90
+A models/lnd/clm/src/util_share/CMakeLists.txt
+
+========= Need stub histFileMod now that many modules include calls to hist_addfld
+A models/lnd/clm/test/unit_testers/clm4_5/mock/main/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/mock/main/histFileMod_mock.F90
+
+List all existing files that have been modified, and describe the changes:
+
+========= Update glacier cover and topographic heights based on values from CISM; rework
+ code to accommodate icemask and elevation class 0
+M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+M models/lnd/clm/src/cpl/clm_cpl_indices.F90
+M models/lnd/clm/src/cpl/lnd_import_export.F90
+
+========= Change to be consistent with clm4_5 version, adding elevation class 0 and
+ x2s%icemask (neither of which are used in the clm4_0 version)
+M models/lnd/clm/src/clm4_0/main/clm_glclnd.F90
+
+========= Add glc_do_dynglacier namelist option, which triggers off of
+ CLM_UPDATE_GLC_AREAS; rename glc_dyntopo to glc_dyn_runoff_routing and make it
+ also trigger off of CLM_UPDATE_GLC_AREAS; add glc_snow_persistence_max_days; add
+ dynpft_consistency_checks and finidat_consistency_checks groups
+M models/lnd/clm/bld/user_nl_clm
+M models/lnd/clm/bld/CLMBuildNamelist.pm
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+M models/lnd/clm/bld/unit_testers/env_run.xml
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+ - also use parameter for file name length, so that other modules can
+ ensure consistency of char length
+
+========= Add functions to convert between col%itype and icemec class; also add
+ landunit_names vector
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+
+========= Add functions add_landunit, add_column, add_patch (cleans up this code, and will
+ assist with setting up unit tests) and use new functions from clm_varcon
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+
+========= Use new functions from initGridCellsMod
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynLandunitAreaMod.F90
+
+========= Remove old consistency checks for restart file, add new ones (these changes are
+ needed so that consistency checks work right with dynamic landunits, and we're
+ adding some new consistency checks that weren't in place before)
+M models/lnd/clm/src/util_share/clm_time_manager.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90.in
+M models/lnd/clm/src/clm4_5/main/restFileMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+ - also read all subgrid weights and glc topo on restart
+ (some of these used to be read in BiogeophysRestMod;
+ we need all of them with dynamic landunits)
+ - also remove redundant mcdate, mcsec
+ - also add icemask restart variable
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+ - also remove redundant PFT_WTGCELL, PFT_WTLUNIT, PFT_WTCOL
+ (equivalent variables are already output by subgridRestMod)
+ - also add snow_persistence
+
+========= Remove old consistency checks for pftdyn file, add new ones
+ (these changes are needed so that consistency checks work right with dynamic landunits)
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+ - also call init_subgrid_weights_mod, put call to update_clm_s2x in
+ loop over clumps, and move deallocation of topo_glc_mec to later
+M models/lnd/clm/src/clm4_5/main/clm_varsur.F90
+M models/lnd/clm/src/clm4_5/main/surfrdMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynpftFileMod.F90
+
+========= Add new dimensions for multi-level fields - for subgrid weight diagnostics; add
+ a dimension to accommodate fields dimensioned by glc_nec+1
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+
+========= Call new routines (update_clm_x2s, set_subgrid_diagnostic_fields, initialize_new_columns)
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynSubgridDriverMod.F90
+
+========= Make various code operate over veg as well as icemec columns
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+ - also put call to update_clm_s2x in a loop over clumps
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+ - also compute snow_persistence, and rework some code for clarity
+
+========= Add initialization of icemask & snow_persistence; change
+ initialization of glc_topo and h2osno
+M models/lnd/clm/src/clm4_5/main/initColdMod.F90
+
+========= Add l2g_scale_type = natveg
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+
+========= Fix QSNWCPICE_NODYNLNDUSE, add l2g_scale_type for QICE & related fields, add
+ SNOW_PERSISTENCE and ICE_MASK, change _FORC fields to include elevation class 0
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+
+========= Get rid of associate statement that caused problems with some compilers
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynFileMod.F90
+
+========= Track old col%active values, needed for initializing new columns
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynPriorWeightsMod.F90
+
+========= Moved most functionality to subgridWeightsMod.F90 (now just a small
+ wrapper to some of the stuff in subgridWeightsMod, whose main purpose
+ is to avoid a dependency of subgridWeightsMod on filterMod)
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+
+========= Changes for unit tests:
+ (1) Update unit tests to use libraries for the clm source and csm_share source
+ (2) New unit test
+ (3) Make unit tests work with latest CLM trunk
+ (4) Point to real decompMod rather than mock
+M models/lnd/clm/test/unit_testers/clm4_5/biogeophys/Daylength_test/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynTimeInfo_test/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_var.F90
+M models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_var.F90.in
+M models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/spmdMod_mock.F90
+M models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share/shr_sys_mod_mock.F90
+M models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/mock/main/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/mock/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/CMakeLists.txt
+M models/lnd/clm/src/clm4_5/main/CMakeLists.txt
+M models/lnd/clm/src/clm4_5/dyn_subgrid/CMakeLists.txt
+
+========= Add icemask & snow_persistence; remove unused glc_frac, glc_rofi & glc_rofl
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+
+========= Minor changes to 'use' statements for the sake of breaking dependencies for unit tests
+M models/lnd/clm/src/util_share/GetGlobalValuesMod.F90
+M models/lnd/clm/src/clm4_5/main/decompMod.F90
+
+========= Changes to comments only
+M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/util_share/domainMod.F90
+
+========= Remove no-longer-failing test, change failType of a test (it was RUN
+ rather than FAIL at least as far back as clm4_5_69)
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+
+ regular tests
+
+ NOTE: Most tests were done on bare_land_smb_n15_clm4_5_70, which did NOT
+ include r59820 (add a comma in histFileMod to fix a syntax error caught by
+ nag). After r59820, reran all goldbach_nag tests, plus one goldbach_pgi and
+ one goldbach_intel.
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ goldbach_nag: ok
+ goldbach_intel: ok
+ goldbach_pgi: ok
+
+ component_gen_comp on yellowstone_intel & yellowstone_pgi: ok
+
+CLM tag used for the baseline comparisons: clm4_5_69 (clm4_5_68 for a few tests
+ with missing baselines in clm4_5_69)
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All IG compsets (i.e., GLC compsets)
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ New Climate
+
+ Changes are:
+
+ (1) All IG compsets (clm4.0 & clm4.5) change due to new CISM external
+
+ (2) In addition, IG compsets with CLM4.5 change further due to:
+ (a) 2-way feedbacks (CLM updated to match CISM)
+ (b) downscaling done over vegetated landunits within the icemask
+
+ (3) Also, the QSNWCPICE_NODYNLNDUSE history diagnostic field changes for
+ ALL CLM4.5 runs, due to fixing bug 1969.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_70
+Originator(s): muszala (Stefan Muszala)
+Date: Fri Apr 18 08:24:44 MDT 2014
+One-line Summary: bring in SHR_ASSERT macros
+
+Purpose of changes: bring in SHR_ASSERT macros for Santos.
+
+Add: #include "shr_assert.h" to source files
+Remove: use shr_assert_mod , only : shr_assert
+
+then replace "call shr_assert" with SHR_ASSERT_ALL when asserting more than one dim
+
+- call shr_assert((ubound(carr) == (/bounds%endc/)), errMsg(__FILE__, __LINE__))
++ SHR_ASSERT_ALL((ubound(parr) == (/bounds%endp/)), errMsg(__FILE__, __LINE__))
+
+and use SHR_ASSERT when asserting one dimension
+
+- call shr_assert(bounds%level == BOUNDS_LEVEL_CLUMP, errMsg(__FILE__, __LINE__))
++ SHR_ASSERT(bounds%level == BOUNDS_LEVEL_CLUMP, errMsg(__FILE__, __LINE__))
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: Self, Santos, Sacks
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+-scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_140218
++scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/arfs_tags/arfs_01_mach140218
+
+-scripts/ccsm_utils/CMake https://github.com/quantheory/CMake_Fortran_utils/tags/CMake_Fortran_utils_140109
++scripts/ccsm_utils/CMake https://github.com/quantheory/CMake_Fortran_utils/tags/CMake_Fortran_utils_140403
+
+-models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_34
++models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_37
+
+-models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140303
++models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140416
+
+-models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_131231
++models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/branch_tags/shr_assert_macro_tags/shr_assert_macro_n04_share3_140115
+
+-models/utils/mct https://github.com/quantheory/MCT/tags/compiler_fixes_n03_MCT_2.8.3
++models/utils/mct https://github.com/quantheory/MCT/tags/compiler_fixes_n04_MCT_2.8.3
+
+-models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_8_9/pio
++models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_8_11/pio
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/TridiagonalMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/DaylengthMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/FrictionVelocityMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SNICARMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BandDiagonalMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynLandunitAreaMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynTimeInfoMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynSubgridDriverMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeUninterpMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarMod.F90.in
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90.in
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeUninterpMod.F90.in
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynPriorWeightsMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynpftFileMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynConsBiogeophysMod.F90
+
+M SVN_EXTERNAL_DIRECTORIES
+
+CLM testing:
+
+Tested with two sets of externals because:
+
+The old scripts do not allow any PGI tests to run while the new scripts have various issues
+with ERI tests and selected PGI tests plus nag debug runs (fixed in a more recent machines tag).
+
+By running with two sets of externals, I am confident that the source mods for SHR_ASSERT are
+working correctly. When CLM gets updates in scripts and machines, these will be updated in
+later tags.
+
+I) Those included in this tag
+II) Those that are a part of cesm1_3_alpha09b (only tested clm45).
+
+ build-namelist tests: N/A
+
+ regular tests: for (I) above:
+
+ yellowstone_intel - OK - component_comp_gen - OK
+ goldbach_nag - OK
+ goldbach_intel - OK
+
+ regular tests: for (II) above:
+
+1) Yellowstone + Intel : all ERI tests are completing ref1 and ref2 but die a silent death in the base case. Erik looks like you changed Testlists, so the two VIC tests might be expected.
+
+>>./cs.status.70Intel.yellowstone | grep -v CLM50 | grep -v PASS | grep -v tputcomp | grep -v ERI_D.ne30_g16.ICLM45BGC.yellowstone_intel.clm-vrtlay | grep -v ERS_D.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay | grep -v ERS_Ld211_D_P112x1.f10_f10.ICNCROP
+RUN ERI.f09_g16.ICLM45BGC.yellowstone_intel.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI.f10_f10.ICLM45BGC.yellowstone_intel.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI.f19_g16.ICLM45BGC.yellowstone_intel.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI_D.T31_g37.ICLM45.yellowstone_intel.clm-SNICARFRC.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI_D.f09_g16.ICLM45BGC.yellowstone_intel.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI_D.f10_f10.ICLM45BGC.yellowstone_intel.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI_D.f19_g16.ICLM45BGC.yellowstone_intel.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI_D.ne30_g16.ICLM45BGC.yellowstone_intel.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+RUN ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default.GC.70Intel
+-- ref1 and ref2 run, then no output in non-ref run
+FAIL ERS_D.f10_f10.ICLM45VIC.yellowstone_intel.clm-vrtlay.GC.70Intel.compare_hist.clm4_5_69
+	 69 Comparing hist file with baseline hist file, /glade/scratch/muszala/ERS_D.f10_f10.ICLM45VIC.yellowstone_intel.clm-vrtlay.GC.70Intel/run/ERS_D.f10_f10.ICLM45VIC.yellowstone_intel.clm-vrtlay.GC.70Intel.cpl.hi.0001-01-12-00000.nc /glade/p/cesmdata/cseg/ccsm_baselines/clm4_5_69/ERS_D.f10_f10.ICLM45VIC.yellowstone_intel.clm-vrtlay/cpl.hi.nc
+ 70 ncdump1 done
+ 71 ncdump2 done
+ 72 comparing split files x[a-z][a-z]
+ 73 xaa
+ 74 6979,6981c6979,6981 < 0.983149585541109, 0.972017300931466, 0.972017300784614, < 0.972017300929172, 0.964088275988772, 0.971373805810303, < 0.977583443108289, 0.983149585551217, 0.983149585568791, ---
+ 75 FAIL
+ 76 hist file comparison is FAIL
+FAIL SMS.f19_g16.ICLM45VIC.yellowstone_intel.clm-default.GC.70Intel.compare_hist.clm4_5_69
+ 60 /glade/u/spooldir/1397689222.575650.shell: Storing new baseline in /glade/p/cesmdata/cseg/ccsm_baselines/clm4_5_70/SMS.f19_g16.ICLM45VIC.yellowstone_intel.clm-default
+ 61 Comparing hist file with baseline hist file, /glade/scratch/muszala/SMS.f19_g16.ICLM45VIC.yellowstone_intel.clm-default.GC.70Intel/run/SMS.f19_g16.ICLM45VIC.yellowstone_intel.clm-default.GC.70Intel.cpl.hi.0001-01-06-00000.nc /glade/p/cesmdata/cseg/ccsm_baselines/clm4_5_69/SMS.f19_g16.ICLM45VIC.yellowstone_intel.clm-default/cpl.hi.nc
+ 62 ncdump1 done
+ 63 ncdump2 done
+ 64 comparing split files x[a-z][a-z]
+ 65 xad
+ 66 18300c18300 < 0.983410370293909, 0.984052369383093, 0.979227772964994, --- > 0.983410370293909, 0.984052369383093, 0.979228345951215, 18341,18350c18341,18350
+ 67 FAIL
+ 68 hist file comparison is FAIL
+
+2) Goldbach + NAG seems to have passed OK, _D runs have failed as expected. The reporting is messed up... i.e., TestStatus.out looks OK, but TestStatus does not.
+
+>> ./cs.status.70nag.goldbach | grep -v CLM50 | grep -v PASS | grep -v tputcomp | grep -v _D
+FAIL ERI.f10_f10.ICLM45BGC.goldbach_nag.clm-reduceOutput.GC.70nag
+--look at /scratch/cluster/muszala/tests/ERI.f10_f10.ICLM45BGC.goldbach_nag.clm-reduceOutput.GC.70nag/TestStatus.out and there is no FAIL
+FAIL ERI.f19_g16.ICLM45BGC.goldbach_nag.clm-reduceOutput.GC.70nag
+--/scratch/cluster/muszala/tests/ERI.f19_g16.ICLM45BGC.goldbach_nag.clm-reduceOutput.GC.70nag and there is no FAIL
+
+3) Goldbach + Intel - these look all like passes to me: see /scratch/cluster/muszala/tests/*/TestStatus.out
+
+>> ./cs.status.70intel.goldbach | grep -v CLM50 | grep -v PASS | grep -v tputcomp
+FAIL ERI.f10_f10.ICLM45BGC.goldbach_intel.clm-reduceOutput.GC.70intel
+-- Shows FAIL but PASSes in TestStatus.out
+FAIL ERI.f19_g16.ICLM45BGC.goldbach_intel.clm-reduceOutput.GC.70intel
+-- Shows FAIL but PASSes in TestStatus.out
+FAIL ERI_D.T31_g37.I1850CLM45.goldbach_intel.clm-reduceOutput.GC.70intel
+-- Shows FAIL but PASSes in TestStatus.out
+FAIL ERI_D.f10_f10.ICLM45BGC.goldbach_intel.clm-reduceOutput.GC.70intel
+-- Shows FAIL but PASSes in TestStatus.out
+FAIL ERI_D.f19_g16.ICLM45BGC.goldbach_intel.clm-reduceOutput.GC.70intel
+-- Shows FAIL but PASSes in TestStatus.out
+
+4) Goldbach + PGI - some are indicating FAIL with TestStatus.out shows PASSes, others are straight out FAILs
+
+./cs.status.70pgi.goldbach | grep -v CLM50 | grep -v PASS | grep -v tputcomp | grep -v SMS_Ly1.f19_g16.ICLM45BGCCROP.frankfurt_pgi
+FAIL ERI.f10_f10.ICLM45BGC.goldbach_pgi.clm-reduceOutput.GC.70pgi
+-- Shows FAIL but PASSes in TestStatus.out
+FAIL ERI.f19_g16.ICLM45BGC.goldbach_pgi.clm-reduceOutput.GC.70pgi
+-- ref1 fail:
+ 22 g005.cgd.ucar.edu - daemon did not report back when launched
+ 23 g006.cgd.ucar.edu - daemon did not report back when launched
+ 24 g009.cgd.ucar.edu - daemon did not report back when launched
+ 25 /usr/mpi/pgi/openmpi-1.4.3-qlc/bin/orted: error while loading shared libraries: libpgc.so: cannot open shared object file: No such file or directory
+FAIL ERI_D.f10_f10.ICLM45BGC.goldbach_pgi.clm-reduceOutput.GC.70pgi
+-- Shows FAIL but PASSes in TestStatus.out
+FAIL ERI_D.f19_g16.ICLM45.goldbach_pgi.clm-reduceOutput.GC.70pgi
+-- ref1 fail:
+ 1 /usr/mpi/pgi/openmpi-1.4.3-qlc/bin/orted: error while loading shared libraries: libpgc.so: cannot open shared object file: No such file or directory
+ 2 /usr/mpi/pgi/openmpi-1.4.3-qlc/bin/orted: error while loading shared libraries: libpgc.so: cannot open shared object file: No such file or directory
+ 3 /usr/mpi/pgi/openmpi-1.4.3-qlc/bin/orted: error while loading shared libraries: libpgc.so: cannot open shared object file: No such file or directory
+ 4 --------------------------------------------------------------------------
+ 5 A daemon (pid 29755) died unexpectedly with status 127 while attempting
+FAIL ERI_D.f19_g16.ICLM45BGC.goldbach_pgi.clm-reduceOutput.GC.70pgi
+-- ref1 fail:
+ 23 g021.cgd.ucar.edu - daemon did not report back when launched
+ 24 g022.cgd.ucar.edu - daemon did not report back when launched
+ 25 g023.cgd.ucar.edu - daemon did not report back when launched
+ 26 /usr/mpi/pgi/openmpi-1.4.3-qlc/bin/orted: error while loading shared libraries: libpgc.so: cannot open shared object file: No such file or directory
+FAIL SMS_Ld5.f19_g16.IRCP45CLM45BGC.goldbach_pgi.clm-decStart.GC.70pgi
+-- ERROR in /var/spool/torque/mom_priv/jobs/19500.goldbach.cgd.ucar.edu.SC: file /fs/cgd/csm/ccsm_baselines/clm4_5_69/SMS_Ld5.f19_g16.IRCP45CLM45BGC.goldbach_pgi.clm-decStart/cpl.hi.nc does not exist
+
+CLM tag used for the baseline comparisons: clm4_5_69
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_69
+Originator(s): andre (Benjamin Andre,LAWRENCE BERKELEY NATIONAL LABORATORY,510-486-4617)
+Date: Tue Mar 18 21:12:34 MDT 2014
+One-line Summary: start unit testing build-namelist
+
+Purpose of changes: start doing unit testing on construction of the clm namelist.
+This involved moving the contents of build-namelist into CLMBuildNamelist.pm and
+bringing in perl infrastructure to supplement Test::More. Initial test suites are
+implemented for several namelist variables.
+
+Requirements for tag: N/A
+
+Test level of tag: regular, build_namelist
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, clm-cmt
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do:
+
+ models/lnd/clm/bld:
+ CLMBuildNamelist.pm - contents of build-namelist
+ test_build_namelist/perl5lib/* - CPAN modules needed for unit testing
+ test_build_namelist/t/input/* - mock input files for build-namelist tests
+ test_build_namelist/t/template_test_XXX.pm - template for new tests
+ test_build_namelist/t/test_*.pm - unit tests
+ test_build_namelist/test_build_namelist.pl - unit test driver
+ test_build_namelist/README
+
+List all existing files that have been modified, and describe the changes:
+
+ models/lnd/clm/bld:
+ build-namelist - moved contents into CLMBuildNamelist.pm, now just a driver calling main function.
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone - OK new and existing generate tests
+ goldbach - OK new and existing generate tests
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK
+ yellowstone_pgi - OK
+ goldbach_nag - OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs: N/A
+
+ short tests (aux_clm_short):
+
+ yellowstone_intel - OK
+ yellowstone_pgi - OK
+
+ tools testing: N/A
+
+CLM tag used for the baseline comparisons: clm4_5_68
+
+Changes answers relative to baseline: No
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_68
+Originator(s): erik (Erik)
+Date: Fri Mar 7 16:43:23 MST 2014
+One-line Summary: Update scripts to version that turns on transient CO2 streams for
+ transient compsets, and update CISM (changes answers)
+
+Purpose of changes:
+
+Bring in the scripts version that by default had transient CO2 for any transient
+compsets. You can still turn it off by setting DATM_CO2_TSERIES=FALSE in env_run.xml.
+Also bring in the latest CISM version that has answer changes for any IG compsets.
+It fixes fields sent from CISM to the coupler (fixes an exact restart problem).
+
+Requirements for tag:
+ update scripts and CISM, transient and IG compsets have different answers
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 979 (adding CO2 streams)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ scripts to scripts4_140305
+ cism to cism1_140303
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes: None
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone yes
+
+ regular tests (aux_clm):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ goldbach_nag yes
+ goldbach_pgi yes
+ goldbach_intel yes
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+
+ tools testing: None
+
+CLM tag used for the baseline comparisons: clm4_5_67
+
+Changes answers relative to baseline: Yes!
+
+ Summarize any changes to answers:
+ - what code configurations: transient and IG compsets
+ - what platforms/compilers: all
+ - nature of change: larger than roundoff
+
+===============================================================
+===============================================================
+Tag name: clm4_5_67
+Originator(s): mvertens
+Date: Thu Mar 6 16:53:23 MST 2014
+One-line Summary: removed initSurfAlb as part of the initialization
+
+Purpose of changes: removed the call to initSurfAlb as well as part of
+ the initialization and also removed the routine from the clm4.5
+ code base
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+M bld/namelist_files/namelist_defaults_clm4_5.xml
+ - clmi.ICRUCLM45BGCCROPmp24.0241-01-01.10x15_USGS_simyr2000_c140111.nc had not
+ in fact been created - this affected the PEM test in the goldbach clm45 test suite
+ - the default namelist has not been backed up to the original
+ clmi.ICRUCLM45BGCCROPmp24.0241-01-01.10x15_USGS_simyr2000_c131028.nc
+ for now
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M src/clm4_5/biogeochem/ch4InitMod.F90
+ - removal of code block that is no longer needed due to removal of initSurfAlb
+ (this had already been commented out in clm4_5_66)
+
+M src/clm4_5/main/initInterp.F90
+ - minor bug fix the turn off spval by default for nonactive points
+
+D src/clm4_5/main/initSurfAlbMod.F90
+M src/clm4_5/main/clm_initializeMod.F90
+ - removal of call initSurfAlb (main purpose of this tag)
+ - removal of code to upgrade old initial data files to have new metadata
+ a new scheme should be put in place with a namelist option to take clm4.5
+ restart datasets that have been created prior to the introduction of initInterp
+ and introduce the new metadata at run time
+
+M src/clm4_5/main/initColdMod.F90
+ - had to introduce setting values for the following variables in order to remove
+ call to initSurfAlb
+ cps%albgrd_pur(bounds%begc:bounds%endc,:) = 0.2_r8
+ cps%albgri_pur(bounds%begc:bounds%endc,:) = 0.2_r8
+ cps%albgrd_bc(bounds%begc:bounds%endc,:) = 0.2_r8
+ cps%albgri_bc(bounds%begc:bounds%endc,:) = 0.2_r8
+ cps%albgrd_oc(bounds%begc:bounds%endc,:) = 0.2_r8
+ cps%albgri_oc(bounds%begc:bounds%endc,:) = 0.2_r8
+ cps%albgrd_dst(bounds%begc:bounds%endc,:) = 0.2_r8
+ cps%albgri_dst(bounds%begc:bounds%endc,:) = 0.2_r8
+
+M src/clm4_5/main/clm_driver.F90
+ - just comments
+
+CLM testing:
+
+ regular tests (aux_clm): OK means only failures were expected
+
+ yellowstone_intel : OK
+ yellowstone_pgi : OK
+ goldbach_nag : OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi : OK
+
+CLM tag used for the baseline comparisons: clm4_5_66
+
+Changes answers relative to baseline: some - for all compsets where
+ finidat is set to blank, then answers will change relative to baseline
+
+ for all compsets where finidat is pointing to a dataset, answers will be bfb
+ compared to baseline
+
+===============================================================
+===============================================================
+Tag name: clm4_5_66
+Originator(s): mvertens
+Date: Mon Mar 3 10:50:24 MST 2014
+One-line Summary: refactoring of initialization and introduction of run-time finidat interpolation
+
+Purpose of changes: refactoring of initialization and introduction of run-time finidat interpolation
+
+Completely rewrote clm_initialize to leverage new initialization scheme
+In the new scheme, cold start initialization is ALWAYS called and values
+are overwritten by either an appropriate finidat file OR by calling
+finidat_interp to interpolate finidat to the output resolution/mask.
+
+Requirements for tag:
+
+Test level of tag: regular, short, tools, build_namelist, doc
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+
+ 1930 (MEGAN does not work correctly with prognostic crops on)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+ NONE
+
+Describe any changes made to the namelist:
+
+ - ALL clm4.5 finidat files have been updated to contain new metadata that will enable the
+ online interpinic to operate on them. The time stamp on all new files has the date c140111.
+ The files have been created so that they are bit-for-bit compatible with the code base.
+ - The following new namelist variables have been added to the namelist_definition_clm4_5.xml file
+ - finidat_interp_source
+ if non-blank, then interpinic will be called to interpolate finidat_interp_source and
+ create output file specified by finidat_interp_dest.
+ - finidat_interp_dest
+ if finidat_interp_source is set to non-blank, then interpinic will be called
+ to interpolate finidat_interp_source and create output file finidat_interp_dest
+
+List any changes to the defaults for the boundary datasets:
+
+ None
+
+Describe any substantial timing or memory changes:
+
+ None
+
+Code reviewed by:
+
+ mvertens, sacks
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ None
+
+List all files eliminated:
+
+ The following file pairs were renamed and subsequently extensively modified
+ Summaries of the modifications are below:
+
+ ------- New module initColdMod.F90 contains calls to initialize the cold start for
+ ------- the entire model. The cold start values are then overwritten with either
+ ------- an finidat file or an interpolation file using finidat_interp_source.
+D models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90
+A models/lnd/clm/src/clm4_5/main/initColdMod.F90
+
+ ------- Renamed file
+D models/lnd/clm/src/clm4_5/biogeochem/STATICEcosysDynMod.F90
+A models/lnd/clm/src/clm4_5/biogeochem/SatellitePhenologyMod.F90
+
+ ------- Renamed file
+D models/lnd/clm/src/clm4_5/biogeochem/CNDVEcosystemDynIniMod.F90
+A models/lnd/clm/src/clm4_5/biogeochem/CNDVInitMod.F90
+
+ ------- Renamed file, removed initch4, merged routines initTimeConst_ch4 and makearbinit_ch4
+ ------- into new routine initColdCH4. Also removed almost all associate statements
+ ------- (but kept the input/output documentation) and used the explicit clmtype definition.
+D models/lnd/clm/src/clm4_5/biogeochem/initch4Mod.F90
+A models/lnd/clm/src/clm4_5/biogeochem/ch4InitMod.F90
+
+ ------- Renamed and combined files
+ ------- Migrated all CN cold start initialization for both soil and
+ ------- special landuntis into new routine initColdCN in new module biogeochem/CNInitMod.
+D models/lnd/clm/src/clm4_5/main/CNiniSpecial.F90
+D models/lnd/clm/src/clm4_5/main/CNiniTimeVar.F90
+A models/lnd/clm/src/clm4_5/biogeochem/CNInitMod.F90
+
+ ------- Renamed file and merged routines makearbinit and snow_depth2Lake
+ ------- into one new routine initColdSlake. Also removed almost all associate
+ ------- statements (but kept the input/output documentation) and used the explicit
+ ------- clmtype definition.
+A models/lnd/clm/src/clm4_5/biogeophys/SLakeInitMod.F90
+D models/lnd/clm/src/clm4_5/biogeophys/initSlakeMod.F90
+
+ ------- Renamed iniTimeConst, removed associate statements but kept
+ ------- the documentation of input/output and also explicitly listed
+ ------- full clmtype variables
+D models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+A models/lnd/clm/src/clm4_5/main/initTimeConstMod.F90
+
+List all files added and what they do:
+
+ ------- New run-time interpolation of input finidat to target resolution/mask
+ ------- using the new namelist variables finidat_interp_source and finidat_interp_dest
+A models/lnd/clm/src/clm4_5/main/initInterp.F90
+
+ ------- Obtain/write global index space value for target point at given clmlevel
+A models/lnd/clm/src/util_share/GetGlobalValuesMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+ ------- In all files, unless otherwise noted added call to errMsg(__FILE__,__LINE__)
+ ------- and in some cases optional arguments of decomp_index and clmlevel also added
+
+ ------- See documentation for namelist changes above
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+ ------ Overloaded endrun subroutine to also print out global index information by
+ ------ calling new GetGlobalWrite routine if optional arguments decomp_index and
+ ------ clm_level are passed in
+M models/lnd/clm/src/util_share/abortutils.F90
+
+ ------- Replaced missing value setting of huge(1) with ispval
+M models/lnd/clm/src/util_share/accumulMod.F90
+
+ ------- Replaced endrun with call to shr_sys_abort
+M models/lnd/clm/src/util_share/domainMod.F90
+
+ ------- Added in missing values and special values for variable metadata - this
+ ------- is needed by initInterp
+M models/lnd/clm/src/util_share/restUtilMod.F90
+M models/lnd/clm/src/util_share/restUtilMod.F90.in
+
+ ------- Completely rewrote clm_initialize to leverage new initialization scheme
+ ------- In the new scheme, cold start initialization is ALWAYS called and values
+ ------- are overwritten by either an appropriate finidat file OR by calling
+ ------- finidat_interp to interpolate finidat to the output resolution/mask.
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+
+ ------- Added new metadata and variables - include global indices for parent subgrid
+ ------- level(s) (i.e. column, landunit and gridcell for pfts)
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+
+ ------- Moved view_factor routine and associated variable from a separate routine
+ ------- in UrbanMod to part of the initTimeConstUrban subroutine in UrbanInitMod
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+
+ ------- Removed vf_xx variables from restart file and also
+ ------- removed do_initsurfalb variable
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+
+ ------- In all files, unless otherwise noted added call to errMsg(__FILE__,__LINE__)
+ ------- and in some cases optional arguments of decomp_index and clmlevel also added
+M models/lnd/clm/src/util_share/ncdio_pio.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90.in
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CropRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNBalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNMRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/MEGANFactorsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4RestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPrecisionControlMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4varcon.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate3Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSharedParamsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeBGCMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeCNMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+M models/lnd/clm/src/clm4_5/main/decompInitMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+M models/lnd/clm/src/clm4_5/main/ndepStreamMod.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/restFileMod.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/surfrdUtilsMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+M models/lnd/clm/src/clm4_5/main/initSoilParVICMod.F90
+M models/lnd/clm/src/clm4_5/main/pftvarcon.F90
+M models/lnd/clm/src/clm4_5/main/surfrdMod.F90
+M models/lnd/clm/src/clm4_5/main/decompMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CLMVICMapMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SNICARMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynLandunitAreaMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynTimeInfoMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynFileMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynConsBiogeochemMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynPriorWeightsMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynpftFileMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarMod.F90.in
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ NOTE1: that all namelists compares where finidat was not blank will fail - since
+ new finidat files are used that have new metadata - BUT - the results are still bfb
+
+ yellowstone_intel - OK
+ expected failures:
+ ERI_D.ne30_g16.ICLM45BGC.yellowstone_intel.clm-vrtlay
+ ERS_D.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay
+
+ yellowstone_pgi - OK
+ expected failures:
+ ERH_D.f19_g16.I1850CLM45CN.yellowstone_pgi.clm-default
+ ERS.f09_g16.ICLM45VIC.yellowstone_pgi.clm-vrtlay
+
+ goldbach_nag - OK
+
+ goldbach_intel - OK
+
+ goldbach_pgi - OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ NOTE1: all .h1 tests fail since there are new meta data fields for
+ cols1d_active, pfts1d_active - and FILLDIFF is different
+
+ yellowstone_intel OK
+ yellowstone_pgi OK
+
+CLM tag used for the baseline comparisons: clm4_5_65
+
+Changes answers relative to baseline: No - bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_5_65
+Originator(s): mvertens (Mariana Vertenstein,UCAR/CSEG,303-497-1349)
+Date: Tue Feb 25 13:45:38 MST 2014
+One-line Summary: Turn off MEGAN vocs when crops is running
+
+Purpose of changes:
+
+MEGAN does not currently work with prognostic crops. It needs a table of pft-specific values, and this table has only been created for the 16 "standard" (non-crop) pfts.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1930 (MEGAN does not work correctly with prognostic crops on)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.):s
+
+scripts4_140214a -> scripts4_140220
+Machines_140214 -> Machines_140218
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+========
+ Add a new logical variable - use_voc - that is true by default but
+ is set to false if prognostic crop is activated
+========
+
+M src/clm4_5/main/clm_initializeMod.F90
+M src/clm4_5/main/clm_atmlnd.F90
+M src/clm4_5/main/controlMod.F90
+M src/clm4_5/main/clm_varctl.F90
+M src/clm4_5/main/clm_driver.F90
+M src/clm4_5/main/histFldsMod.F90
+M src/clm4_0/main/clm_varctl.F90
+M src/cpl/clm_cpl_indices.F90
+
+
+CLM testing:
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK
+ expected failures
+ ERI_D.ne30_g16.ICLM45BGC.yellowstone_intel.clm-vrtlay
+ ERS_D.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay
+ expected non-bfb failures due to VIC/CROP changes
+ ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default
+ ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel
+ SMS_Ly1.f19_g16.ICLM45BGCCROP.yellowstone_intel
+
+ yellowstone_pgi - OK
+ expected failures
+ ERH_D.f19_g16.I1850CLM45CN.yellowstone_pgi.clm-default
+ ERS.f09_g16.ICLM45VIC.yellowstone_pgi.clm-vrtlay
+
+ goldbach_nag - OK
+ expected non-bfb failures due to VIC/CROP changes
+ ERS_D.f10_f10.ICLM45BGCCROP.goldbach_nag.clm-allActive
+ ERS_Lm3.1x1_numaIA.ICLM45BGCCROP.goldbach_nag
+
+ goldbach_intel - OK
+ expected non-bfb failures due to VIC/CROP changes
+ ERS_Ly20.1x1_numaIA.ICLM45BGCDVCROP.goldbach_intel.clm-crop
+ PEM.f10_f10.ICLM45BGCCROP.goldbach_intel.clm-crop
+
+ goldbach_pgi - OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - OK -
+ yellowstone_pgi - OK
+
+CLM tag used for the baseline comparisons: clm4_5_64
+
+Changes answers relative to baseline: No - except for VOC fields when
+ prognostic crop is on (this is a diagnostic only and does not impact
+ the answers)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_64
+Originator(s): muszala (Stefan Muszala,UCAR/TSS,303-497-1320)
+Date: Wed Feb 19 09:19:40 MST 2014
+One-line Summary: fix and clean ncdio_pio.F90.in. clean clm_time_manager. update externals.
+
+Purpose of changes:
+
+Note 1: This is the last tag that is tested on frankfurt; new tests are on goldbach.
+
+Note 2: Pts. mode is being deprecated for science use as of this tag. Use PTCLM. Pts. mode
+ remains in place in our test system.
+
+Note 3: There is an unresolved problem with higher resolutions when dov2xy is .false. and we are
+ using pnetcdf. Please see bug 1730.
+
+ncdio_pio.F90.in - fix initialization problem where count and start are sometimes used without
+ being set.
+clm_time_manager - clean out unused variables
+update externals to support ED compsets, move pts. mode tests to testmods. Update Machines and
+ pio to address bug 1730.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.):s
+
+scripts4_140209 -> scripts4_140214a
+Machines_140213 -> Machines_140214
+pio1_8_8 -> pio1_8_9
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/util_share/clm_time_manager.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90.in
+
+CLM testing:
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK
+ Expected due to change in pts. mode:
+ BFAIL SMS.f45_f45.I.yellowstone_intel.clm-ptsRLA.GC.64Intel.compare_hist.clm4_5_63
+ BFAIL SMS.f45_f45.I.yellowstone_intel.clm-ptsROA.GC.64Intel.compare_hist.clm4_5_63
+ BFAIL SMS_D_Mmpi-serial.f45_f45.ICLM45.yellowstone_intel.clm-ptsRLA.GC.64Intel.compare_hist.clm4_5_63
+ BFAIL SMS_Mmpi-serial.f45_f45.ICLM45.yellowstone_intel.clm-ptsRLA.GC.64Intel.compare_hist.clm4_5_63
+ Expected due to change in pio_buffer_size_limit
+ FAIL ERI.f09_g16.ICLM45BGC.yellowstone_intel.GC.64Intel.nlcomp
+ FAIL ERI_D.f09_g16.ICLM45BGC.yellowstone_intel.GC.64Intel.nlcomp
+ New Failure for VIC but due to dov2xy problem
+ ERS_D.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay
+
+ yellowstone_pgi - OK
+ Expected due to change in pts. mode:
+ BFAIL SMS.f45_f45.I.yellowstone_pgi.clm-ptsRLB.GC.64Pgi.compare_hist.clm4_5_63
+ BFAIL SMS_D_Mmpi-serial.f45_f45.ICLM45.yellowstone_pgi.clm-ptsRLA.GC.64Pgi.compare_hist.clm4_5_63
+ BFAIL SMS_Mmpi-serial.f45_f45.ICLM45.yellowstone_pgi.clm-ptsRLA.GC.64Pgi.compare_hist.clm4_5_63
+ Expected due to change in pio_buffer_size_limit
+ FAIL ERI.f09_g16.I1850CRUCLM45BGC.yellowstone_pgi.GC.64Pgi.nlcomp
+ FAIL ERI.f09_g16.ICLM45BGC.yellowstone_pgi.GC.64Pgi.nlcomp
+ FAIL ERI_D.f09_g16.ICLM45BGC.yellowstone_pgi.GC.64Pgi.nlcomp
+ FAIL ERS_D.hcru_hcru.ICRUCLM45BGC.yellowstone_pgi.GC.64Pgi.nlcomp
+ FAIL ERS_D.hcru_hcru.ICRUCN.yellowstone_pgi.GC.64Pgi.nlcomp
+ New Failure for VIC but due to dov2xy problem
+ ERS.f09_g16.ICLM45VIC.yellowstone_pgi.clm-vrtlay
+
+ frankfurt_nag - OK
+ Expected due to change in pts. mode:
+ BFAIL SMS_D_Mmpi-serial.f45_f45.ICLM45.frankfurt_nag.clm-ptsRLA.GC.64Nag.compare_hist.clm4_5_63
+ BFAIL SMS_Mmpi-serial.f45_f45.ICLM45.frankfurt_nag.clm-ptsRLA.GC.64Nag.compare_hist.clm4_5_63
+ BFAIL SMS_Mmpich.f45_f45.ICLM45.frankfurt_nag.clm-ptsRLA.GC.64Nag.compare_hist.clm4_5_63
+
+ frankfurt_intel - OK
+
+ frankfurt_pgi - OK
+ Expected due to change in pts. mode:
+ BFAIL SMS.f45_f45.ICLM45.frankfurt_pgi.clm-ptsRLB.GC.64Pgi.compare_hist.clm4_5_63
+ BFAIL SMS.f45_f45.ICLM45.frankfurt_pgi.clm-ptsROA.GC.64Pgi.compare_hist.clm4_5_63
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - OK
+
+CLM tag used for the baseline comparisons: clm4_5_63
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_63
+Originator(s): sacks (sacks)
+Date: Fri Feb 14 07:22:37 MST 2014
+One-line Summary: add some code needed for dynamic landunits; activate 0-weight veg landunit sometimes
+
+Purpose of changes:
+
+(1) Add grc%landunit_indices(:,:), so you can find a given l index if you have
+ the g index (this will be needed in a few places for dynamic landunits)
+
+(2) Add code to update landunit weights; currently has no effect because
+ landunit areas don't change yet
+
+(3) Refactor logic in the is_active_X routines, and add logic to activate a
+ virtual vegetated landunit under some conditions (needed for coupling with
+ CISM, and helpful for dynamic landunits). Specifically, we activate a
+ virtual (0-weight) vegetated landunit for any grid cell that is NOT 100%
+ istice (i.e., standard glacier) (we exclude grid cells that are 100% istice
+ to avoid the performance penalty, because these aren't used for coupling
+ with CISM, and the only way this glacier can retreat is if another landunit,
+ like crop, increases there, which will rarely happen).
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes:
+
+ Performance about 6% worse for clm4.5 IG runs at f09 (i.e., with glcmec),
+ because of the new virtual vegetated columns. Not investigated for f19 or
+ T31, but probably a similar performance hit.
+
+ There were also a few memcomp failures
+
+Code reviewed by: quick review by mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ Machines: Machines_140207a -> Machines_140213 (to fix pgi on yellowstone)
+
+List all files eliminated:
+
+========= Remove "IN_PROGRESS"
+D models/lnd/clm/src/clm4_5/dyn_subgrid/dynLandunitAreaMod.F90.IN_PROGRESS
+
+List all files added and what they do:
+
+========= Add code to update landunit weights; currently has no effect because
+ landunit areas don't change yet
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynLandunitAreaMod.F90
+
+========= Add unit tests for dynLandunitAreaMod
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test/test_update_landunit_weights_one_gcell.pf
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynLandunitArea_test/test_update_landunit_weights.pf
+
+List all existing files that have been modified, and describe the changes:
+
+========= Add grc%landunit_indices(:,:), so you can find a given l index if you
+ have the g index (this will be needed in a few places for dynamic
+ landunits)
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+ - just add a comment
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+
+========= Call new code in dynLandunitAreaMod
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynSubgridDriverMod.F90
+
+========= Refactor logic in the is_active_X routines, and add logic to activate
+ a virtual vegetated landunit under some conditions (needed for
+ coupling with CISM, and helpful for dynamic landunits)
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+
+========= Add unit tests for dynLandunitAreaMod
+M models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/CMakeLists.txt
+M models/lnd/clm/src/clm4_5/dyn_subgrid/CMakeLists.txt
+M models/lnd/clm/src/clm4_5/main/CMakeLists.txt
+
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ NOTE: frankfurt intel & pgi ran on a slightly older version of the branch
+ (dynlu_weight_updates_glacier_n05_clm4_5_62, which did not include some
+ final minor refactoring to reweightMod); frankfurt nag & yellowstone
+ intel/pgi ran on the final version
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ frankfurt_intel: ok
+ frankfurt_pgi: ok
+ frankfurt_nag: ok
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+
+ A bunch of expected failures in h1 (1-d) hist files, due to the newly-active
+ points.
+
+
+ Also: Did a manual test to show that answers are the same for virtual
+ vegetated columns as they would be if the column had non-zero weight (to
+ ensure there are no dependencies on whether a column's weight is non-zero).
+
+CLM tag used for the baseline comparisons: clm4_5_62 for yellowstone (but
+clm4_5_61 for component_gen_comp); clm4_5_61 for frankfurt
+
+Changes answers relative to baseline: NO
+
+ However, note diffs in 1-d hist files due to newly-active points.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_62
+Originator(s): erik (Erik)
+Date: Mon Feb 10 04:16:07 MST 2014
+One-line Summary: Get PTCLM working robustly, US-UMB test working, add CO2 streams to datm, add more
+ consistency testing between compsets and user settings
+
+Purpose of changes:
+
+US-UMB fix in scripts and datm update. Fix so build-namelist will abort if there is an inconsistency with CLM_BLDNML_OPTS
+and user_nl_clm. Add CO2 streams as a built-in option to datm. Turn CO2 streams on with the DATM_CO2_TSERIES env_run.xml
+variable. Can be set to: none,20tr,rcp2.6,rcp4.5,rcp6.0,rcp8.5, by default is none.
+
+Requirements for tag:
+
+ datm -- CO2 update, streams improvements
+ Fix build-namelist consistency issues
+ Fix bug 1847 -- end1d in hist for clm4_0
+ Add envxml_dir
+ check that cndv and fpftdyn aren't on the same time
+ Add PTCLM tests to test_Driver
+ Add PTCLM test system in
+ Make PTCLM more robust
+
+Test level of tag: regular, tools, build_namelist
+
+Bugs fixed (include bugzilla ID):
+ 1918 -- sort options in build-namelist
+ 1917 -- remove WRF resolutions
+ 1903 -- buildtools fails for PTCLM
+ 1900 -- Remove BUILDHEAT and Qanth from output for CLM testing
+ 1896 -- CLM build-namelist should abort if use_cndv AND fpftdyn are set.
+ 1881 -- Add envxml_casedir option to CLM build-namelist
+ 1879 -- need error triggered when use_crop and CLM_BLDNML_OPTS are not consistent
+ 1847 -- 'histfilemod_mp_hist_restart_ncd_$END1D' is being used without being defined
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: frankfurt switched out for goldbach
+
+Describe any changes made to the namelist: CLM build-namelist changed to ensure user changes don't conflict with
+ command-line options
+
+List any changes to the defaults for the boundary datasets: remove WRF datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts to scripts4_140209
+ Machines to Machines_140207a
+ datm to datm8_140114
+ pio to pio1_8_8
+ cprnc to cprnc_140203
+
+ PTCLM to PTCLM2_140204
+
+List all files eliminated: move frankfurt to goldbach
+
+D models/lnd/clm/test/tools/tests_posttag_frankfurt_nompi
+
+List all files added and what they do: goldbach, and add PTCLM tools testing
+
+A + models/lnd/clm/test/tools/tests_posttag_goldbach_nompi
+A models/lnd/clm/test/tools/TCBscripttools.sh ------------ Add script to run buildtools for PTCLM
+A models/lnd/clm/test/tools/config_files/PTCLM__s -------- Config for PTCLM
+A models/lnd/clm/test/tools/nl_files/PTCLM_USUMB_clm4_0
+A models/lnd/clm/test/tools/nl_files/PTCLM_USUMB_clm4_5
+A models/lnd/clm/test/tools/nl_files/PTCLM_USUMB_Cycle_clm4_5
+A models/lnd/clm/test/tools/nl_files/PTCLM_USUMB_Global_clm4_5
+A models/lnd/clm/bld/unit_testers/myuser_nl_clm --- New build-namelist tests
+A models/lnd/clm/bld/unit_testers/env_run.xml
+
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/build-namelist ----- Sort options, add -envxml_dir option, check that user hasn't contradicted themselves
+ with CLM_BLDNML_OPTS and user_nl_clm, remove options: -noio, -nofire, -snicar_frc, -vsoilc, -exlaklayers, -clm4me
+ use Cwd::abs_path and remove home-grown absolute_path, add some more documentation and comments, redo some ordering and names
+ M models/lnd/clm/bld/clm.buildnml.csh --- add -envxml_dir so will use env_*.xml files to expand env variables
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml --- Add irrig setting, remove WRF files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml --- Add irrig, bgc_spinup, and bgc_mode
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml -- Remove WRF resolutions: us20, wus12
+ M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml - Remove WRF resolutions
+ M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml - Remove WRF resolutions, add bgc_mode
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl ----------- Add a bunch of new tests
+ M models/lnd/clm/bld/user_nl_clm ----- make note of variables that should be done by command-line build-namelist options
+
+ M models/lnd/clm/src/clm4_0/main/histFileMod.F90 --- Fix bug 1847
+
+------------ Add PTCLM testing
+ M models/lnd/clm/test/tools/README.testnames
+ M models/lnd/clm/test/tools/test_driver.sh ------ Remove bluefire, lynx, mirage, jaguarpf -- switch frankfurt for goldbach
+ M models/lnd/clm/test/tools/TBLscript_tools.sh
+ M models/lnd/clm/test/tools/TSMscript_tools.sh
+ M models/lnd/clm/test/tools/input_tests_master
+ M models/lnd/clm/test/tools/tests_posttag_nompi_regression
+ M models/lnd/clm/test/tools/tests_posttag_yong
+ M models/lnd/clm/test/tools/tests_pretag_yellowstone_nompi
+
+------------ Don't die if debug and files were not created.
+ M models/lnd/clm/tools/clm4_0/mksurfdata_map/mksurfdata.pl
+ M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl
+
+CLM testing: regular, build-namelist, tools
+
+ build-namelist tests:
+
+ yellowstone
+
+ regular tests (aux_clm):
+
+ yellowstone_intel
+ yellowstone_pgi
+ goldbach_nag
+ edison_intel
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel
+ yellowstone_pgi
+
+ tools testing:
+
+ yellowstone interactive
+ goldbach interactive
+
+CLM tag used for the baseline comparisons: clm4_5_61
+
+Changes answers relative to baseline: None, bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_5_61
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Tue Feb 4 09:45:43 MST 2014
+One-line Summary: add 3-d snow history fields; continue harvest past end of pftdyn timeseries
+
+Purpose of changes:
+
+ There are two separate sets of changes in this tag; both apply just to CLM4.5:
+
+ (1) Addition of 3-d snow history fields: These history fields (inactive by
+ default) provide diagnostics for each layer of the snow pack. This
+ involved adding some additional history file infrastructure to handle
+ the variable number of snow pack layers. See the new section in the
+ user's guide (custom.xml) for a description of how these new history
+ fields work, and how to interpret them.
+
+ (2) Change the harvest logic for transient runs that extend past the end of
+ the pftdyn dataset: Until now, harvest was set to 0 when you passed the
+ end of the pftdyn dataset. With this tag, this behavior is changed, so
+ that for all years past the end of the pftdyn dataset, harvest rates
+ remain fixed at the last year's value.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes:
+
+ Memory use increases slightly, as observed by memcomp failures for a few
+ tests. This is presumably due to new fields in clmtype.
+
+Code reviewed by: Erik reviewed changes for the 3-d snow history fields.
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= Add 3-d snow history fields
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/doc/UsersGuide/custom.xml
+
+========= Add a new snow diagnostic, sub_surf_abs_SW
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+
+========= Continue harvest past end of pftdyn time series
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynTimeInfoMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynFileMod.F90
+M models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynTimeInfo_test/test_dynTimeInfo.pf
+
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ frankfurt_intel: ok
+ frankfurt_pgi: ok
+ frankfurt_nag: ok
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+
+ Only failures are these expected failures (see notes on answer changes
+ below):
+
+ FAIL SMS_Ly3.1x1_tropicAtl.I20TRCLM45BGC.yellowstone_intel.clm-tropicAtl_subsetLate.compare_hist.clm4_5_60.clm2.h0
+ FAIL SMS_Ly5.1x1_tropicAtl.I20TRCLM45BGC.yellowstone_intel.clm-tropicAtl_subsetMid.compare_hist.clm4_5_60.clm2.h0
+ FAIL SMS_Ly8.1x1_tropicAtl.I20TRCLM45BGC.yellowstone_intel.clm-tropicAtl_subsetEarly.compare_hist.clm4_5_60.clm2.h0
+
+CLM tag used for the baseline comparisons: clm4_5_60
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM45 transient runs that continue past the end of the pftdyn dataset
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ New climate: Harvest rates are now continued past the end of the pftdyn
+ dataset (staying fixed at their value from the last year), for the
+ remainder of the simulation. This leads to potentially large answer
+ changes for transient runs that continue past the end of the pftdyn
+ dataset.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_60
+Originator(s): andre (Benjamin Andre,LAWRENCE BERKELEY NATIONAL LABORATORY,510-486-4617)
+Date: Thu Jan 30 18:27:03 MST 2014
+One-line Summary: refactor build-namelist
+
+Purpose of changes: break build-namelist into small unit-testable functions
+ instead of a single massive script. Use output functions to standardize
+ screen output for errors, warnings and messages so that results can be
+ automatically searched by scripts.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, Erik
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/build-namelist - major refactor described above
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - fix incorrect comments
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl - minor updates to check new output style
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone - pass compare and generate with only xfails
+ frankfurt - not tested, CLM-CMT believes tests may be broken.
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - ok
+ yellowstone_pgi - ok
+ frankfurt_intel - ok
+ frankfurt_pgi - ok
+ frankfurt_nag - ok
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - ok
+ yellowstone_pgi - ok
+
+ short tests (aux_clm_short):
+
+ yellowstone_intel - ok
+
+ tools testing: N/A
+
+CLM tag used for the baseline comparisons: clm4_5_59
+
+Changes answers relative to baseline: none, bit for bit
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_59
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Wed Jan 22 15:04:12 MST 2014
+One-line Summary: use new get_curr_yearfrac function in clm_time_manager
+
+Purpose of changes:
+
+ Use the new get_curr_yearfrac function in clm_time_manager in place of
+ dyn_time_weights. The reason is that, as Erik pointed out, dyn_time_weights
+ was out of place in dynUtilsMod, and really this functionality belongs in the
+ clm_time_manager module.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): none
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated:
+
+========= Made obsolete by get_curr_yearfrac in clm_time_manager
+D models/lnd/clm/src/clm4_5/dyn_subgrid/dynUtilsMod.F90
+D models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid/dynUtilsMod_mock.F90
+
+List all files added and what they do:
+
+========= Mock out get_curr_yearfrac: return a fixed fraction
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/clm_time_manager_mock.F90
+
+List all existing files that have been modified, and describe the changes:
+
+========= Fix get_curr_yearfrac to be real rather than integer
+M models/lnd/clm/src/util_share/clm_time_manager.F90
+
+========= Use get_curr_yearfrac instead of dyn_time_weights
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynCNDVMod.F90
+M models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90.in
+
+========= Update unit tests to pull in clm_time_manager (mock) rather than dynUtilsMod (mock)
+M models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/CMakeLists.txt
+M models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid/CMakeLists.txt
+
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ frankfurt_intel: ok
+ frankfurt_pgi: ok
+ frankfurt_nag: ok
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+
+CLM tag used for the baseline comparisons: clm4_5_58
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM45, either transient or with DV
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ roundoff
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ Ran testing on an intermediate tag, where I computed the time weights in
+ both the old and new ways. I confirmed that the difference in time weights
+ (which is the only change in this tag) is always less than 1e-13. Actually,
+ this difference is always less than 2e-16, double-precision roundoff.
+
+ Also examined cpl hist diffs for a few select tests. Diffs are generally
+ ~ 1e-6 after 5 days.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_58
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Wed Jan 22 14:11:30 MST 2014
+One-line Summary: major refactor of transient pft code, in prep for dynamic landunits
+
+Purpose of changes:
+
+Major refactor of transient pft code, in prep for dynamic landunits.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 1899 (harvest rates remain non-zero even after the end of the pftdyn dataset)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Erik; design reviewed by CLM-CMT
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts: scripts4_140102 -> scripts4_140114
+ Machines: Machines_131206b -> Machines_140107
+ csm_share: share3_131226 -> share3_131231
+ pio: pio1_8_3 -> pio1_8_6
+
+ CMake: New external added
+
+List all files eliminated:
+
+========= renamed to dynConsBiogeophysMod.F90
+D models/lnd/clm/src/clm4_5/main/dynlandMod.F90
+
+========= renamed to dynpftFileMod.F90; much of the stuff in here moved to other
+ files in the dyn_subgrid directory
+D models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+
+========= renamed
+D models/lnd/clm/test/unit
+
+List all files added and what they do:
+
+========= Rename test/unit to test/unit_testers, and add unit tests for some of
+ the stuff in dyn_subgrid. This also involved adding some mocks -
+ particularly of ncdio_pio.
+A models/lnd/clm/test/unit_testers/clm4_5/biogeophys/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/biogeophys/Daylength_test/test_daylength.pf
+A models/lnd/clm/test/unit_testers/clm4_5/biogeophys/Daylength_test/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/biogeophys/Daylength_test
+A models/lnd/clm/test/unit_testers/clm4_5/biogeophys
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/test_dynVarShared.F90
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/test_dynVarTimeInterp.pf
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/test_dynVarTimeUninterp.pf
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynVar_test
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynTimeInfo_test/test_dynTimeInfo.pf
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynTimeInfo_test/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/dynTimeInfo_test
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/dyn_subgrid
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_var.F90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_var.F90.in
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_pio_mock.F90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/spmdMod_mock.F90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/do_genf90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share/ncdio_pio_mock.F90.in
+A models/lnd/clm/test/unit_testers/clm4_5/mock/util_share
+A models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share/shr_sys_mod_mock.F90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/mock/csm_share
+A models/lnd/clm/test/unit_testers/clm4_5/mock/main/decompMod_boundsTypeDecl.F90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/main/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/mock/main
+A models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid/dynFileMod_mock.F90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid/dynUtilsMod_mock.F90
+A models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/mock/dyn_subgrid
+A models/lnd/clm/test/unit_testers/clm4_5/mock/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/mock
+A models/lnd/clm/test/unit_testers/clm4_5/CMakeLists.txt
+A models/lnd/clm/test/unit_testers/clm4_5/README
+A models/lnd/clm/test/unit_testers/clm4_5
+A models/lnd/clm/test/unit_testers
+
+========= Make a new directory to hold all of the stuff related to dynamic
+ subgrid weights. Currently this means transient PFTs, but soon it will
+ also mean dynamic landunits. This includes stuff that used to be in
+ pftdynMod and dynlandMod, as well as a bit from clm_driver. I have
+ added a new driver for the dyn_subgrid stuff (dynSubgridDriverMod),
+ and pulled out much of the shared, lower-level functionality into new
+ modules (dynTimeInfoMod, dynFileMod, dynVarMod, dynVarTimeInterpMod,
+ dynVarTimeUninterpMod, dynUtilsMod [which will soon go away]). In
+ addition, I have separated the many routines in pftdynMod into
+ separate modules, each with a single, better-defined function.
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynTimeInfoMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynSubgridDriverMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynHarvestMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynFileMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynConsBiogeochemMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynPriorWeightsMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynpftFileMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynCNDVMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynConsBiogeophysMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeUninterpMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarMod.F90.in
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeInterpMod.F90.in
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynLandunitAreaMod.F90.IN_PROGRESS
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynUtilsMod.F90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/dynVarTimeUninterpMod.F90.in
+A models/lnd/clm/src/clm4_5/dyn_subgrid/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/dyn_subgrid
+
+========= script to generate files from their .in files using genf90
+A models/lnd/clm/src/clm4_5/dyn_subgrid/do_genf90
+
+
+========= move check_sums_equal_1 to a new module, partly to reduce dependencies
+ of unit tests, and partly because it is cleaner design to have it
+ outside of surfrdMod
+A models/lnd/clm/src/clm4_5/main/surfrdUtilsMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+========= Add dyn_subgrid directory
+M models/lnd/clm/bld/configure
+
+========= Add get_curr_yearfrac function (currently broken, will be fixed in
+ next tag)
+M models/lnd/clm/src/util_share/clm_time_manager.F90
+
+========= Change type(file_desc_t) to class(file_desc_t); add 'only' clause to
+ use statements to allow compilation with pgi
+M models/lnd/clm/src/util_share/ncdio_pio.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90.in
+
+========= Change 'use statement' for reworked dyn_subgrid code
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+
+========= Move surfrd_check_urban and surfrd_check_sums_equal_1 to more
+ appropriate places
+M models/lnd/clm/src/clm4_5/main/surfrdMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+
+========= Move some code into dynSubgridDriverMod
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+ - also remove incorrect header comment
+
+========= Add compute_higher_order_weights routine
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+
+========= Add unit testing support
+M models/lnd/clm/src/clm4_5/main/CMakeLists.txt
+
+
+========= Frankfurt-PGI tests now pass!
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ frankfurt_intel: ok
+ frankfurt_pgi: ok
+ frankfurt_nag: ok
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+
+CLM tag used for the baseline comparisons: clm4_5_57
+
+Changes answers relative to baseline: YES (very limited; see below)
+
+ Can change answers due to the following:
+
+ (1) Changes answers for harvest when a run starts inside the pftdyn timeseries
+ but extends beyond it, without an intervening restart (see bug 1899)
+
+  (2) Could theoretically change answers for yellowstone-pgi or hopper-pgi due
+  to machine updates, but no changes showed up in the yellowstone test suite
+
+===============================================================
+===============================================================
+Tag name: clm4_5_57
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Tue Jan 7 14:17:04 MST 2014
+One-line Summary: change CNDV water conservation to use the pftdyn method
+
+Purpose of changes:
+
+ For my dynamic landunit work, I was trying to reconcile what's going on with
+ prescribed transient PFTs (pftdyn) vs CNDV. The reason is that I'm trying to
+ set up an overall control flow for dynamic landunits, and you need to be able
+ to run either of these in conjunction with dynamic landunits.
+
+ In doing this, I noticed that water conservation is handled differently for
+ pftdyn vs CNDV:
+
+ For pftdyn, water conservation is done as described in section 21.2 of the
+ CLM4.5 tech note: water contents are summed before and after transition, and
+ the difference is put in the runoff term
+
+ CNDV appears not to use this before & after difference. Instead, it does a
+ correction for canopy water in pftdynMod: pftdyn_wbal.
+
+ For dynamic landunits, we're planning to use an approach like what is
+ currently done for pftdyn. I think it's going to be messy and confusing to
+ try to maintain the current CNDV approach when it's possible to have CNDV in
+ conjunction with dynamic landunits.
+
+ Thus, I am changing CNDV to use the pftdyn approach to water conservation,
+ whether or not you are running with dynamic landunits. This will change
+ answers for CNDV/BGCDV cases in CLM4.5, though I expect the effects to be
+ small.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: erik; concept approved by Sam Levis
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= Main changes are here; also minor (somewhat related) cleanup: fix some
+ section heading comments, add a timer (ndep_interp) - pulling out some
+ stuff that used to be (inappropriately) in the pftdynwts timer section
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+
+========= Remove a bunch of now-unneeded code, especially from pftdynMod
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+
+
+CLM testing:
+
+ NOTE: Most testing was done from tag cndv_water_conservation_n01_clm4_5_55 -
+ up-to-date with clm4_5_55, NOT clm4_5_56. I then updated to clm4_5_56 and
+ reran just the three tests that were run for that tag (see its ChangeLog
+ entry, below), with comparison to clm4_5_56.
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+CLM tag used for the baseline comparisons: clm4_5_55 for most tests, clm4_5_56
+for three tests (see above note)
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM45 with DV (CNDV / BGCDV)
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ Not investigated, but expected to be larger than roundoff/same climate
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_56
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Thu Jan 2 09:06:32 MST 2014
+One-line Summary: update scripts external to fix I20TRCLM45BGC compset
+
+Purpose of changes: update scripts external to fix I20TRCLM45BGC compset
+
+Requirements for tag: fix bug 1869
+
+Test level of tag: limited (see below)
+
+Bugs fixed (include bugzilla ID): 1869 (I20TRCLM45BGC compset improperly defined)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts: scripts4_131203 -> scripts4_140102
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes: none
+
+CLM testing:
+
+ ONLY TESTED THE 3 AFFECTED TESTS from the yellowstone & frankfurt aux_clm test suites:
+
+ PASS PET_P15x2_Lm13.f10_f10.I20TRCLM45BGC.yellowstone_pgi.clm-reduceOutput.GC.140102-060037
+ PASS ERS_D.f10_f10.I20TRCLM45BGC.frankfurt_pgi.clm-decStart.GC.140102-060448
+ PASS ERS_Mmpich.f10_f10.I20TRCLM45BGC.frankfurt_nag.clm-decStart.GC.140102-060608
+
+CLM tag used for the baseline comparisons: clm4_5_55
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: Only I20TRCLM45BGC compsets
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ New climate for this compset: correctly uses CLM4.5 instead of CLM4.0 code.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_55
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Fri Dec 27 16:21:45 MST 2013
+One-line Summary: add hooks to Sean Santos's unit test framework, and begin to add CLM unit tests
+
+Purpose of changes:
+
+ (1) add hooks to Sean Santos's unit test framework
+
+ (2) begin to add CLM unit tests
+
+ Note: this tag currently does NOT have the CMake utilities that are needed to
+ run the unit tests. Instead, the instructions show how to point to a version
+ of these in my directory. That's because, as of the time I submitted this tag
+ for testing, the necessary working version of the CMake utilities was not yet
+ tagged. In the near future, another external could be added to pull in these
+ CMake utilities in the CLM directory tree.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self, santos
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ tools/unit_testing: added external
+ models/csm_share: share3_131101 -> share3_131226 (to get changes needed for building unit tests)
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+========= Set up unit test directories & CMakeLists.txt files, and add unit
+ tests for DaylengthMod. Note that the tests themselves are in
+ Daylength_test/test_daylength.pf. See the README file for how to run
+ the tests
+A models/lnd/clm/test/unit
+A models/lnd/clm/test/unit/clm4_5
+A models/lnd/clm/test/unit/clm4_5/README
+A models/lnd/clm/test/unit/clm4_5/CMakeLists.txt
+A models/lnd/clm/test/unit/clm4_5/mock
+A models/lnd/clm/test/unit/clm4_5/mock/decompMod_boundsTypeDecl.F90
+A models/lnd/clm/test/unit/clm4_5/mock/CMakeLists.txt
+A models/lnd/clm/test/unit/clm4_5/Daylength_test
+A models/lnd/clm/test/unit/clm4_5/Daylength_test/test_daylength.pf
+A models/lnd/clm/test/unit/clm4_5/Daylength_test/CMakeLists.txt
+
+========= Add CMakeLists.txt files that are needed to build unit tests
+A models/lnd/clm/src/clm4_5/main/CMakeLists.txt
+A models/lnd/clm/src/clm4_5/biogeophys/CMakeLists.txt
+
+List all existing files that have been modified, and describe the changes:
+
+========= Remove unneeded 'use' statement, to prevent pulling in more than is
+ necessary for the unit test build
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+
+========= Externals updated - see above
+M SVN_EXTERNAL_DIRECTORIES
+
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+ Also ran the new unit tests, as per the instructions in
+ models/lnd/clm/test/unit/clm4_5/README - all PASS
+
+CLM tag used for the baseline comparisons: clm4_5_54
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_54
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Fri Dec 27 15:55:05 MST 2013
+One-line Summary: update externals to cesm1_3_beta06
+
+Purpose of changes:
+
+ Update externals to cesm1_3_beta06 versions.
+
+ However, do NOT update RTM, because the latest version of RTM results in
+ failures for ERI _N2 tests.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): none
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: not investigated
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ Index: SVN_EXTERNAL_DIRECTORIES
+ ===================================================================
+ --- SVN_EXTERNAL_DIRECTORIES (.../trunk_tags/clm4_5_53) (revision 56268)
+ +++ SVN_EXTERNAL_DIRECTORIES (.../branch_tags/clm_update_externals_cesm1_3_beta06_tags/clm_update_externals_cesm1_3_beta06_n02_clm4_5_53) (revision 56268)
+ @@ -1,25 +1,25 @@
+ # CESM scripts, machines and driver
+ -scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_131126a
+ -scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130930b
+ -models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_02
+ +scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_131203
+ +scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_131206b
+ +models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_07
+
+ # Model components: Data atmosphere, and stub components as well as land-ice model
+ -models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_131116
+ -models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_03/socn
+ -models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_03/sice
+ -models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_03/sglc
+ -models/wav/swav https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_03/swav
+ -models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_34
+ -models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_130924
+ +models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_131201
+ +models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/socn
+ +models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sice
+ +models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sglc
+ +models/wav/swav https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/swav
+ +models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_34
+ +models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_131008
+
+ # Utilities: csm_share, esmf, timing, MCT, PIO
+ -models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_130918
+ +models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_131101
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_130213
+ -models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_130506
+ +models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_131108
+ models/utils/mct https://github.com/quantheory/MCT/tags/compiler_fixes_n03_MCT_2.8.3
+ -models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_7_2/pio
+ +models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_8_3/pio
+
+ # Mapping tools:
+ -tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_130529
+ +tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_131120
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130716
+ models/lnd/clm/tools/shared/gen_domain https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130716/gen_domain_files
+
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= NCK tests no longer fail; change fail type of
+ ERS_D.f19_g16.IGRCP26CN.frankfurt_pgi from CFAIL to RUN
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: OK
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: generate only (no baselines from clm4_5_53)
+
+CLM tag used for the baseline comparisons: clm4_5_53
+
+Changes answers relative to baseline: YES, but only for multi-instance
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: Multi-instance
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ Not investigated, but suspected to be new climate
+
+ Note that NCK tests newly pass (they had been failing), so this answer change
+ for multi-instance tests is expected.
+
+ These answer changes show up in the following tests:
+
+ FAIL CME_N2.f10_f10.I1850CLM45BGC.yellowstone_intel.clm-default.GC.131227-063851.compare_hist.clm4_5_53
+ FAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.GC.131227-063851.compare_hist.clm4_5_53
+ FAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default.GC.131227-063851.compare_hist.clm4_5_53
+
+ FAIL CME_N2.f10_f10.I1850CLM45BGC.yellowstone_intel.clm-default.compare_hist.clm4_5_53.clm2.h0
+ FAIL ERI_N2.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default.compare_hist.clm4_5_53.clm2.h0
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_53
+Originator(s): muszala (Stefan Muszala,UCAR/TSS,303-497-1320)
+Date: Thu Dec 19 07:59:56 MST 2013
+One-line Summary: refactor restart interfaces
+
+Purpose of changes: Refactor restart interfaces. Most work done by mvertens.
+
+1) Add two *.F90.in files that use genf90.pl to generate source. This saves time
+   in dev. and maintenance. If you modify only the *.F90 file, your changes will
+   be lost. Instead modify the *F90.in file, then run genf90.pl on that file.
+ If you have questions, ask a clm developer for help.
+2) Restart capability has now been encapsulated in a subroutine call that uses
+ Fortran 2003 interfaces over type and dimension. For example:
+
+- if (flag == 'define') then
+- call ncd_defvar(ncid=ncid, varname='grainc_storage_to_xfer', xtype=ncd_double, &
+- dim1name='pft',long_name='grain C shift storage to transfer',units='gC/m2/s')
+- else if (flag == 'read' .or. flag == 'write') then
+- call ncd_io(varname='grainc_storage_to_xfer', data=pcf%grainc_storage_to_xfer, &
+- dim1name=namep, ncid=ncid, flag=flag, readvar=readvar)
+- if (flag=='read' .and. .not. readvar) then
+- if (is_restart()) call endrun
+- end if
+- end if
++ call restartvar(ncid=ncid, flag=flag, varname='grainc_storage_to_xfer', xtype=ncd_double, &
++ dim1name='pft', &
++ long_name='grain C shift storage to transfer', units='gC/m2/s', &
++ interpinic_flag='interp', readvar=readvar, data=pcf%grainc_storage_to_xfer)
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary data sets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated:
+
+! renamed for consistency
+D models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+
+List all files added and what they do:
+
+A models/lnd/clm/src/util_share/dtypes.h
+A models/lnd/clm/src/util_share/ncdio_pio.F90.in
+A models/lnd/clm/src/util_share/restUtilMod.F90
+A models/lnd/clm/src/util_share/restUtilMod.F90.in
+A models/lnd/clm/src/clm4_5/biogeochem/CNRestMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/util_share/clm_time_manager.F90
+M models/lnd/clm/src/util_share/accumulMod.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CropRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4RestMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/restFileMod.F90
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeRestMod.F90
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK
+ yellowstone_pgi - OK
+ frankfurt_intel - OK
+ frankfurt_pgi - OK
+ frankfurt_nag - OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - OK
+
+CLM tag used for the baseline comparisons: clm4_5_52
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_52
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Tue Nov 26 22:07:32 MST 2013
+One-line Summary: turn on longwave radiation downscaling for glc_mec by default
+
+Purpose of changes: Turn on longwave radiation downscaling for glc_mec by default
+
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: set glcmec_downscale_longwave to true
+by default
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts: updated to scripts4_131126a, to get tweaked test list
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= Change glcmec_downscale_longwave to true by default
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+
+========= Remove build-namelist tests that are no longer xFails
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+========= Update scripts to scripts4_131126a, to get tweaked test list
+M SVN_EXTERNAL_DIRECTORIES
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: ok
+ frankfurt
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+ frankfurt_intel: ok
+ frankfurt_pgi: ok
+ frankfurt_nag: ok
+
+Expected baseline failures:
+FAIL ERI.f19_g16.IG1850CLM45.yellowstone_pgi.GC.131126-131812.compare_hist.clm4_5_51
+FAIL ERI.f19_g16.IG1850CLM45.yellowstone_pgi.GC.131126-131812.nlcomp
+
+Two BFAILs due to changed tests; I reran them as their old versions and confirmed that answers changed, as expected:
+BFAIL ERS_D.f19_g16.ICLM45GLCMEC.yellowstone_intel.clm-glcMEC_changeFlags.GC.131126-131807.compare_hist.clm4_5_51
+BFAIL PEM_D.f19_g16.IG1850CLM45.yellowstone_pgi.clm-glcMEC.GC.131126-214346.compare_hist.clm4_5_51
+
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: ok
+ yellowstone_pgi: ok
+
+CLM tag used for the baseline comparisons: clm4_5_51
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM45 with glc_mec (IG compsets)
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ larger than roundoff; not investigated whether it is same climate or new climate
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_51
+Originator(s): sacks (sacks)
+Date: Tue Nov 26 05:46:29 MST 2013
+One-line Summary: rework downscaling of atm fields for glc_mec
+
+Purpose of changes:
+
+A number of changes related to downscaling atm -> clm fields for glc_mec
+landunits:
+
+(1) Add new options to downscale precip (division into rain/snow) and longwave
+ radiation (developed by Bill Lipscomb). Both are currently off by default.
+
+(2) Move downscaling code out of clm_driverInit into clm_atmlnd.F90 - this is a
+ more appropriate module, and is a step towards modularity, because the code
+ to deal with the atmospheric forcing fields lives in the same module as the
+ definition of these atmospheric forcing fields.
+
+(3) Ensure that all code uses the downscaled, column-level fields where
+ possible. Previously, some code (which did not operate over glc_mec
+ landunits) used the non-downscaled, gridcell-level version of fields such as
+ forc_t. This was a problem because (a) it was confusing and error-prone, and
+ (b) we will soon be bringing in code to do downscaling over other landunits
+ as well as glc_mec landunits.
+
+(4) To support (3), and make it harder for someone to accidentally use the
+ gridcell-level version of a field when they should be using the downscaled,
+ column-level version: Broke clm_a2l into two pieces - one containing fields
+ that aren't downscaled, and one containing fields that are downscaled. For
+ fields that are downscaled, clearly distinguished the non-downscaled
+ versions so they couldn't be used by accident.
+
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: For glc_mec runs, added two new
+namelist options: glcmec_downscale_rain_snow_convert and
+glcmec_downscale_longwave.
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: erik; portions reviewed by Bill Lipscomb, Mariana, Stefan
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+========= Significant changes: includes stuff that used to be in
+ clm_driverInitMod; added new downscaling code for precip & lwrad;
+ split atm2lnd type into two types; reworked initialization interfaces;
+ removed unused field rainf, because it currently isn't used and could
+ theoretically become inconsistent with the downscaled rain/snow
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+
+========= Removed downscaling code from here
+M models/lnd/clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+
+========= Added call to downscale_forcings
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+
+========= Changed interface to init_atm2lnd: previously, clm_initialize used
+ clm_a2l from clm_atmlnd and passed it to init_atm2lnd_type (also in
+ clm_atmlnd) - there was no reason for this, it was confusing, and to
+ some extent broke modularity
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+
+========= New parameter
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+
+========= Removed some variables
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+
+========= New namelist control variables
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+
+========= Removed unused rainf, added pointer to allow lnd_import_export to
+ remain identical between clm4_0 and clm4_5 code
+M models/lnd/clm/src/clm4_0/main/clm_atmlnd.F90
+
+========= Fixed test ids for failing build-namelist tests
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+========= Minor changes in lots of places to (a) use fields from
+ a2l_downscaled_col instead of clm_a2l, and (b) index those fields by
+ column rather than by gridcell
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/cpl/lnd_comp_esmf.F90
+M models/lnd/clm/src/cpl/lnd_import_export.F90
+M models/lnd/clm/src/cpl/lnd_comp_mct.F90
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: OK
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ Note the following expected nlcomp failures:
+ FAIL ERS_D.f19_g16.ICLM45GLCMEC.yellowstone_intel.clm-glcMEC.GC.131125-104703.nlcomp
+ FAIL ERI.f19_g16.IG1850CLM45.yellowstone_pgi.GC.131125-104751.nlcomp
+ FAIL PEM_D.f19_g16.IG1850CLM45.yellowstone_pgi.GC.131125-104751.nlcomp
+
+ Also, there were BFAILs for the following, implying that the baselines
+ didn't exist; I'm not too concerned because baseline comparisons passed for
+ similar tests:
+ BFAIL SMS_Ly1_Mmpich.f19_g16.ICLM45BGCCROP.frankfurt_nag.clm-reduceOutput.GC.131125-104832.compare_hist.clm4_5_50
+ BFAIL ERS_Ly5.f10_f10.I20TRCRUCLM45BGC.yellowstone_intel.clm-monthly_noinitial.GC.131125-104703.compare_hist.clm4_5_50
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+ As above, baselines were missing for this test:
+ BFAIL2 ERS_Ly5.f10_f10.I20TRCRUCLM45BGC.yellowstone_intel.clm-monthly_noinitial.compare_hist.clm4_5_50.clm2.h0 (baseline history file does not exist)
+ BFAIL2 ERS_Ly5.f10_f10.I20TRCRUCLM45BGC.yellowstone_intel.clm-monthly_noinitial.compare_hist.clm4_5_50.clm2.h1 (baseline history file does not exist)
+
+
+CLM tag used for the baseline comparisons: clm4_5_50
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_50
+Originator(s): erik (Erik)
+Date: Sun Nov 24 18:51:11 MST 2013
+One-line Summary: Bring in a crop of b4b bugfixes, fix getregional script,
+ start move of PTCLM to PTCLMmkdata tool
+
+Purpose of changes:
+
+Bring in a crop of bit-for-bit bug-fixes to the trunk for November.
+Fix the getregional_datasets script and initial move of PTCLM to just
+be a CLM tool under models/lnd/clm/tools/shared to create single-point
+datasets.
+
+New option to create_newcase "-user_mods_dir" for a directory with
+user changes such as user_nl_* namelist modification files, xmlchange_cmnds
+file with xmlchanges to make, and SourceMods/src.*/* files.
+
+Requirements for tag: bit-for-bit bug-fixes and work on PTCLM
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 1868 (Add user_mods_dir to create_newcase)
+ 1854 (Remove fndepdat no longer used)
+ 1842 (Remove unused variables from gridcell type) (Bill)
+ 1835 (Add write statement to pftdyn so you can see what it is doing)
+ 1828 (Clarify modulo used in irrigation code) (Bill)
+ 1770 (Remove sitespf_pt valid_values list for clm4_0)
+ 1724 (getregional script does NOT work)
+ 1625 (Problem setting finidat in CLM for RUN_TYPE=hybrid/branch)
+ 1543 (large-file format does NOT work in latest clm)
+ 1481 (Provide a more direct way to set a user provided finidat file)
+ 1437 (problems with link_dirtree -- no longer needed)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: SourceMods directory on create_newcase
+
+ New -user_mods_dir option to create_newcase that will copy SourceMods/src.*/*
+ files to the new case. Also copies user_nl_* files and xmlchange_cmnds
+
+Describe any changes made to the namelist: remove outnc_large_files
+ Remove outnc_large_files -- wasn't functional
+ (now always use 64-bit format)
+ Remove fndepdat from namelist_definition/defaults no longer used.
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, sacks (fixes for 1842 and 1828)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+ scripts updated to scripts4_131119
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+------ Add externals for tools so PTCLM shows up in tool directory
+------ Add same files and a README file for getregional
+A models/lnd/clm/tools/SVN_EXTERNAL_DIRECTORIES
+A models/lnd/clm/tools/shared/ncl_scripts/sample_inlist
+A models/lnd/clm/tools/shared/ncl_scripts/sample_outlist
+A models/lnd/clm/tools/shared/ncl_scripts/README.getregional
+
+List all existing files that have been modified, and describe the changes:
+
+------ Get getregional_datasets script working again. Now operates on
+------ lists of files.
+M models/lnd/clm/tools/shared/ncl_scripts/getregional_datasets.pl
+M models/lnd/clm/tools/shared/ncl_scripts/getregional_datasets.ncl
+M models/lnd/clm/test/tools/TSMscript_tools.sh --- Copy sample_*list files
+M models/lnd/clm/test/tools/nl_files/getregional - change arguments
+
+------
+M models/lnd/clm/tools/clm4_0/interpinic/src/interpinic.F90
+
+------ Remove clm_startfile option and outnc_large_files
+M models/lnd/clm/bld/config_files/config_definition_clm4_0.xml --
+ Remove valid_values from sitespf_pt so can be anything.
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl -- Remove
+ clm_startfile option and move some clm4_5 configure options
+ to build-namelist
+M models/lnd/clm/bld/build-namelist --- Remove clm_startfile option
+ and outnc_large_files
+M models/lnd/clm/bld/clm.buildnml.csh - Remove clm_startfile option
+ set finidat/nrevsn like how is done in CAM
+
+------ Remove fndepdat and outnc_large_files
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0_tools.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5_tools.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90 -------- Remove
+                 some gridcell variables not used
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90 ------------- write
+ on what's going on
+M models/lnd/clm/src/clm4_5/main/controlMod.F90 ------------ Remove
+ outnc_large_files
+M models/lnd/clm/src/clm4_5/main/clmtype.F90 --------------- Remove
+ a bunch of gridcell variables not needed
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90 - Add
+ seconds_since_irrig_start_time temporary to clarify
+
+------
+M models/lnd/clm/src/clm4_0/main/pftdynMod.F90 --- add some write statements
+M models/lnd/clm/src/clm4_0/main/controlMod.F90 -- remove outnc_large_files
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone yes
+ frankfurt no
+
+ NOTE: there were some tests that were passing but in the xFail list
+ from before clm4_5_49 that I marked as working.
+
+ regular tests (aux_clm):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ frankfurt_intel yes
+ frankfurt_pgi yes
+ frankfurt_nag yes
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+
+ tools testing:
+
+ yellowstone interactive yes
+
+CLM tag used for the baseline comparisons: clm4_5_49
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_5_49
+Originator(s): muszala (Stefan Muszala)
+Date: Sat Nov 16 07:51:27 MST 2013
+One-line Summary: Swenson anomaly forcing - Part 1
+
+Purpose of changes:
+ add additional data streams to modify existing
+ data streams for purposes such as bias correction or specifying
+ future changes relative to baseline data streams, e.g. specifying
+ future atmospheric forcing anomalies when running CLM with data atmosphere.
+ Paired with datm8_131115.
+
+ For what to set in user_nl_cpl, user_nl_datm, see testing section.
+
+ This is part 1 of 2. Part 2 will address a general way to handle
+ streams in the DATM that is triggered off of an AF compset.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: swenson, kluzek, self
+
+List any svn externals directories updated (csm_share, mct, etc.): update to datm8_131116
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_0/main/clm_atmlnd.F90
+M models/lnd/clm/src/cpl/clm_cpl_indices.F90
+M models/lnd/clm/src/cpl/lnd_import_export.F90
+M SVN_EXTERNAL_DIRECTORIES
+
+CLM testing:
+
+Testing for features of this tag: f09_g16, ICRUCLM45
+
+anomaly forcing namelists
+ user_nl_cpl: cplflds_custom = 'Sa_prec_af->a2x', 'Sa_prec_af->x2l','Sa_tbot_af->a2x',
+ 'Sa_tbot_af->x2l','Sa_pbot_af->a2x', 'Sa_pbot_af->x2l','Sa_shum_af->a2x',
+   'Sa_shum_af->x2l','Sa_u_af->a2x', 'Sa_u_af->x2l','Sa_v_af->a2x',
+ 'Sa_v_af->x2l','Sa_swdn_af->a2x', 'Sa_swdn_af->x2l','Sa_lwdn_af->a2x',
+ 'Sa_lwdn_af->x2l'
+ user_nl_datm: anomaly_forcing = 'Anomaly.Forcing.Precip','Anomaly.Forcing.Temperature',
+ 'Anomaly.Forcing.Pressure','Anomaly.Forcing.Humidity','Anomaly.Forcing.Uwind',
+ 'Anomaly.Forcing.Vwind','Anomaly.Forcing.Shortwave','Anomaly.Forcing.Longwave'
+bias correction namelists
+ user_nl_cpl: cplflds_custom = 'Sa_precsf->a2x', 'Sa_precsf->x2l'
+ user_nl_datm: bias_correct = 'BC.CRUNCEP.GPCP.Precip'
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK
+ yellowstone_pgi - OK
+ frankfurt_intel - OK
+ frankfurt_pgi - OK
+ frankfurt_nag - OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - OK
+
+CLM tag used for the baseline comparisons: clm4_5_48
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_48
+Originator(s): muszala (Stefan Muszala)
+Date: Thu Nov 14 08:28:31 MST 2013
+One-line Summary: bug fixes for CLM dry deposition and MEGAN VOC emissions
+
+Purpose of changes: Bring in bug fixes from fvitt for CLM dry deposition and MEGAN VOC emissions. Any changes
+ to answers are limited to rare circumstances.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: fvitt, self
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_0/biogeochem/VOCEmissionMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/DryDepVelocity.F90
+
+CLM testing:
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK
+ yellowstone_pgi - OK
+ frankfurt_intel - OK
+ frankfurt_pgi - OK
+ frankfurt_nag - OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - OK
+
+CLM tag used for the baseline comparisons: clm4_5_47
+
+Changes answers relative to baseline: Answers change for some of the chemistry configurations, but
+  these changes will not be seen in the CLM standalone tests.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_47
+Originator(s): muszala (Stefan Muszala)
+Date: Tue Nov 12 09:26:20 MST 2013
+One-line Summary: fix Bug 1858 - AGDD now reset annually
+
+Purpose of changes: Fix bug 1858. AGDD is now reset annually. Replace -99999_r8 with a
+ parameter in accumulMod.F90 which is used in accFldsMod.F90
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 1858
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: slevis, sacks, muszala
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/util_share/accumulMod.F90
+M models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+
+CLM testing:
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK
+ yellowstone_pgi - OK
+ frankfurt_intel - OK
+ frankfurt_pgi - OK
+ frankfurt_nag - OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - OK
+
+CLM tag used for the baseline comparisons: clm4_5_46
+
+Changes answers relative to baseline: Generally, No. There may be changes
+ in DV compsets over very long simulation times and at very high resolutions.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_46
+Originator(s): sacks (sacks)
+Date: Fri Nov 8 17:26:02 MST 2013
+One-line Summary: remove zeroing out of slope for special landunits
+
+Purpose of changes:
+
+ Previously, there was code to zero out slope for grid cells with 100% special
+ landunits. However, there were a number of problems with this:
+
+ (1) With dynamic landunits, this is problematic, because a grid cell could
+ start as 100% special landunits, then later become < 100% special landunits
+ (e.g., due to retreating glaciers)
+
+  (2) Moreover, why should the slope of a special landunit depend on whether
+  the grid cell has 100% special landunits? This seems to be saying that, e.g.,
+  the slope of a glacier landunit depends on whether the grid cell is entirely
+  glacier or part glacier and part natural veg.
+
+ (3) And I guess moreover, why is the slope zeroed out for special landunits
+ in the first place?
+
+
+  From talking with Erik, we decided this code was probably a relic from a
+ time when the surface dataset had some bad values (e.g., over Greenland /
+ Antarctica). This is no longer the case, so this code is no longer needed.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: erik
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ Following are the baseline failures, which are expected (see below):
+
+FAIL SMS_D.1x1_mexicocityMEX.ICLM45.frankfurt_intel.clm-default.GC.131107-223431.compare_hist.clm4_5_45
+FAIL SMS_D.1x1_vancouverCAN.ICLM45.frankfurt_pgi.clm-default.GC.131107-223435.compare_hist.clm4_5_45
+FAIL ERS_Mmpi-serial.1x1_mexicocityMEX.I1PTCLM45.frankfurt_nag.clm-default.GC.131107-223439.compare_hist.clm4_5_45
+FAIL SMS_D_Mmpi-serial.1x1_vancouverCAN.I1PTCLM45.frankfurt_nag.clm-default.GC.131107-223439.compare_hist.clm4_5_45
+FAIL ERI.f09_g16.ICLM45BGC.yellowstone_intel.GC.131107-223256.compare_hist.clm4_5_45
+FAIL ERI_D.f09_g16.ICLM45BGC.yellowstone_intel.GC.131107-223256.compare_hist.clm4_5_45
+FAIL ERI_D.ne30_g16.ICLM45BGC.yellowstone_intel.GC.131107-223256.compare_hist.clm4_5_45
+FAIL ERS_Lm3.1x1_vancouverCAN.I1PTCLM45.yellowstone_intel.clm-default.GC.131107-223256.compare_hist.clm4_5_45
+FAIL SMS.1x1_mexicocityMEX.I1PTCLM45.yellowstone_intel.clm-default.GC.131107-223256.compare_hist.clm4_5_45
+FAIL ERI.f09_g16.I1850CRUCLM45BGC.yellowstone_pgi.GC.131107-223301.compare_hist.clm4_5_45
+FAIL ERI.f09_g16.ICLM45BGC.yellowstone_pgi.GC.131107-223301.compare_hist.clm4_5_45
+FAIL ERI_D.f09_g16.ICLM45BGC.yellowstone_pgi.GC.131107-223301.compare_hist.clm4_5_45
+
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+ Following are the baseline failures, which are expected:
+
+FAIL ERS_Lm3.1x1_vancouverCAN.I1PTCLM45.yellowstone_intel.clm-default.compare_hist.clm4_5_45.clm2.h0
+FAIL ERS_Lm3.1x1_vancouverCAN.I1PTCLM45.yellowstone_intel.clm-default.compare_hist.clm4_5_45.clm2.h1
+FAIL SMS.1x1_mexicocityMEX.I1PTCLM45.yellowstone_intel.clm-default.compare_hist.clm4_5_45.clm2.h0
+FAIL SMS.1x1_mexicocityMEX.I1PTCLM45.yellowstone_intel.clm-default.compare_hist.clm4_5_45.clm2.h1
+
+
+CLM tag used for the baseline comparisons: clm4_5_45
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: Any configuration that includes a grid cell that
+ has 100% special landunits, including at least some urban
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ Not investigated carefully, but expected to be larger than roundoff/same
+ climate - since this only affects a very small number of grid cells, and
+ (I believe) only the urban pervious road in those grid cells
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_45
+Originator(s): sacks (sacks)
+Date: Fri Nov 8 16:10:22 MST 2013
+One-line Summary: refactor daylength calculation, and other minor changes
+
+Purpose of changes:
+
+ (1) Compute daylength in a single place, and compute necessary variables at
+ initialization rather than having them on the restart file
+
+ (2) Compute daylength-related variables at initialization rather than having
+ them on the restart file, both to clean things up and to fix some daylength
+ bugs at initialization (these bugs were fixed in a kludgey way in clm4_5_44,
+ and now are fixed robustly)
+
+ (3) Fix daylength calculation at the poles (previously blew up due to
+ roundoff errors) (doesn't change behavior currently, but could change
+ behavior / answers if there were a vegetated landunit at the pole)
+
+ (4) Fix sminn on restart, so that crop restarts can be bfb (bug 1846)
+
+ (5) Add all_active namelist variable that makes even 0-weight points active,
+ for testing purposes
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+ - 1846 (crop restarts aren't exact due to sminn field)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: add all_active namelist variable
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: (1) & (2) reviewed by erik, (3) by self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts: update from scripts4_131030 -> scripts4_131107a
+
+List all files eliminated: none
+
+List all files added and what they do:
+
+========= Compute daylength in a single place
+A models/lnd/clm/src/clm4_5/biogeophys/DaylengthMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+========= Refactor daylength calculation to just compute daylength in a single,
+ central place, and compute necessary variables at initialization
+ rather than having them on the restart file
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+ - also fix sminn on restart (bug 1846)
+M models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+M models/lnd/clm/src/clm4_5/main/CNiniTimeVar.F90
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+
+========= Add all_active namelist variable that makes even 0-weight points
+ active, for testing purposes
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+
+
+========= Add & remove tests from xFail list
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+*** No longer tested (replaced by CME_Ly4)
+- Runs out of time. CME_Ly4.f10_f10.I1850CLM45BGC.yellowstone_intel.clm-monthly passes
+
+*** Now passes
+- restarts not exact due to bug 1846: crop restarts are not exact due to sminn field
+
++ Diffs in cpl log files in rofl, rofi and volr
++ Diffs in cpl log files in rofl, rofi and volr
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: OK
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ Note that we get the following failures in compare_hist:
+
+ FAIL PET_P15x2_Lm25.f10_f10.ICLM45BGCDVCROP.yellowstone_intel.clm-reduceOutput.GC.131107-214732.compare_hist.clm4_5_44
+ FAIL PET_P15x2_Ly3.f10_f10.ICLM45BGCCROP.yellowstone_intel.clm-irrigOn_reduceOutput.GC.131107-214732.compare_hist.clm4_5_44
+
+ However, I think that's expected due to the oddities in the clm4_5_44 tag
+ with openmp - see notes in the ChangeLog for clm4_5_44 for details. Note
+ that this one is identical to clm4_5_43, suggesting that clm4_5_45 undoes
+ the problem introduced in clm4_5_44:
+
+ PASS PET_P15x2_Lm25.f10_f10.ICLM45BGCDVCROP.yellowstone_intel.clm-reduceOutput.compare_hist.clm4_5_43.cpl.hi
+
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+ In particular, note that the clm hist comparison passes for the two above
+ tests that had unexpected cpl diffs:
+
+ PASS PET_P15x2_Lm25.f10_f10.ICLM45BGCDVCROP.yellowstone_intel.clm-reduceOutput.compare_hist.clm4_5_44.clm2.h0
+ PASS PET_P15x2_Ly3.f10_f10.ICLM45BGCCROP.yellowstone_intel.clm-irrigOn_reduceOutput.compare_hist.clm4_5_44.clm2.h0
+
+
+CLM tag used for the baseline comparisons: clm4_5_44
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_44
+Originator(s): sacks (sacks)
+Date: Fri Nov 8 08:19:56 MST 2013
+One-line Summary: temporary hack to daylength initialization to provide baselines for the next tag
+
+Purpose of changes:
+
+ The next tag (clm4_5_45) involves a major refactor to the daylength
+ calculation. That refactor is bfb in most respects, but gives differences in
+ the first time step in a few situations.
+
+ This tag (clm4_5_44) does the minimal changes needed to get the same results
+ as clm4_5_45, in order to have more confidence when testing clm4_5_45.
+
+ Note that the changes here are a kludge that will be reverted in clm4_5_45.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+ 1850 (incorrect daylength in first timestep of some runs)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: OK
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ There were a number of expected compare_hist failures, as described below.
+
+ There was one unexpected compare_hist failure:
+ FAIL PET_P15x2_Lm25.f10_f10.ICLM45BGCDVCROP.yellowstone_intel.clm-reduceOutput.GC.145424.compare_hist.clm4_5_43
+
+ Diffs are just in voc cpl fields. CLM hist files are identical at the end of
+ the run. An SMS version of this test passes. Interestingly, the kludgey code
+ that I have added for clm4_5_44 isn't even executed in this test... so the
+ only diffs should be in variable declarations and 'use' statements. And
+ clm4_5_45 (which I will tag soon) gives identical cpl hist files to
+ clm4_5_43. So there may just be a compiler fluke in the compilation of this
+ tag with openmp enabled. Because clm4_5_45 will give identical results to
+ clm4_5_43, I'm not worrying about this.
+
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+ A number of expected compare_hist failures, as described below.
+
+CLM tag used for the baseline comparisons: clm4_5_43
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM45 with BGC (and probably CN), involving
+ either (a) initial conditions interpolated from a different resolution, or
+ (b) a change in start date relative to the ref date of an initial file
+ (which shows up in ERI tests) - see bug 1850
+ - what platforms/compilers: ALL
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ Not investigated carefully, but almost certainly larger than
+ roundoff/same climate, since this code mod just changes things in the
+ first timestep.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_43
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Wed Nov 6 09:21:25 MST 2013
+One-line Summary: allocate memory for most landunits in every grid cell (needed for dynamic landunits)
+
+Purpose of changes:
+
+ Main change is to allocate memory for most landunits in every grid cell, to
+ support dynamic landunits. Note that we ALWAYS do this extra memory
+ allocation, so that the user isn't required to do interpinic between a
+ non-dynamic run and a dynamic landunit run. (If we eventually change the
+ restart file format / processing so that you can add / remove 0-weight points
+ at will, then we could potentially add some logic to only do this extra
+ allocation if we're using dynamic landunits.)
+
+ Supporting changes are (1) determining which grid cells have enough parameter
+ data to support urban landunits, (2) new initial conditions files, (3) in
+ mksurfdata_map, don't set soil parameters to 0 under glacier, and set urban
+ parameters even if urban cover is 0%
+
+ Other changes are:
+
+ (1) only do snow balance check over active columns
+
+ (2) fix interpinic bug (bug 1839)
+
+ (3) newer files for testing interpinic
+
+
+ NOTE: All CLM4.5 initial conditions will need to be interpinic'ed to be
+ usable in this tag (this has been done for all out-of-the-box initial
+ conditions)
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+ - 1839 (interpinic uses wrong column when there are 0-weight columns in the
+ input file)
+
+ - 1840 (snow balance check is executed over inactive columns)
+
+ - 1825 (surface datasets need urban parameters even when pcturb is 0
+ everywhere): partial fix - still waiting on new USUMB dataset
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: new initial conditions for CLM4.5
+
+List any changes to the defaults for the boundary datasets: new initial
+conditions for CLM4.5
+
+Describe any substantial timing or memory changes:
+
+ Significant memory increases for all CLM4.5 configurations - memory is now
+ allocated for all natural veg landunits, all crop landunits (if using
+ create_crop_landunit), and most urban landunits.
+
+ Also, significant performance decrease (~ 10%) associated with the above
+ change, which I believe is mainly due to decreased cache friendliness.
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated:
+
+========= Change files used for testing interpinic, in order to use files that
+ are up-to-date with the current version of CLM, for a more accurate test
+D models/lnd/clm/tools/clm4_5/interpinic/clmi.I2000CLM45BGC.2000-01-01.10x15_simyr2000_c130607.nc
+
+List all files added and what they do:
+
+========= Change files used for testing interpinic, in order to use files that
+ are up-to-date with the current version of CLM, for a more accurate test
+A models/lnd/clm/tools/clm4_5/interpinic/clmi.I2000CLM45BGC.2000-01-01.10x15_simyr2000_c131104.nc
+
+List all existing files that have been modified, and describe the changes:
+
+========= Determine which grid cells should have urban landunits
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varsur.F90
+M models/lnd/clm/src/clm4_5/main/surfrdMod.F90
+
+========= Create a natural veg landunit and crop landunit in all grid cells;
+ create an urban landunit in all grid cells for which we have
+ determined that urban is "valid"
+M models/lnd/clm/src/clm4_5/main/subgridMod.F90
+
+========= Change a comment
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+
+========= Only do snow balance check over active columns (fixes bug 1840)
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+
+========= Use new initial conditions files that are consistent with the expanded
+ 1-d memory structures
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+========= Don't set soil parameters to 0 under glacier; set urban parameters
+ even if urban cover is 0%
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksoilMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksurfdat.F90
+
+========= Fix interpinic bug (bug 1839)
+M models/lnd/clm/tools/clm4_5/interpinic/src/interpinic.F90
+
+========= Change files used for testing interpinic, in order to use files that
+ are up-to-date with the current version of CLM, for a more accurate test
+M models/lnd/clm/tools/clm4_5/interpinic/interpinic.runoptions
+
+
+========= Add two tests to the xFail list
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
++ Runs out of time. CME_Ly4.f10_f10.I1850CLM45BGC.yellowstone_intel.clm-monthly passes
++ Needs new surface dataset
+
+
+CLM testing:
+
+NOTE: main tests were done with
+dynlu_allocate_memory_n11_fix_cndv_time_averages_n01_clm4_5_41; tools tests with
+dynlu_allocate_memory_n12_clm4_5_42; build-namelist tests with a slightly older
+tag
+
+ build-namelist tests:
+
+ yellowstone: OK. However, the "correct" comparisons are spurious, because I
+ think the build-namelist test is broken
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+ tools testing:
+
+ yellowstone interactive: OK
+
+ *** Expected failures
+ 027 smiS4 TSMscript_tools.sh shared ncl_scripts getregional_datasets.pl getregional .............\c
+ rc=6 FAIL
+ 028 bliS4 TBLscript_tools.sh shared ncl_scripts getregional_datasets.pl getregional .............\c
+ rc=4 FAIL
+
+
+ *** Expected baseline failure (uses new input & output file, and there is a non-bfb change in interpinic)
+ 016 blh54 TBLtools.sh clm4_5 interpinic tools__ds runoptions ....................................\c
+ rc=7 FAIL
+
+ *** Expected diffs in PCT_SAND, PCT_CLAY, SOIL_COLOR
+ 012 blg54 TBLtools.sh clm4_5 mksurfdata_map tools__s namelist ...................................\c
+ rc=7 FAIL
+ 020 bli24 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_T31_crpglc_2000^tools__ds \c
+ rc=7 FAIL
+ 022 bli53 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__o ....\c
+ rc=7 FAIL
+ 024 bli54 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__ds ...\c
+ rc=7 FAIL
+ 026 bli57 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__do ...\c
+ rc=7 FAIL
+
+ *** Expected diffs in urban fields
+ 030 bli74 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools_\c
+ rc=7 FAIL
+ 032 bliT4 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp24_2000^tools_\c
+ rc=7 FAIL
+
+
+CLM tag used for the baseline comparisons: clm4_5_42
+
+Changes answers relative to baseline: NO - but see note below:
+
+ NOTE: This tag has the potential to change answers for cases using initial
+ conditions that were interpinic'ed using the out-of-the-box interpinic,
+ because of bugs in interpinic. This applies to CLM4.5 cases @ ne30 and hcru
+ resolutions, as well as CLM4.5 cases using DV @ f09. However, no diffs showed
+ up in the test suite, so it's possible that this isn't a problem.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_42
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Mon Nov 4 09:45:36 MST 2013
+One-line Summary: fix bug 1857 for CLM4.5 - CNDV running temperature means are incorrect
+
+Purpose of changes:
+
+ Fix bug 1857 for CLM4.5 (not yet fixed for CLM4.0!). From the bugzilla entry:
+
+ In this code in CNDVMod:
+
+ do p = bounds%begp, bounds%endp
+ g = pft%gridcell(p)
+ if (kyr == 2) then ! slevis: add ".and. start_type==arb_ic" here?
+ tmomin20(g) = t_mo_min(p) ! NO, b/c want to be able to start dgvm
+ agdd20(g) = agdd(p) ! w/ clmi file from non-dgvm simulation
+ end if
+ tmomin20(g) = (19._r8 * tmomin20(g) + t_mo_min(p)) / 20._r8
+ agdd20(g) = (19._r8 * agdd20(g) + agdd(p) ) / 20._r8
+ end do
+
+ Notice that this is a loop over p, but it's updating gridcell-level variables.
+ This means that the running temperature means aren't at all what they purport
+ to be. e.g., in a grid cell with the 17 natural PFTs and nothing else, the grid
+ cell-level values will get the 17 pft values averaged in each year, rather than
+ getting a single pft value per year. This means that these temperature
+ variables are closer to a single year's value than to a running mean.
+
+ The fix here should be simple: just change tmomin20 & agdd20 to pft-level
+ variables.
+
+
+ WARNING: USE CAUTION WHEN USING THIS TAG WITH AN OLDER RESTART FILE FROM A
+ CLM4.5 DV CASE (this is not a problem for any out-of-the-box initial
+ conditions files, but could apply if you have your own initial file from a DV
+ run): In this case, the two DV-related variables AGDD20 and TMOMIN20 will be
+ reset to their arbitrary initial conditions.
+
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 1857 - partial fix (still open for clm4.5)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEcosystemDynIniMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+
+========= Remove SSP compare_hist BFAIL from xFAIL list (Ben fixed this in the last tag)
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ All baseline comparisons pass except the following expected failure:
+ FAIL PET_P15x2_Lm25.f10_f10.ICLM45BGCDVCROP.yellowstone_intel.clm-reduceOutput.GC.104252.compare_hist.clm4_5_41
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+ All baseline comparisons pass except the following expected failure:
+ FAIL PET_P15x2_Lm25.f10_f10.ICLM45BGCDVCROP.yellowstone_intel.clm-reduceOutput.compare_hist.clm4_5_41.clm2.h0
+
+CLM tag used for the baseline comparisons: clm4_5_41
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM4.5 with DV
+ - what platforms/compilers: ALL
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ Definitely larger than roundoff, but not investigated as to whether it's
+ same climate or new climate.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_41
+Originator(s): andre (Benjamin Andre,LAWRENCE BERKELEY NATIONAL LABORATORY,510-486-4617)
+Date: Wed Oct 30 17:33:16 MDT 2013
+One-line Summary: update scripts to convert clm4_5 CPP flags to namelist variables.
+
+Purpose of changes: Convert clm4_5 CPP flags in controlMod.F90
+ into namelist variables, update scripts infrastructure
+ to generate cases with namelist variables for bgc
+ (CN, CNDV, methane, vsoilc_centbgc), crop, extra lake layers,
+ vic, nofire, noio, sitespf_pt, snicarfrc, maxpatch_pft.
+
+Requirements for tag:
+
+Test level of tag: regular, tools, build_namelist
+
+Bugs fixed (include bugzilla ID): 1728 (scripts4_20131030 tag). 1770 (clm4_5 portion).
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: removed clm4_5 CPP flags:
+ NOFIRE, LCH4, NITRIF, VERTSOILC, EXTRALAKELAYERS, VICHYDRO, CENTURY, CN,
+ CNDV, CROP, SNICAR, VANCOUVER, NOIO, MEXICOCITY
+
+Describe any changes made to the namelist: added namelist variables:
+
+ use_nofire, use_lch4, use_nitrif_denitrif, use_vertsoilc, use_extralakelayers,
+ use_vichydro, use_century_decomp, use_cn, use_cndv, use_crop, use_snicar_frc,
+ use_vancouver, use_mexicocity, use_noio
+
+ All new namelist variables are logicals.
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: Erik Kluzek, Bill Sacks
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts4_131030
+
+List all files eliminated:
+D models/lnd/clm/bld/config_files/config_definition.xml - split into clm4_X variants
+
+List all files added and what they do:
+A models/lnd/clm/bld/config_files/config_definition_clm4_5.xml
+A models/lnd/clm/bld/config_files/config_definition_clm4_0.xml
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl - set crop=off by default to pick up correct defaults.
+M models/lnd/clm/bld/configure - completely remove clm4_5 only cpp flags, add physics dependent logic to clm4_0 flags.
+M models/lnd/clm/bld/queryDefaultNamelist.pl - point to physics specific config_definitions.xml file
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml - replace xml special characters with alternatives so file can be parsed. Add new build-namelist failures.
+
+M models/lnd/clm/bld/build-namelist - add logic for all new clm4_5 namelist variables, commandline options, switch defaults to use_N.
+
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml - add new namelist variables
+
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90 - add CNNDynamicsInit() and logic to set nfix_timeconst from use_nitrif_denitrif namelist instead of CPP.
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90 - convert ifdef'd parameters to variables
+M models/lnd/clm/src/clm4_5/main/controlMod.F90 - final conversion of CPP flags to namelist variables
+
+ Switch the following files to use new namelist variables for attributes:
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000_glacierMEC_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850_glacierMEC_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_glacierMEC_transient.xml
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone - most tests will fail, tests need to be updated in future tag
+ frankfurt
+
+ regular tests (aux_clm):
+ - nlcomp tests fail for all clm4_5 because of the new namelist variables.
+ - SSP tests should now be BFAIL, pass on next tag
+ - All hist comp are bit for bit.
+
+ yellowstone_intel ok
+ yellowstone_pgi ok
+ frankfurt_intel ok
+ frankfurt_pgi ok
+ frankfurt_nag ok
+
+ tools testing:
+
+ yellowstone interactive - smiS4 (getregional) also fails in clm4_5_40
+ frankfurt interactive - N/A
+
+CLM tag used for the baseline comparisons: clm4_5_40
+
+Changes answers relative to baseline: None.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_40
+Originator(s): muszala (Stefan Muszala,UCAR/TSS,303-497-1320)
+Date: Thu Oct 24 07:54:46 MDT 2013
+One-line Summary: fix Bug 1752 - urban conductances depend on weights in an undesirable way
+
+Purpose of changes:
+
+Three parts to this tag. Description from Oleson:
+
+1)
+  fix Bug 1752 - modified UrbanMod.F90 to calculate
+  the conductances correctly. I created new variables to more clearly
+  distinguish between scaled and unscaled conductances.
+2)
+  add 2 new diagnostic history fields (FIRE_U, FIRE_R)
+3)
+ fix small bug in which the history field output of some of the
+ anthropogenic heat flux variables are not bfb on restart when finidat is blank.
+ I found this when verifying bfb for the original bug fix. The cause of this
+ is initialization which sets non-urban to special value, instead of zero, which
+ is what is desired. On restart, this initialization is not done and the history
+ file set_nourb=0 takes precedence. A related issue is that eflx_building_heat is
+ not zero for pervious/impervious road when nlevurb /= nlevgrnd.
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 1752
+
+ long test with irrigate=.true. (because irrigation likely never took effect
+ in the old test).
+
+Requirements for tag:
+
+Test level of tag: regular & build_namelist
+
+Bugs fixed (include bugzilla ID):
+ - 1827 / 1830: testmods don't work right for multi-instance tests (fix via
+ scripts update)
+ - 1829: PCT_SAND, PCT_CLAY and SOIL_COLOR are incorrect for some grid cells
+ (fix via new surface datasets)
+ - 1831: turning on irrigation leads to death in initialization (fix via a new
+ initial conditions file)
+
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: see changes in boundary datasets
+
+List any changes to the defaults for the boundary datasets:
+
+ - new surface datasets for all resolutions for CLM4.5: same as before except
+ for PCT_SAND, PCT_CLAY and SOIL_COLOR: these are no longer zeroed out under
+ points that are believed to be 100% glacier ("believed to be" because this
+ previously zeroed out some points that ended up having some other special
+ landunits, such as lake)
+
+ - new initial conditions file for CLM4.5, irrigate=.true., f10
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts: scripts4_131001 -> scripts4_131003
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= new logic for irrigation for finidat
+M models/lnd/clm/bld/build-namelist
+
+========= new surface datasets; distinguish finidat based on value of 'irrigate'
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+CLM testing:
+
+ build-namelist tests:
+
+ caldera: OK
+ The following failures were expected due to new surface datasets:
+ 413/439 < FAIL>
+ 418/439 < FAIL>
+ 423/439 < FAIL>
+ 428/439 < FAIL>
+ 433/439 < FAIL>
+ 438/439 < FAIL>
+
+
+ regular tests (aux_clm):
+
+ edison_intel (aux_clm_ys_intel & aux_clm_ys_pgi lists): OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ edison_intel: aux_clm_ys_intel list: OK
+ edison_intel: aux_clm_ys_pgi list: OK
+
+CLM tag used for the baseline comparisons: clm4_5_35
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All CLM45
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ Not investigated carefully, but probably larger than roundoff/same climate.
+
+ Answer changes are due to new surface datasets. This is due to fixing
+ bug 1829 (PCT_SAND, PCT_CLAY and SOIL_COLOR are incorrect for some grid
+ cells); it looks like this just affects answers over a small number of
+ lake points (e.g., 9 lake points in an f19 run).
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_35
+Originator(s): sacks (Bill Sacks)
+Date: Tue Oct 1 09:47:45 PDT 2013
+One-line Summary: get CLM running on edison
+
+Purpose of changes:
+
+ Update scripts and Machines externals to get the CLM test suite running on
+ edison; this will be our replacement for yellowstone while yellowstone is
+ down.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: N/A
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+-scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_130929
+-scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130927
++scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_131001
++scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130930b
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= Remove PEM test that should pass now; add xFails for edison
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+
+CLM testing:
+
+ NOTE: Testing was done on tag port_to_edison_02_clm4_5_34. Since then, scripts
+ has been updated from scripts4_130930a to scripts4_131001. However, the only
+ difference is the removal of some duplicated tests from the test list.
+
+ regular tests (aux_clm):
+
+ edison_intel (aux_clm_ys_intel & aux_clm_ys_pgi lists): OK
+ All pass except:
+
+      (1) ERH_D.f19_g16.I1850CLM45CN.yellowstone_pgi.clm-default
+      (2) Can't find user datasets
+      (3) Seg fault while writing h1 file
+      (4) ERS_Ld211_D_P112x1.f10_f10.ICNCROP in yellowstone intel list
+
+      #1 and #4 have been failing on yellowstone, #2 and #3 are new failures on edison
+
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ NOTE: only generate done here, because no baselines existed
+
+ edison_intel: aux_clm_ys_intel list: OK
+ edison_intel: aux_clm_ys_pgi list: OK
+
+CLM tag used for the baseline comparisons: clm4_5_34
+
+ NOTE: Baseline comparisons only done for frankfurt tests; no baseline
+ comparisons done with component_gen_comp, because no baselines existed on
+ edison.
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_34
+Originator(s): erik (Erik Kluzek)
+Date: Mon Sep 30 01:30:25 MDT 2013
+One-line Summary: Get PTCLM working, fix a few small bugs
+
+Purpose of changes:
+
+Get PTCLM fully working. Allow PTCLM to work with ALL I compsets including clm4_0 compsets.
+Add a new option to PTCLM from Keith Oleson --cycle_forcing to set it up to cycle over the forcing.
+Another option is to build datasets in the "-mydatafiles" directory (by default under PTCLM
+directory). The datasets now have creation time-stamps in them as well. Rename QIAN_tower_yrs
+to -use_tower_yrs and remove QIANforcing (now chosen by compset).
+scripts now has four different I1PT compsets two new ones for CLM40CN and CLM45BGC. datm
+CLM_USRDAT domain file for CLM1PT forcing points to the ATM_DOMAIN_FILE/PATH. Add some new
+datasets to the siteDIR from Keith Oleson. Update documentation, remove unused template dir.
+Add a script to rename creation dates for map files, so you don't have to regenerate them
+each day.
+
+Fix a few small bugs. Allow clm4_5 to have suplnitro and bgc_spinup to only give a warning
+rather than die. Fix a corrupted rawdata PFT file. Fix mkscripgrid.ncl for regional SCRIP
+grid creation. Remove some leftover fine-mesh variables that aren't needed anymore. Remove
+reference to scaled_harvest in CLM build-namelist which was removed a long time ago.
+
+Remove a mapping file that didn't seem to be needed for clm4_0 mkmapdata (a default
+didn't exist for it either). Add -usr_mapdir option to clm4_0 mksurfdata.pl which is needed
+for PTCLM for clm4_0 compsets.
+
+Requirements for tag: Update scripts and get PTCLM working
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 1826 (allow clm4_5 and suplnitro to be ALL for bgc_spinup)
+ 1818 (two new options to PTCLM)
+ 1762 (Fix corrupted rawdata PFT file)
+ 1757 (Bug in mkscripgrid.ncl for regional/global SCRIP grid creation)
+ 1623 (Remove some leftover fine-mesh variables _a arrays)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Allow two options to go without dying
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, Machines, datm, and csm_share
+ scripts to scripts4_130929
+ Machines to Machines_130927
+ datm to datm8_130919
+ csm_share to share3_130918
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/tools/shared/mkmapdata/mkmapdata.sh ------- Remove 10x10min_IGBPmergeICESatGIS for clm4_0
+ M models/lnd/clm/tools/shared/mkmapgrids/mkscripgrid.ncl --- Fix bug 1757 for regional grid creation
+ M models/lnd/clm/tools/clm4_0/mksurfdata_map/mksurfdata.pl - Add usr_mapdir option
+
+ M models/lnd/clm/bld/build-namelist - Allow missing clm_usrdat files to continue, remove scaled_harvest (long gone)
+ allow bgc_spinup and suplnitro to coexist with warning
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0_tools.xml - Fix corrupted 856 raw PFT file
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5_tools.xml - Fix corrupted 856 raw PFT file, remove 1000-1004 testyrs
+
+ M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90 - Remove finemesh _a variables
+ M models/lnd/clm/src/clm4_5/main/clmtype.F90 -------- Remove finemesh _a variables
+ M models/lnd/clm/src/clm4_0/main/clmtypeInitMod.F90 - Remove finemesh _a variables
+ M models/lnd/clm/src/clm4_0/main/clmtype.F90 -------- Remove finemesh _a variables
+
+CLM testing: regular, build_namelist, tools
+
+ build-namelist tests:
+
+ yellowstone yes
+ frankfurt yes
+
+ regular tests (aux_clm):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ frankfurt_intel yes
+ frankfurt_pgi yes
+ frankfurt_nag yes
+
+ tools testing:
+
+ yellowstone interactive yes
+
+CLM tag used for the baseline comparisons: clm4_5_33
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_5_33
+Originator(s): muszala (Stefan Muszala,UCAR/TSS,303-497-1320)
+Date: Thu Sep 26 10:42:56 MDT 2013
+One-line Summary: clean up from mistakes in previous tag
+
+Purpose of changes: clean up time-stamps and a mistake in clm4_5_32
+
+Requirements for tag: N/A
+
+Test level of tag: N/A
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: N/A
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes: N/A
+
+CLM testing: N/A
+
+CLM tag used for the baseline comparisons: N/A
+
+Changes answers relative to baseline: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_32
+Originator(s): muszala (Stefan Muszala,UCAR/TSS,303-497-1320)
+Date: Thu Sep 26 10:07:14 MDT 2013
+One-line Summary: bug fix tag - 1798, 1810
+
+Purpose of changes: fix bug 1798 and 1810.
+
+http://bugs.cgd.ucar.edu/show_bug.cgi?id=1798
+http://bugs.cgd.ucar.edu/show_bug.cgi?id=1810
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 1798 and 1810
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+- for bug 1798
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+- for bug 1810
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_0/main/controlMod.F90
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK
+ yellowstone_pgi - OK
+ frankfurt_intel - OK
+ frankfurt_pgi - OK
+ frankfurt_nag - OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - OK
+
+CLM tag used for the baseline comparisons: clm4_5_31
+
+Changes answers relative to baseline: no
+
+===============================================================
+===============================================================
+Tag name: clm4_5_31
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Wed Sep 25 10:12:47 MDT 2013
+One-line Summary: fix bug 1820: incomplete conditional in CNSoyfix leads to buggy results and decomposition dependence
+
+Purpose of changes:
+
+ Fix bug 1820: incomplete conditional in CNSoyfix leads to buggy results and
+ decomposition dependence. Fix for this is based on analysis of the original
+ Agro-IBIS code.
+
+Requirements for tag: fix bug 1820, the following tests should now pass:
+ PET_P15x2_Lm25.f10_f10.ICLM45BGCDVCROP.yellowstone_intel
+ PET_P15x2_Ly3.f10_f10.ICLM45BGCCROP.yellowstone_intel
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+ 1820: incomplete conditional in CNSoyfix leads to buggy results and decomposition dependence
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Beth Drewniak, Sam Levis
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+
+========= Remove now-passing PET tests
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+CLM tag used for the baseline comparisons: clm4_5_30
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM45 with CROP
+ - what platforms/compilers: ALL
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ larger than roundoff, but not investigated in detail
+
+ Note that no changes were observed in the test suite, but this is due to
+ a limitation of the test suite (there are very few multi-year crop tests;
+ the only global multi-year tests are the newly-passing PET tests, which
+ don't have baselines)
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_30
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Tue Sep 24 13:08:01 MDT 2013
+One-line Summary: fix performance bug in decomposition initialization
+
+Purpose of changes:
+
+Fix performance bug in decomposition initialization (bug 1771). Code mods from
+Tony Craig.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 1771 ( Fix for an initialization performance bug)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes:
+
+  Improves timing of initialization for high resolution cases
+
+Code reviewed by: tcraig
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/main/decompInitMod.F90
+M models/lnd/clm/src/clm4_0/main/decompInitMod.F90
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+CLM tag used for the baseline comparisons: clm4_5_29
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_29
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Tue Sep 24 10:55:42 MDT 2013
+One-line Summary: fix threading in CLM4.5, and other misc fixes
+
+Purpose of changes:
+
+(Mostly) fix threading in CLM4.5. This consisted of:
+
+(1) Rework initGridCellsMod to keep all points in a clump contiguous
+
+(2) Add info in bounds derived type (not necessary, but this allows
+for more error checking and simplifies some code)
+
+(3) Fix which bounds are passed to reweightWrapup in initialization
+
+(4) Get rid of syntax like foo(:) = 0, instead using explicit bounds
+
+(5) Rework bounds declarations for subroutine array arguments, both in
+caller (explicitly subset argument by bounds) and callee (use
+assumed-shape array arguments rather than declaring upper bounds), and
+add assertions on array sizes.
+
+See https://wiki.ucar.edu/display/ccsm/Community+Land+Model+Developers+Guide
+("Guidelines for passing array arguments to subroutines") for the new
+conventions that are implemented here.
+
+(6) Fix crop threading bug, related to nyrs (bug 1598), both in clm4.5 and clm4.0
+
+However, note that there is still a crop threading bug (bug 1820), which will
+need to be fixed in a separate tag.
+
+
+Also, some unrelated changes:
+
+(1) Fix size of a megan variable, both in clm4.5 and clm4.0.
+
+(2) Remove some unused variables from Hydrology2Mod / SoilHydrologyMod
+
+(3) Fix some bugs in histFileMod / histFldsMod
+
+(4) Reorder a loop in SurfaceAlbedo to get better performance (especially with
+expanded memory allocation for dynamic landunits, in an upcoming tag)
+
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+
+ 1598 (crop threading in clm4.0 and clm4.5)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets:
+
+ Added f10 initial file for CLM4.5 BGCCROP, for testing purposes
+
+Describe any substantial timing or memory changes:
+
+ Timing is currently 5-10% worse, due to calls to shr_log_errMsg, within
+ shr_assert calls. This should return to previous timings in non-debug runs
+ once shr_assert calls are ifdef'ed out in non-debug runs (this requires a
+ csm_share update that Sean Santos is working on).
+
+Code reviewed by: portions reviewed by erik
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts4_130912 -> scripts4_130916
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+========= Significant rework to work with multiple clumps per proc
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+
+========= Add info in bounds derived type
+M models/lnd/clm/src/clm4_5/main/decompMod.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+ - also fix crop threading bug (1598)
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+ - also use explicit bounds instead of things like foo(:)
+
+========= Remove some unused variables
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+ - also other bounds-related changes as above
+
+========= Change 'bounds' to 'bounds_proc', use clump bounds for call to
+ reweightWrapup, get rid of abort if running with openMP
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+
+========= Get rid of syntax like foo(:), instead using explicit bounds
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/STATICEcosysDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+
+========= Rework bounds declarations for subroutine array arguments, both in
+ caller (explicitly subset argument by bounds) and callee (use
+ assumed-shape array arguments rather than declaring upper bounds), and
+ add assertions on array sizes
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+ - also use explicit bounds instead of things like foo(:)
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAnnualUpdateMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/main/dynlandMod.F90
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+ - also use explicit bounds instead of things like foo(:)
+M models/lnd/clm/src/clm4_5/main/pft2colMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+ - also use explicit bounds instead of things like foo(:)
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+ - also use explicit bounds instead of things like foo(:)
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/FrictionVelocityMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/TridiagonalMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+ - also use explicit bounds instead of things like foo(:)
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SNICARMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+ - also remove some now-unneeded temporary arrays
+ - also use explicit bounds instead of things like foo(:)
+M models/lnd/clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+ - also reorder a loop to get better performance
+M models/lnd/clm/src/clm4_5/biogeophys/BandDiagonalMod.F90
+
+========= Fix crop threading bug (1598), in both clm4.5 and clm4.0, by reworking
+ where nyrs is updated
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CropRestMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_0/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/CropRestMod.F90
+M models/lnd/clm/src/clm4_0/main/clm_driver.F90
+
+========= Add comments
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+
+========= Fix size of a megan variable
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+ - also initialize rootr, as it was in clm4_5_20 and prior
+M models/lnd/clm/src/clm4_0/main/clmtypeInitMod.F90
+
+========= Fix some hist file bugs:
+ - increase max number of characters allowed for hist field names
+ - when adding a field, make it work to say default='active' --
+ previously, explicitly setting default='active' did the same thing
+ as setting default='inactive'
+ - change ptr_pft to ptr_col for a few column-level history variables
+ - remove two duplicate hist_addfld calls
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+ - also add some bounds to array arguments
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+
+========= Add f10 initial file for CLM45 BGCCROP, for testing
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+========= Remove some PET tests from the xFail list; note that some PET tests
+ still fail due to bug 1820
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: OK
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+CLM tag used for the baseline comparisons: clm4_5_28
+
+Changes answers relative to baseline: NO, with the following minor exceptions:
+
+ In general, no answer changes for non-threaded runs (changes answers for
+ threaded runs due to significant bug fixes!)
+
+ Changes answers for CLM45 BGC CROP at f10 due to new initial conditions
+ (instead of cold start)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_28
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Fri Sep 20 21:29:39 MDT 2013
+One-line Summary: fix FracH2oSfc bug
+
+Purpose of changes:
+
+Fix bug 1811: FracH2oSfc is called from within a loop over all points. Sean
+Swenson realized that the offending block of code is no longer needed, so we
+have removed it.
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 1811 (FracH2oSfc is called from within a loop
+over all points)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: Sean Swenson
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+========= Remove the offending (and no longer needed) block of code
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+
+========= Remove a now-unused variable
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+
+========= Move xFail test to the right location
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+
+CLM testing:
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+
+ See notes below on answer changes
+
+
+CLM tag used for the baseline comparisons: clm4_5_27
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All CLM45 cases
+ - what platforms/compilers: All
+ - nature of change: larger than roundoff/same climate
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ Sean Swenson performed two 20-year runs with and without the fix. His report
+ is:
+
+ There are some differences, mainly in runoff, and it looks like at the 1%
+ level mostly, with a few scattered points showing up on the significance
+ plots. Other fields like latent heat, soil moisture/temperature, or water
+ table show even less differences.
+
+ http://www.cgd.ucar.edu/staff/swensosc/public/diagnostics/test_frach2o-test_no_frach2o/setsIndex.html
+
+===============================================================
+===============================================================
+Tag name: clm4_5_27
+Originator(s): sacks (Bill Sacks,UCAR/CSEG,303-497-1762)
+Date: Fri Sep 20 20:43:16 MDT 2013
+One-line Summary: fix crop nyrs bug
+
+Purpose of changes: Fix bug 1815 (nyrs is incorrect at the start of a crop run,
+leading to incorrect GDD values for the first 20 years or so of a crop
+simulation)
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):
+- 1815 (nyrs is incorrect at the start of a crop run, leading to incorrect GDD
+values for the first 20 years or so of a crop simulation)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None (but see changes to boundary
+datasets below)
+
+List any changes to the defaults for the boundary datasets:
+
+ New crop initial conditions for CLM4.5 BGCCROP @ f19 - same as old dataset,
+ but with restyear changed from 1 to 0
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: slevis
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+========= Don't increment nyrs on the first timestep of a startup run, so that
+ nyrs is correctly 0 rather than 1 for the first year
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/CNPhenologyMod.F90
+
+========= New crop initial conditions for CLM4.5 BGCCROP @ f19 - same as old dataset,
+ but with restyear changed from 1 to 0
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+========= add test that sometimes runs out of time, move test from
+ yellowstone_intel to yellowstone_pgi
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: OK
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: OK
+ yellowstone_pgi: OK
+ frankfurt_intel: OK
+ frankfurt_pgi: OK
+ frankfurt_nag: OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: OK
+ - expected diffs in SMS_Ly1.f19_g16.ICLM45BGCCROP.yellowstone_intel
+ - baselines messed up for
+ ERS_Ly5.f10_f10.I20TRCRUCLM45BGC.yellowstone_intel, so comparison not
+ done for that test
+ yellowstone_pgi: NO BASELINES, SO COMPARISONS NOT RUN
+
+CLM tag used for the baseline comparisons: clm4_5_26
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All crop cases (clm4.0 or clm4.5) that either
+ use arbitrary initial conditions or use the clm4.5 out-of-the-box initial
+ conditions for BGCCROP @ f19
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new
+ climate): new climate
+
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+
+ Sam Levis examined a 3-year run with arbitrary initial conditions, and
+ verified that crop LAI is much more realistic in the new simulation
+ (previously, crop LAI was near-zero for the first few years)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_26
+Originator(s): muszala (Stefan Muszala)
+Date: Thu Sep 19 17:07:11 MDT 2013
+One-line Summary: water balance and SMS_Ly1.f19_g16.ICLM45BGCCROP fix
+
+Purpose of changes: 1) Fix water balance error in f09_g16 I1850CRUCLM45BGC simulation
+ 2) Get all machine/compiler combinations of
+ SMS_Ly1.f19_g16.ICLM45BGCCROP working
+ - this fix required (1) and a fix to fthresh in RtmFloodInit
+ - new RTM tag rtm1_0_32 to go along with this
+                     - PGI+frankfurt version of this test only works with 16 MPI processes
+
+Requirements for tag: N/A
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID):1808
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: S. Swenson, D. Lawrence
+
+List any svn externals directories updated (csm_share, mct, etc.): rtm1_0_32
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+
+Index: models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+===================================================================
+--- models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90 (revision 51190)
++++ models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90 (working copy)
+@@ -1110,15 +1110,8 @@
+ do j = 1, nlevsoi
+ if(h2osoi_liq(c,j)<0._r8)then
+ qflx_deficit(c) = qflx_deficit(c) - h2osoi_liq(c,j)
+- h2osoi_liq(c,j) = 0._r8
+ endif
+ enddo
+- !reduce qcharge if necessary
+- !ideally, I can set qflx_deficit as a local variable, but it is helpful
+- !to diagnose the problem associated with the solver for the richards' equation.
+- if(qflx_deficit(c)>0._r8)then
+- qcharge(c) = qcharge(c) - qflx_deficit(c)/dtime
+- endif
+ enddo
+
+ end associate
+@@ -1892,9 +1885,12 @@
+
+ do fc = 1, num_hydrologyc
+ c = filter_hydrologyc(fc)
+- xs1(c) = max(max(h2osoi_liq(c,1),0._r8)-max(0._r8,(pondmx+watsat(c,1)*dzmm(c,1)-h2osoi_ice(c,1))),0._r8)
+- h2osoi_liq(c,1) = min(max(0._r8,pondmx+watsat(c,1)*dzmm(c,1)-h2osoi_ice(c,1)), h2osoi_liq(c,1))
+
++ !scs: watmin addition to fix water balance errors
++ xs1(c) = max(max(h2osoi_liq(c,1)-watmin,0._r8)- &
++ max(0._r8,(pondmx+watsat(c,1)*dzmm(c,1)-h2osoi_ice(c,1)-watmin)),0._r8)
++ h2osoi_liq(c,1) = h2osoi_liq(c,1) - xs1(c)
++
+ if (urbpoi(clandunit(c))) then
+ qflx_rsub_sat(c) = xs1(c) / dtime
+ else
+
+CLM testing:
+
+- general note: for clm45 compsets-both clm and cpl history files change
+
+- specific testing for these bug fixes:
+
+1) Water balance fix-ran a clone of run from Dave Lawrence:
+ -- create_newcase -compset I1850CRUCLM45BGC -res f09_g16 -mach yellowstone -case /glade/u/home/dlawren/expts/clm4.5/clm45bgc_1deg4519_1850spin_bd
+ -- this ran for over 25 years with no water balance errors.
+
+2) SMS_Ly1.f19_g16.ICLM45BGCCROP
+
+PASS SMS_Ly1.f19_g16.ICLM45BGCCROP.yellowstone_intel.clm-reduceOutput.115612
+PASS SMS_Ly1.f19_g16.ICLM45BGCCROP.yellowstone_pgi.clm-reduceOutput.115522
+
+PASS SMS_Ly1.f19_g16.ICLM45BGCCROP.frankfurt_intel.clm-reduceOutput.115217
+PASS SMS_Ly1_Mmpich.f19_g16.ICLM45BGCCROP.frankfurt_nag.clm-reduceOutput.120824
+PASS SMS_Ly1.f19_g16.ICLM45BGCCROP.frankfurt_pgi.clm-reduceOutput.016 -- only with 16 MPI tasks
+
+3) To make sure the RTM refactor did not create any BFB changes, I ran SMS_D.f19_g16.ICLM45BGCCROP.yellowstone_intel
+ with rtm1_0_31 and rivrtm/branch_tags/bcf_tags/bcf_02_rtm1_0_31.
+ - With flood_mode='NULL' - Coupler and land history files were BFB.
+ - With flood_mode='ACTIVE' - Coupler, rtm and land history files were BFB.
+
+ build-namelist tests: N/A
+
+ regular tests (aux_clm):
+
+ yellowstone_intel - OK - changes in cpl. hist files expected
+ yellowstone_pgi - OK - changes in cpl. hist files expected
+ frankfurt_intel - OK - changes in cpl. hist files expected
+ frankfurt_pgi - OK - changes in cpl. hist files expected
+ frankfurt_nag - OK - changes in cpl. hist files expected
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel - OK - changes in clm fields expected.
+
+CLM tag used for the baseline comparisons: clm4_5_25
+
+Changes answers relative to baseline: Yes. For CLM45 compsets. All changes are from the SoilHydrology mods
+as the RTM refactor and fthresh fix are BFB.
+
+Coupler history fields that change:
+
+l2x_Sl_avsdr, l2x_Sl_anidr, l2x_Sl_avsdf, l2x_Sl_anidf, l2x_Sl_tref, l2x_Sl_qref,
+l2x_Sl_t, l2x_Sl_fv, l2x_Sl_ram1, l2x_Sl_snowh, l2x_Sl_u10, l2x_Fall_swnet,
+l2x_Fall_taux, l2x_Fall_tauy, l2x_Fall_lat, l2x_Fall_sen, l2x_Fall_lwup,
+l2x_Fall_evap, l2x_Fall_flxdst1, l2x_Fall_flxdst2, l2x_Fall_flxdst3, l2x_Fall_flxdst4,
+l2x_Flrl_rofl, l2x_Fall_voc001, l2x_Fall_voc002, l2x_Fall_voc003, l2x_Fall_voc004,
+l2x_Fall_voc005, l2x_Fall_voc006, l2x_Fall_voc007, l2x_Fall_voc008, x2l_Flrr_volr,
+r2x_Forr_rofl, r2x_Forr_rofi, r2x_Flrr_volr, x2r_Flrl_rofl
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM45 compsets
+ - what platforms/compilers: All
+
+===============================================================
+===============================================================
+Tag name: clm4_5_25
+Originator(s): erik (Erik Kluzek)
+Date: Fri Sep 13 13:49:45 MDT 2013
+One-line Summary: Bring in Tony's changes to kick sno all the way up to the coupler layer, makes all
+ CESM components more similar to each other
+
+Purpose of changes:
+ Bring in Tony's cplupa branch (cplupa_n06_clm4_5_24) to trunk. This branch moves sno
+ fields all the way to the top coupler layer rather than being inside of CLM. This makes all
+ CESM components more similar to each other.
+
+      There were also some small fixes on the side that allow some more tests to work.
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 1789 (fix NoVSNoNI test)
+ 1788 (fix US-UMB test)
+ 1779 (fix RTM multi-instance)
+ 1777 (fix RTM branch cases)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: Add new CPP token to CLM USE_ESMF_METADATA (with no direct mechanism to use)
+ We hate CPP tokens (but had to let this go, only needed by ESMF development folks)
+
+Describe any changes made to the namelist: drv namelist changes, no changes to CLM namelist
+
+List any changes to the defaults for the boundary datasets: CLM_USRDAT fsurdat files different directory for clm4_0 than clm4_5
+ remove missing ne16np4 fpftdyn file
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by: self, tcraig
+
+List any svn externals directories updated (csm_share, mct, etc.):, scripts, drv, cism, rtm, csm_share, data and stub models
+
+ scripts to scripts4_130912
+ drv to drvseq4_3_03
+ datm to datm8_130424
+ socn/sice/sglc/swav to stubs1_4_02
+ rtm to rtm1_0_31
+ cism to cism1_130905
+ csm_share to share3_130906
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl ------------ Remove ne16 20thC test
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml ---- Remove missing ne16 fpftdyn file
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml - Different directory for
+ clm4_0/clm4_5 surface datasets
+
+ M models/lnd/clm/src/cpl_share/clm_cpl_indices.F90 - Field names change
+ M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90 ------ Remove "sno" and "s" fields
+ M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90 ---- Remove rofi/rofl
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90 ---- Remove "sno" and "s" fields, add USE_ESMF_METADATA #ifdef
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_mct.F90 ----- Remove "sno" and "s" fields, add USE_ESMF_METADATA #ifdef
+ M models/lnd/clm/src/clm4_0/main/clm_glclnd.F90 ---- Remove rofi/rofl
+
+CLM testing: regular
+
+ build-namelist tests:
+
+ bluefire yes
+ frankfurt yes
+
+ regular tests (aux_clm):
+
+ yellowstone_intel yes
+ yellowstone_pgi yes
+ frankfurt_intel yes
+ frankfurt_pgi yes
+ frankfurt_nag yes
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel partial (not completed before tag made)
+ yellowstone_pgi partial (not completed before tag made)
+
+CLM tag used for the baseline comparisons: clm4_5_24
+
+Changes answers relative to baseline: None (bit-for-bit)
+
+ Although my "I compset" testing showed no changes, fully coupled changes
+ do show differences. Coupler namelists also change.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_24
+Originator(s): sacks (sacks)
+Date: Tue Sep 3 21:36:13 MDT 2013
+One-line Summary: update externals to cesm1_3_beta02 or later
+
+Purpose of changes:
+
+Update externals to cesm1_3_beta02 or later
+
+
+Requirements for tag:
+
+Test level of tag: regular
+
+Bugs fixed (include bugzilla ID): 1722 (Test failure with VIC and more_vertlayers)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: Not investigated
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts : scripts4_130816 -> scripts4_130830a
+ Machines : Machines_130529 -> Machines_130830
+ drv : drvseq4_2_33 -> drvseq4_2_35
+ cism : cism1_130502 -> cism1_130624
+ csm_share : share3_130528 -> share3_130723
+ timing : timing_130417 -> timing_130506
+ mct : compiler_fixes_n01_MCT_2.8.3 -> compiler_fixes_n03_MCT_2.8.3
+ mapping : mapping_130509 -> mapping_130716
+ gen_domain : mapping_130509/gen_domain_files -> mapping_130716/gen_domain_files
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+========= Remove trailing whitespace
+M .ChangeLog_template
+
+
+Index: models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+===================================================================
+--- models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml (revision 50759)
++++ models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml (working copy)
+@@ -51,17 +51,14 @@
+
+
+ use before define in history. runs with frankfurt_pgi, yellowstone_intel and yellowstone_pgi
+- floating point exception. problem with VIC combined with vertical layers
+ once threading fixed, this should pass
+ once threading fixed, this should pass
+- core dumps in ref1. Problems with vertical layers. Run without clm-vrtlay and clm-default and it runs
+ starting in clm4_5_07--The cpl.hi.nc file is not being copied on a generate like it should.
+ Cannot turn clm4me mode on -- without clm4_5 physics! problem in scripts4_130809b
+
+
+- Problem with scripts and testId string length. This passes with a long testId
++ Problem with scripts and testId string length. This passes with a long testId
+ Restart not BFB. Runs as ERH_D.f19_g16.I1850CLM45CN.yellowstone_pgi
+- floating point exception. problem with VIC combined with vertical layers
+ once threading fixed, this should pass
+ once threading fixed, this should pass
+ once threading fixed, this should pass
+@@ -78,7 +75,7 @@
+ Water balance errors followed by "negative conc. in ch4tran", then tries "-10^-12 < smin_nh4 < 0. resetting to zero.", then it exits at approximately 9 months. This same test passes with yellowstone_intel and frankfurt_intel
+
+
+- CMake 2.8.6 or higher is required. You are running version 2.6.4
++ Problem with cism build
+ Fails after reading clmforc.Li_2012_hdm_0.5x0.5_AVHRR_simyr1850-2010_c130401.nc. This same test passes with yellowstone_intel, yellowstone_pgi, frankfurt_intel and frankfurt_nag
+ Fails after reading clmforc.Li_2012_hdm_0.5x0.5_AVHRR_simyr1850-2010_c130401.nc. This same test passes with yellowstone_intel, yellowstone_pgi, frankfurt_intel and frankfurt_nag
+ Fails after reading clmforc.Li_2012_hdm_0.5x0.5_AVHRR_simyr1850-2010_c130401.nc. This same test passes with yellowstone_intel
+
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: YES
+ All PASS or xFAIL
+
+ regular tests (aux_clm):
+
+ yellowstone_intel: YES
+ yellowstone_pgi: YES
+ frankfurt_intel: YES
+ frankfurt_pgi: YES
+ frankfurt_nag: YES
+
+ All PASS or xFAIL
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel: YES
+ All PASS except for the following newly-passing tests (for
+ which these failures are unsurprising):
+
+ BFAIL2 ERI_D.ne30_g16.ICLM45BGC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_23.clm2.h0 (baseline history file does not exist)
+ BFAIL2 ERI_D.ne30_g16.ICLM45BGC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_23.clm2.h1 (baseline history file does not exist)
+ FAIL ERS_D.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_23.clm2.h0
+ FAIL ERS_D.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_23.clm2.h1
+
+ yellowstone_pgi: YES
+ Some answer changes (presumably due to compiler change)
+
+CLM tag used for the baseline comparisons: clm4_5_23
+ Note: renamed baselines for frankfurt nag because test names have changed
+
+Changes answers relative to baseline: YES
+
+ If a tag changes answers relative to baseline comparison the
+ following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+	compsets using cism change answers for g2x fields
+
+ many tests change answers with pgi, both on yellowstone &
+ frankfurt, presumably due to new compiler
+
+ nature of change not investigated
+
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_23
+Originator(s): muszala (Stefan Muszala)
+Date: Thu Aug 22 09:42:43 MDT 2013
+One-line Summary: refactor to allow CH4 params. to be read from netcdf file and clean up clm4_5_20
+
+Purpose of changes:
+ The second of two tags that brings in parameters that are read from netcdf file (ch4 parameters). Please
+ see the ChangeLog entry for clm4_5_20.
+
+ - Bring in ch4 parameters
+ - Combine fconsts file and fpftcon file. New file name is paramfile (clm_params.c130821.nc)
+ - Refactor so that types, subroutine names and type instances have the names params in them (instead of consts)
+ - Remove many ch4 namelist vars. since they are now read from the param file
+ - Add new namelist called use_aereoxid_prog to control old aereoxid namelist
+
+ A bulk of this work was completed by Rajendra Paudel.
+
+Requirements for tag: N/A
+
+Test level of tag: regular and build_namelist
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: remove many optional ch4 namelists.
+  add new namelist called use_aereoxid_prog.
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, dlawren, Rajendra Paudel
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts4_130730 -> scripts4_130816
+
+List all files eliminated:
+
+- these were renamed
+models/lnd/clm/src/clm4_5/biogeochem/CNSharedConstsMod.F90
+models/lnd/clm/src/clm4_5/main/readConstantsMod.F90
+
+List all files added and what they do:
+
+- renamed
+models/lnd/clm/src/clm4_5/biogeochem/CNSharedParamsMod.F90
+models/lnd/clm/src/clm4_5/main/readParamsMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+- refactor to remove old namelist vars. for ch4 and add new functionality
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+- put in ch4 parameters and refactor const->params names
+M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNMRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4varcon.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeBGCMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeCNMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_5/main/initSoilParVICMod.F90
+M models/lnd/clm/src/clm4_5/main/pftvarcon.F90
+M models/lnd/clm/src/clm4_5/biogeophys/initSLakeMod.F90
+
+CLM testing:
+
+ build-namelist tests:
+
+ yellowstone: OK. Some changes to phys45 and phys45-crop. Should be OK in next tag.
+
+ regular tests (aux_clm): A few nlcomp differences that will go away in the next tag.
+ Other than a few expected failures due to new scripts entries, all OK.
+
+ yellowstone_intel OK
+ yellowstone_pgi OK
+ frankfurt_intel OK
+ frankfurt_pgi OK
+ frankfurt_nag OK
+
+ history file comparison with component_gen_comp and summarize_cprnc_diffs:
+
+ yellowstone_intel OK
+ yellowstone_pgi OK
+
+CLM tag used for the baseline comparisons: clm4_5_22
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_22
+Originator(s): muszala (Stefan Muszala)
+Date: Tue Jul 30 15:22:51 MDT 2013
+One-line Summary: aux_clm testlist reorganization
+
+Purpose of changes:
+
+ Reorganize all aux_clm tests and fix new failing tests. As part of
+ this, bring in a change from Maoyi for VIC w/vertical layers.
+ A few bug fixes to get new tests working.
+ Why did we do this?
+
+ 1) better balance between frankfurt, yellowstone and various compilers
+ 2) faster turn around time for development
+ 3) make sure current science functionality is properly tested
+ 4) removed outdated / irrelevant tests
+
+ Other points:
+
+ 1) Introduce regular and short test list. Testing now can consist of:
+
+ a) regular (must be run before handing off a tag to SEs and must be run
+ before committing a tag)
+      b) build_namelist (if namelists and/or build_system changed)
+ c) tools (only if tools are modified and no CLM source is modified)
+ d) short (for use during development and in rare cases where only a small
+ change with known behavior is added ... eg. a minor bug fix)
+ e) doc (no source testing required)
+
+ 2) PET tests will fail until threading is fixed in CLM
+
+Requirements for tag: N/A
+
+Test level of tag: regular. ran tests with old testlists to double check any new mods.
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: add no-vert:no-nitrif option to configure
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self. Extensive discussion regarding list content with Lawrence,
+ Sacks, Kluzek and Andre.
+
+List any svn externals directories updated (csm_share, mct, etc.): new scripts
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+- add no-vert:no-nitrif option
+M models/lnd/clm/bld/configure
+M models/lnd/clm/bld/config_files/config_definition.xml
+- Update expected failures
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+- Maoyi VIC+vertical layers fix
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+- Update change log template
+M .ChangeLog_template
+- Change intent out to inout for ciso_flux
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+- Remove crop_prog check to get rid of unassociated pointer with NAG
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+- Fix bounds type error (should be intent=in)
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+
+Machines testing ran on: (Tests in priority order)
+
+ Note:
+
+ 1) All PET tests will fail until openMP is fixed.
+ 2) All IG compsets fail with the NAG compiler due to non-compliant f77 code.
+  3) If one combination fails, it's backed up with another combination that passes.
+ There are one or two exceptions to this.
+
+ I) aux_clm tests with old testlists:
+
+ yellowstone/aux_clm intel - OK
+ yellowstone/aux_clm pgi - OK
+ frankfurt/aux_clm intel - OK
+ frankfurt/aux_clm pgi - OK
+ frankfurt/aux_clm nag - OK
+
+ CESM history file comparison:
+
+ yellowstone/aux_clm intel - OK
+
+ II) aux_clm tests with new testlists:
+
+ yellowstone/aux_clm intel - OK
+ yellowstone/aux_clm pgi - OK
+ frankfurt/aux_clm intel - OK
+ frankfurt/aux_clm pgi - OK
+ frankfurt/aux_clm nag - OK
+
+ III) aux_clm_short tests with new testlists:
+
+ yellowstone/aux_clm intel - OK
+ yellowstone/aux_clm pgi - OK
+ frankfurt/aux_clm intel - OK
+ frankfurt/aux_clm pgi - OK
+ frankfurt/aux_clm nag - OK
+
+ CESM history file comparison: Not run since no baseline comparisons.
+
+CLM tag used for the baseline comparison tests if applicable: CLM4_5_21 with old testlist. Only ran generate with new testlists.
+
+Changes answers relative to baseline: Only for VIC with vertical layers.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_21
+Originator(s): muszala (Stefan Muszala)
+Date: Wed Jul 24 14:23:19 MDT 2013
+One-line Summary: ifdef and bounds refactor
+
+Purpose of changes:
+- Almost all implementation by Mvertens
+- Refactor ifdef use so that a majority are now in controlMod.F90. This is the
+first step to removing them completely.
+- Introduction of bounds_type and clump_type
+- Refactor interfaces to support bounds_type and clump_type
+- Bug fix from Sacks
+
+Also: Changed layout of landunit, column and pft-level arrays: Previously, all
+points for a given grid cell were grouped together. Now, all points for a given
+landunit type are grouped together. This improves performance of loops over
+filters, because it leads to more memory locality – this will be especially true
+when we add more 0-weight points to arrays for the purpose of dynamic
+landunits. For example, if a processor has 2 grid cells and there are 3 landunit
+types:
+
+Old layout in memory: (G1L1, G1L2, G1L3, G2L1, G2L2, G2L3)
+New layout in memory: (G1L1, G2L1, G1L2, G2L2, G1L3, G2L3)
+
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: mvertens, sacks, self
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated:
+
+D models/lnd/clm/src/util_share/decompInitMod.F90
+D models/lnd/clm/src/util_share/ndepStreamMod.F90
+D models/lnd/clm/src/util_share/decompMod.F90
+D models/lnd/clm/src/clm4_5/main/initParametersMod.F90
+
+List all files added and what they do:
+
+A + models/lnd/clm/src/clm4_5/main/decompInitMod.F90
+A + models/lnd/clm/src/clm4_5/main/ndepStreamMod.F90
+A + models/lnd/clm/src/clm4_5/main/decompMod.F90
+A + models/lnd/clm/src/clm4_0/main/decompInitMod.F90
+A + models/lnd/clm/src/clm4_0/main/ndepStreamMod.F90
+A + models/lnd/clm/src/clm4_0/main/decompMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/doc/ChangeLog
+M models/lnd/clm/doc/ChangeSum
+M models/lnd/clm/src/cpl_share/clm_cpl_indices.F90
+M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNGRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CropRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/initch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNBalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate3Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNMRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/MEGANFactorsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4RestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNWoodProductsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPrecisionControlMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVLightMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4varcon.F90
+M models/lnd/clm/src/clm4_5/biogeochem/STATICEcosysDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate3Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEcosystemDynIniMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAnnualUpdateMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeBGCMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVegStructUpdateMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeCNMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSharedConstsMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+M models/lnd/clm/src/clm4_5/main/CNiniTimeVar.F90
+M models/lnd/clm/src/clm4_5/main/dynlandMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/pft2colMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/clm_varsur.F90
+M models/lnd/clm/src/clm4_5/main/restFileMod.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+M models/lnd/clm/src/clm4_5/main/initSoilParVICMod.F90
+M models/lnd/clm/src/clm4_5/main/CNiniSpecial.F90
+M models/lnd/clm/src/clm4_5/main/pftvarcon.F90
+M models/lnd/clm/src/clm4_5/main/surfrdMod.F90
+M models/lnd/clm/src/clm4_5/main/readConstantsMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+M models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/FrictionVelocityMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/TridiagonalMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/ActiveLayerMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/QSatMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CLMVICMapMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SNICARMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/initSLakeMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/FracWetMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BandDiagonalMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+M models/lnd/clm/src/clm4_0/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_0/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_0/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_0/main/controlMod.F90
+M models/lnd/clm/src/clm4_0/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_0/main/clm_driver.F90
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: Not run
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel BFB
+ yellowstone/aux_clm pgi BFB
+ frankfurt/aux_clm intel BFB
+ frankfurt/aux_clm pgi BFB
+ frankfurt/aux_clm nag BFB
+
+ CESM history file comparison:
+
+ yellowstone/aux_clm intel BFB
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_20
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_20
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Sat Jul 20 10:54:43 MDT 2013
+One-line Summary: refactor to allow CN and BGC params. to be read from netcdf file
+
+Purpose of changes:
+
+The first of two tags that allows all parameters to be read from a netcdf file to
+provide sensitivity analysis capability, to increase modularity of code and to
+remove "magic numbers" from code. This tag introduces a new namelist variable
+"fconsts" which points to a netcdf file of CN and BGC parameters. In a future
+tag, this netcdf file will be combined with CH4 parameters and PFT parameters.
+
+Values are read in readConstantsMod.F90. Each module that requires
+a parameter provides a read subroutine. That read subroutine is called
+from readConstantsMod.F90 and places parameters into a private type
+for that module. For example, CNDecompMod.F90 provides readCNDecompConsts which
+is called from readConstantsMod and populates the type instance CNDecompConstInst.
+CNDecompConstInst is then used in CNDecompMod as:
+
++ sminn_to_denit_decomp_cascade_vr(c,j,k) = -CNDecompConstInst%dnp * pmnf_decomp_cascade(c,j,k
+
+which replaces:
+
+- dnp = 0.01_r8
+...
+- sminn_to_denit_decomp_cascade_vr(c,j,k) = -dnp * pmnf_decomp_cascade(c,j,k)
+
+A bulk of this work was completed by Rajendra Paudel.
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: new namelist variable called fconsts.
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, D. Lawrence, R. Paudel. (for design: discussion w/ mvertens, sacks, kluzek)
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated:
+
+Renamed
+D models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_BGC.F90
+D models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_CENTURY.F90
+
+List all files added and what they do:
+
+Rename of Deleted files
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeBGCMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeCNMod.F90
+
+Module to read parameters shared by multiple modules
+A + models/lnd/clm/src/clm4_5/biogeochem/CNSharedConstsMod.F90
+
+Module that reads shared and private parameters
+A + models/lnd/clm/src/clm4_5/main/readConstantsMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+Add fconsts namelist variable
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+Add in functionality to read parameters off of netcdf file
+M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNMRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK. These FAILs should pass next round:
+
+ 418/444 < FAIL>
+ 423/444 < FAIL>
+ 428/444 < FAIL>
+ 433/444 < FAIL>
+ 438/444 < FAIL>
+ 443/444 < FAIL>
+
+ CESM test lists:
+
+CLM45 compsets have failures for nlcomp due to the introduction of fconsts namelist variable.
+For example:
+
+FAIL ERI.f19_g16.IG1850CLM45.yellowstone_pgi.GC.test20Pgi.nlcomp
+FAIL ERH_D.f19_g16.I1850CLM45CN.yellowstone_intel.GC.test20Intel.nlcomp
+
+ yellowstone/aux_clm intel OK
+ yellowstone/aux_clm pgi OK
+ frankfurt/aux_clm intel OK
+ frankfurt/aux_clm pgi OK
+ frankfurt/aux_clm nag OK
+
+ CLM history file comparison:
+
+ yellowstone/aux_clm intel OK
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_19
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_19
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Wed Jul 17 14:29:32 MDT 2013
+One-line Summary: fix setting of bd in iniTimeConst
+
+Purpose of changes:
+
+In iniTimeConst, bd (bulk density) was being set incorrectly, so that,
+for a given processor, the same value was being put in all (c,j)
+locations. In addition to being incorrect, this meant that results
+differed depending on processor count. This tag fixes this problem.
+
+This only affects CLM4.5 BGC runs, because the bd array is only used
+in CNNitrifDenitrifMod.F90. (However, as a side note: This array
+SHOULD be used in DUSTMod and initSLakeMod, which currently recompute
+bd.)
+
+
+Requirements for tag:
+
+Test level of tag:
+
+Bugs fixed (include bugzilla ID):
+ 1736 (bd set incorrectly in iniTimeConst, leads to results that depend on processor count)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: NO
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes ***
+ All PASS or xFAIL
+ yellowstone/aux_clm pgi yes
+ All PASS
+ frankfurt/aux_clm intel yes **
+ All PASS
+ frankfurt/aux_clm pgi yes
+ All PASS
+ frankfurt/aux_clm nag yes
+ All PASS
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $CESMDATAROOT/ccsm_baselines/ -model clm2)
+
+ yellowstone/aux_clm intel yes **
+ All PASS or BFAIL1 except CLM45BGC comparisons
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: no
+ frankfurt interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_18
+
+Changes answers relative to baseline: YES
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All CLM45BGC
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ Larger than roundoff; still to be determined if this gives new
+ climate (Dave Lawrence will run a simulation to determine this)
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: (Not yet done)
+
+ URL for LMWG diagnostics output used to validate new climate: (Not
+ yet done)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_18
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Tue Jul 9 10:07:08 MDT 2013
+One-line Summary: rework urban indexing
+
+Purpose of changes:
+
+Change urban derived type and local variables in UrbanMod subroutines to go
+lbl:ubl rather than 1:num_urbanl. There are a few reasons for this: (1) this
+works better when the urban filter can change (with dynamic landunits), (2) more
+consistency with the rest of the CLM code, (3) no longer have to remember
+whether a given variable should be indexed by fl or l. The downside is that it
+leads to slightly greater memory use.
+
+Along with doing this, I also changed a few loops in UrbanMod to be simpler
+(which is allowed with the above change).
+
+Also, no longer run over 0-weight urban columns - we don't have to do this any
+more now that I have reworked some loops in UrbanMod.
+
+
+Requirements for tag:
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes:
+
+ Increase in memory use by UrbanMod, which should lead to a small overall
+ memory increase - though this doesn't show up in most memcomp tests, showing
+ that the increase is pretty small.
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+========= Major changes to indexing: local variables now dimensioned lbl:ubl
+ rather than 1:num_urbanl. Also, remove canyon_hwr, wtroad_perv,
+ ht_roof and wtlunit_roof from urban_params, because there are
+ duplicate variables in clmtype.
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+
+========= No longer make 0-weight urban columns active
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+
+========= Remove unused wind_hgt_canyon from clmtype
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+
+========= Minor changes for new UrbanMod interfaces
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+
+
+
+Machines testing ran on: (Tests in priority order)
+ Standard Tag Pretag *** Standard Tag Posttag **
+
+ build-namelist unit tester: NO
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes ***
+ All PASS or xFAIL
+ yellowstone/aux_clm pgi yes **
+ All PASS
+ frankfurt/aux_clm intel yes **
+ All PASS
+ frankfurt/aux_clm pgi yes
+ All PASS, except the following, which appears to be a system problem:
+ FAIL ERI_D.f19_g16.ICLM45.frankfurt_pgi.GC.214513
+ I will rerun the above test once the system problem is resolved
+ frankfurt/aux_clm nag yes
+ All PASS
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $CESMDATAROOT/ccsm_baselines/ -model clm2)
+
+ yellowstone/aux_clm intel yes **
+ All PASS or BFAIL, except the following expected failures:
+
+ *** Expected failures because of failures in the base tests
+ FAIL ERS.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_17.clm2.h0
+ FAIL ERS.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_17.clm2.h1
+ FAIL ERS_Ld211_D_P112x1.f10_f10.ICNCROP.yellowstone_intel.clm-crop.compare_hist.clm4_5_17.clm2.h0 (no history file in test case)
+
+ *** Expected differences in cols1d_active and pfts1d_active, as well as
+ FILLDIFFs, due to making 0-weight urban columns no longer active
+ FAIL ERI_D.f10_f10.ICLM45BGC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_17.clm2.h1
+ FAIL ERS_D.f10_f10.ICLM45BGCNoVS.yellowstone_intel.clm-rootlit.compare_hist.clm4_5_17.clm2.h1
+ FAIL ERS_D.f10_f10.ICLM45BGC.yellowstone_intel.clm-ch4_set2_ciso.compare_hist.clm4_5_17.clm2.h1
+ FAIL ERS_D.f10_f10.ICLM45BGC.yellowstone_intel.clm-ch4_set3_pftroot.compare_hist.clm4_5_17.clm2.h1
+ FAIL ERS_D.f19_g16.ICLM45GLCMEC.yellowstone_intel.clm-glcMEC.compare_hist.clm4_5_17.clm2.h1
+ FAIL ERS.f19_g16.I1850CLM45BGC.yellowstone_intel.clm-default.compare_hist.clm4_5_17.clm2.h1
+ FAIL ERS.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default.compare_hist.clm4_5_17.clm2.h1
+ FAIL ERS_Ld3_D_P64x1.ne30_g16.ICLM45CN.yellowstone_intel.clm-default.compare_hist.clm4_5_17.clm2.h1
+ FAIL SSP.f19_g16.I1850CLM45BGC.yellowstone_intel.clm-default.compare_hist.clm4_5_17.clm2.h1
+
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: no
+ frankfurt interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_17
+
+Changes answers relative to baseline: NO, except for inconsequential changes in
+1-d hist files (cols1d_active, pfts1d_active, and some FILLDIFFS, due to making
+0-weight urban columns no longer active)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_17
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Wed Jul 3 10:54:03 MDT 2013
+One-line Summary: misc cleanup and bug fixes
+
+Purpose of changes:
+
+Bit-for-bit cleanup following from tag clm4_5_11. The biggest change is the
+removal of maxpatch, npatch_* and some related variables from clm_varpar (these
+were maintenance headaches).
+
+
+Requirements for tag:
+
+Test level of tag: standard + tools
+
+Bugs fixed (include bugzilla ID):
+
+ 1747 (need 1x1_tropicAtl datasets)
+ 1754 (mksurfdata_map problem making CH4 parameters for ne240 CLM4.5 surface dataset)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: New 1x1_tropicAtl datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+========= Remove maxpatch, npatch_* and a few related variables that are no
+ longer needed (these variables were a maintenance headache)
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+
+========= Rework code to not require the variables that were removed from clm_varpar
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90
+
+========= Remove unneeded 'use' statements
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+
+========= Allow roundoff-level errors (needed to make ne240 dataset)
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkCH4inversionMod.F90
+
+========= Fix generation of 1x1_tropicAtl datasets
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl
+
+========= New 1x1_tropicAtl datasets
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+
+
+Machines testing ran on: (Tests in priority order)
+ Standard Tag Pretag *** Standard Tag Posttag **
+
+ build-namelist unit tester: yes
+ All PASS or xFAIL
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes ***
+ All PASS or xFAIL
+ yellowstone/aux_clm pgi yes **
+ All PASS
+ frankfurt/aux_clm intel yes **
+ All PASS
+ frankfurt/aux_clm pgi yes
+ All PASS
+ frankfurt/aux_clm nag yes
+ All PASS
+
+ Additional tests (with comparison to clm4_5_16, including
+ component_gen_comp; for the FARM test, used a sandbox corresponding to
+ cesm1_3_alpha01a for components other than CLM):
+
+ ERS_Ly5.f10_f10.I20TRCRUCLM45BGC.yellowstone_intel.clm-monthly_noinitial
+ SMS.T42_T42.FARM95C4.yellowstone_intel.clm-daily
+ [the clm-daily nl dir just sets hist_nhtfrq = -24]
+
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $CESMDATAROOT/ccsm_baselines/ -model clm2)
+
+ yellowstone/aux_clm intel yes **
+ All PASS or BFAIL1, except irrelevant failures from this failing test:
+ FAIL ERS.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_16.clm2.h0
+ FAIL ERS.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_16.clm2.h1
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: yes
+ All PASS except expected failures:
+
+ 027 smiS4 TSMscript_tools.sh shared ncl_scripts getregional_datasets.pl getregional .............\c
+ rc=6 FAIL
+ 028 bliS4 TBLscript_tools.sh shared ncl_scripts getregional_datasets.pl getregional .............\c
+ rc=4 FAIL
+
+
+ frankfurt interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_16
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_16
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Tue Jul 2 09:22:41 MDT 2013
+One-line Summary: only run filters over 'active' points
+
+Purpose of changes:
+
+In preparation for dynamic landunits, we only want to run most filters over
+'active' points. This required changing landunit and column-level filters to
+only run over active points. In addition, I changed the nourbanp filter to only
+run over active points (in contrast to other pft-level filters, this filter had
+previously operated over non-active points, too).
+
+In addition, this tag includes some related changes, most of which were required
+to get the code to run correctly in light of the above changes. Some of these
+changes - in particular, the changes to reweightMod, filterMod, and the use of
+the new filter_inactive_and_active in some places - effectively undid that
+general filter change for select landunits (urban) or subroutine calls.
+
+
+Requirements for tag:
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: changes to filterMod reviewed by erik, mvertens,
+ stefan, dave lawrence & ben andre; other changes
+ only by self
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+========= Change main filters to just run over active points; add new filters
+ that include inactive as well as active points; refactor subroutines
+ to avoid code duplication now that we have two groups of filters
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+
+========= Change filter to just run over active points
+M models/lnd/clm/src/clm4_5/main/pft2colMod.F90
+
+========= Change urban columns & pfts to be active whenever their landunit is
+ active (to avoid making urban code messier)
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+
+========= Get rid of wt > 0 checks, which are no longer appropriate in the code
+ (checks of the active flags should be done instead - and these have
+ been folded in to the filters)
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+
+========= Make a loop more consistent in its use of a filter; remove undesirable
+ pactive check (because decomp_vertprofiles now sometimes operates on
+ inactive as well as active points)
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+
+========= Use filters that include inactive points in call to
+ decomp_vertprofiles (this is needed because of the unusual placement
+ of this routine in the driver sequence)
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+
+========= Use filters that include inactive points in calls to SurfaceAlbedo and
+ UrbanAlbedo. For SurfaceAlbedo, this is necessary to avoid floating
+ point exceptions in transient cases; for UrbanAlbedo, this probably
+ isn't necessary now, but likely will be needed when we have dynamic
+ landunits, for the same reason that we need it for SurfaceAlbedo.
+M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+
+========= Remove pactive check in setting up vegsol / novegsol filters - as far
+ as I can tell, this check is now unnecessary, and it led to the odd
+ result that novegsol included all inactive points (e.g., even inactive
+ istsoil points). Also add some comments and remove some obsolete
+ comments.
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+
+========= Remove pactive checks that are unnecessary now that the nourbanp
+ definition has changed
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+
+========= Only check errsoi_col on active columns (to prevent NaN-related
+ problems in crop runs)
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+
+========= Add comments
+M models/lnd/clm/src/clm4_5/main/CNiniSpecial.F90
+M models/lnd/clm/src/clm4_5/biogeophys/ActiveLayerMod.F90
+
+
+
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: yes
+ All PASS or xFAIL
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes
+ All PASS or xFail
+
+ Also ran the following, which PASSes (also PASSes cpl & clm
+ hist comparisons, except for expected failures in .h1 file
+ comparisons, as below):
+ ERS_Ly5.f10_f10.I20TRCRUCLM45BGC.yellowstone_intel.clm-monthly_noinitial
+
+ yellowstone/aux_clm pgi yes
+ All PASS
+
+ frankfurt/aux_clm intel yes
+ All PASS
+ frankfurt/aux_clm pgi yes
+ All PASS
+ frankfurt/aux_clm nag yes
+ All PASS
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $CESMDATAROOT/ccsm_baselines/ -model clm2)
+
+ yellowstone/aux_clm intel yes
+ All PASS, except:
+ *** Irrelevant, because this test fails
+ FAIL ERS.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_15.clm2.h0
+ FAIL ERS.f09_g16.ICLM45VIC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_15.clm2.h1
+
+ *** Expected diffs in h1 files: differences in cols1d_active,
+ pfts1d_active, and related FILLDIFFs in a number of variables
+ FAIL ERI_D.f10_f10.ICLM45BGC.yellowstone_intel.clm-vrtlay.compare_hist.clm4_5_15.clm2.h1
+ FAIL ERS_D.f10_f10.ICLM45BGCNoVS.yellowstone_intel.clm-rootlit.compare_hist.clm4_5_15.clm2.h1
+ FAIL ERS_D.f10_f10.ICLM45BGC.yellowstone_intel.clm-ch4_set2_ciso.compare_hist.clm4_5_15.clm2.h1
+ FAIL ERS_D.f10_f10.ICLM45BGC.yellowstone_intel.clm-ch4_set3_pftroot.compare_hist.clm4_5_15.clm2.h1
+ FAIL ERS_D.f19_g16.ICLM45GLCMEC.yellowstone_intel.clm-glcMEC.compare_hist.clm4_5_15.clm2.h1
+ FAIL ERS.f19_g16.I1850CLM45BGC.yellowstone_intel.clm-default.compare_hist.clm4_5_15.clm2.h1
+ FAIL ERS.f19_g16.ICRUCLM45BGCCROP.yellowstone_intel.clm-default.compare_hist.clm4_5_15.clm2.h1
+ FAIL ERS_Ld3_D_P64x1.ne30_g16.ICLM45CN.yellowstone_intel.clm-default.compare_hist.clm4_5_15.clm2.h1
+ FAIL SSP.f19_g16.I1850CLM45BGC.yellowstone_intel.clm-default.compare_hist.clm4_5_15.clm2.h1
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: no
+ frankfurt interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_15
+
+Changes answers relative to baseline: NO, except for changes in fill
+patterns & active flags in 1-d hist files, as noted above
+
+===============================================================
+===============================================================
+Tag name: clm4_5_15
+Originator(s): muszala (Stefan Muszala)
+Date: Mon Jul 1 10:44:05 MDT 2013
+One-line Summary: complete associate refactor for pointers in clm4_5 source
+
+Purpose of changes: Refactor all clm4_5 source so that pointer assignments are
+ placed in associate blocks at the start of a subroutine. This allows us to
+ get rid of pointer declarations, makes the code easier to modify, makes the
+ code more robust and sets us up for future interface refactorings. The refactor is
+ explained in more detail in models/lnd/clm/tools/clm4_5/refactorTools/README.
+
+- real(r8), pointer :: fdry(:) ! fraction of foliage that is green and dry [-] (new)
+- fdry => pps%fdry
++ associate(&
++ fdry => pps%fdry & ! Output: [real(r8) (:)] fraction of foliage that is green and dry [-] (new)
++ )
+ ...
+- end subroutine FracWet
++ end associate
++ end subroutine FracWet
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: BGC compsets will see increases
+ in memory (highwater) use. This can be seen in memcomp portions of testing with
+ one specific example. Something to keep track of.
+
+ FAIL ERS.f19_g16.I1850CLM45BGC.yellowstone_intel.clm-default.GC.4515preIntel.memcomp.clm4_5_14
+ - highwater goes from 166 MB in clm4_5_14 to 236 MB in clm4_5_15
+ FAIL ERS.f19_g16.I1850CLM45BGC.yellowstone_intel.rtm-rtmOff.GC.4515preIntel.memcomp.clm4_5_14
+ FAIL ERS.f19_g16.I1850CLM45BGC.yellowstone_intel.rtm-rtmOnFloodOnEffvelOff.GC.4515preIntel.memcomp.clm4_5_14
+ FAIL ERS.f19_g16.I1850CLM45BGC.yellowstone_intel.rtm-rtmOnFloodOnEffvelOn.GC.4515preIntel.memcomp.clm4_5_14
+ FAIL ERS.f19_g16.I1850CLM45BGC.yellowstone_intel.rtm-rtmOnIceOn.GC.4515preIntel.memcomp.clm4_5_14
+ FAIL ERS.f19_g16_r01.I1850CLM45BGC.yellowstone_intel.rtm-rtmOnFloodOnEffvelOff.GC.4515preIntel.memcomp.clm4_5_14
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do:
+
+A models/lnd/clm/tools/clm4_5/refactorTools/associate/refactor_new.pl
+A models/lnd/clm/tools/clm4_5/refactorTools/associate/README
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNGRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CropRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/initch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNBalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate3Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNMRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_BGC.F90
+M models/lnd/clm/src/clm4_5/biogeochem/MEGANFactorsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPrecisionControlMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVLightMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate3Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAnnualUpdateMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_CENTURY.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVegStructUpdateMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/FrictionVelocityMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/TridiagonalMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/ActiveLayerMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CLMVICMapMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SNICARMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/initSLakeMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/FracWetMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+
+Machines testing ran on: (Tests in priority order)
+ build-namelist unit tester: OK
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel OK
+ yellowstone/aux_clm pgi OK
+ frankfurt/aux_clm intel OK
+ frankfurt/aux_clm pgi OK
+ frankfurt/aux_clm nag OK
+
+ CESM history file comparison:
+
+ yellowstone/aux_clm OK
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_14
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_14
+Originator(s): muszala (Stefan Muszala)
+Date: Thu Jun 20 07:51:54 MDT 2013
+One-line Summary: preparation for associate refactor in clm4_5_15
+
+Purpose of changes:
+ Most work by mvertens.
+ - prep. work for modifying associate
+ - refactor subgridAveMod.F90 to accept upper and lower bounds
+ - remove duplicate pointer uses
+ - remove inicPerpMod.F90 and is_perpetual use
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated:
+
+D models/lnd/clm/src/clm4_5/main/inicPerpMod.F90
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/STATICEcosysDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAnnualUpdateMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/main/pft2colMod.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK. Fixed generate numbering.
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel OK
+ yellowstone/aux_clm pgi OK
+ frankfurt/aux_clm intel OK
+ frankfurt/aux_clm pgi OK
+ frankfurt/aux_clm nag OK
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $CESMDATAROOT/ccsm_baselines/ -model clm2)
+
+ yellowstone/aux_clm intel OK
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_13
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_13
+Originator(s): andre (Benjamin Andre,LAWRENCE BERKELEY NATIONAL LABORATORY,510-486-4617)
+Date: Fri Jun 14 15:01:33 MDT 2013
+One-line Summary: hydrology reordering from Jinyun Tang
+
+Purpose of changes:
+ reordering the operations of the hydrology. hydrology with and without drainage
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: Jinyun Tang, Dave Lawrence
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90 - splits out leaching
+ M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90 - add icefrac and qflx_deficit
+ M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90 - update calls to CNEcosystemDyn
+ M models/lnd/clm/src/clm4_5/main/clm_driver.F90 - update calls to CNEcosystemDyn and Hydrology
+ M models/lnd/clm/src/clm4_5/main/clmtype.F90 - add icefrac and qflx_deficit
+ M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90 - splits out drainage calculations
+ M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90 - calculate water table before subsurface drainage, icefraction, water deficit
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: yes/no ***
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes OK
+ yellowstone/aux_clm pgi yes OK
+ frankfurt/aux_clm intel no
+ frankfurt/aux_clm pgi no
+ frankfurt/aux_clm nag no
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $CESMDATAROOT/ccsm_baselines/ -model clm2)
+
+ test_driver.sh tools testing: N/A
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_12
+
+Changes answers relative to baseline: yes
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: all clm 4.5 with hydrology
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate): new climate
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+
+  casename: ERS_D.f19_g16.ICLM45 - an 11-year baseline simulation
+ was created with the standard trunk version, then a comparison run
+ was created with the version including hydrology re-ordering. The
+ comparisons were evaluated by looking at the relative differences
+ for hydrologic variables as QDRAI, EFLX_LH_TOT, QRUNOFF. Large
+ relative differences were found for these variables in a few grid
+ cells, but their absolute magnitudes in those grid cells were
+ small. Tests were also conducted with VIC hydrology on, the
+  change in results was similar to that when VIC hydrology was off.
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_5_12
+Originator(s): muszala (Stefan Muszala)
+Date: Thu Jun 13 09:41:56 MDT 2013
+One-line Summary: NoVS test, NAG mods and remove TWS from restart file
+
+Purpose of changes:
+
+ -Fix (from jedwards) for ERS_D.f10_f10.ICLM45BGCNoVS.yellowstone_intel.clm-rootlit test
+ Previous tags using ICLM45BGCNoVS are suspect!
+ -Since I tested this with NAG there are also port mods to CLM that I had to put in.
+ -Remove TWS from BiogeophysRestMod.F90 per sacks request. OK'd by swenson.
+ -Update to rtm1_0_29
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): 1746
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, NoVs error: Charlie Koven and jedwards
+
+List any svn externals directories updated (csm_share, mct, etc.): rtm1_0_28 -> rtm1_0_29
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+-NoVS fix and NAG mods
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+-Nag mods
+M models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4varcon.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+-remove TWS from restart
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+-update to rtm1_0_29
+M SVN_EXTERNAL_DIRECTORIES
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: no
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel OK
+ yellowstone/aux_clm pgi OK
+ frankfurt/aux_clm intel OK
+ frankfurt/aux_clm pgi OK
+ frankfurt/aux_clm nag OK
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $CESMDATAROOT/ccsm_baselines/ -model clm2)
+
+ yellowstone/aux_clm intel OK. The only differences are in ERS_D.f10_f10.ICLM45BGCNoVS.yellowstone_intel.clm-rootlit.
+ These are expected due to the fix in ch4Mod.F90.
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_11
+
+Changes answers relative to baseline: Only for ERS_D.f10_f10.ICLM45BGCNoVS.yellowstone_intel.clm-rootlit. Previous
+ versions of this compset should be considered suspect. Fields that change (119 in total) for this test are:
+
+ ACTUAL_IMMOB, CH4STRESS_SAT, CH4STRESS_UNSAT, CH4_AERE_DEPTH_SAT, CH4_AERE_DEPTH_UNSAT, CH4_EBUL_DEPTH_SAT,
+ CH4_OXID_DEPTH_SAT, CH4_OXID_DEPTH_UNSAT, CH4_PROD_DEPTH_SAT, CH4_SURF_AERE_SAT, CH4_SURF_AERE_UNSAT,
+ CH4_SURF_DIFF_SAT, CH4_SURF_DIFF_UNSAT, CH4_SURF_EBUL_SAT, CH4_TRAN_DEPTH_SAT, CH4_TRAN_DEPTH_UNSAT,
+ COL_CTRUNC, COL_NTRUNC, CONC_CH4_SAT, CONC_CH4_UNSAT, CONC_O2_SAT, CONC_O2_UNSAT, CWDC, CWDC_LOSS,
+ CWDC_TO_LITR2C, CWDC_TO_LITR3C, CWDN, CWDN_TO_LITR2N, CWDN_TO_LITR3N, DENIT, ER, FCH4, FCH4TOCO2,
+ FUELC, F_DENIT, F_N2O_DENIT, F_N2O_NIT, F_NIT, GROSS_NMIN, HR, LAND_UPTAKE, LITHR, LITR1C, LITR1C_TO_SOIL1C,
+ LITR1N, LITR1N_TO_SOIL1N, LITR1_HR, LITR2C, LITR2C_TO_SOIL1C, LITR2N, LITR2N_TO_SOIL1N, LITR2_HR, LITR3C,
+ LITR3C_TO_SOIL2C, LITR3N, LITR3N_TO_SOIL2N, LITR3_HR, LITTERC, LITTERC_HR, LITTERC_LOSS, NBP, NEE, NEM, NEP,
+ NET_NMIN, O2STRESS_SAT, O2_AERE_DEPTH_SAT, O2_DECOMP_DEPTH_SAT, O2_DECOMP_DEPTH_UNSAT, POTENTIAL_IMMOB,
+ POT_F_DENIT, POT_F_NIT, SMINN_TO_SOIL1N_L1, SMINN_TO_SOIL1N_L2, SMINN_TO_SOIL1N_S2, SMINN_TO_SOIL1N_S3,
+ SMINN_TO_SOIL2N_L3, SMINN_TO_SOIL2N_S1, SMINN_TO_SOIL3N_S1, SMINN_TO_SOIL3N_S2, SMIN_NH4, SMIN_NO3,
+ SMIN_NO3_LEACHED, SOIL1C, SOIL1C_TO_SOIL2C, SOIL1C_TO_SOIL3C, SOIL1N, SOIL1N_TO_SOIL2N, SOIL1N_TO_SOIL3N,
+ SOIL1_HR_S2, SOIL1_HR_S3, SOIL2C, SOIL2C_TO_SOIL1C, SOIL2C_TO_SOIL3C, SOIL2N, SOIL2N_TO_SOIL1N,
+ SOIL2N_TO_SOIL3N, SOIL2_HR_S1, SOIL2_HR_S3, SOIL3C, SOIL3C_TO_SOIL1C, SOIL3N, SOIL3N_TO_SOIL1N, SOIL3_HR,
+ SOILC, SOILC_HR, SOILC_LOSS, SOMHR, SR, TOTCOLC, TOTCOLCH4, TOTCOLN, TOTECOSYSC, TOTECOSYSN, TOTLITC, TOTLITN,
+ TOTSOMC, TOTSOMN
+
+===============================================================
+===============================================================
+Tag name: clm4_5_11
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Tue Jun 11 20:54:11 MDT 2013
+One-line Summary: Change pct_pft and related surface dataset variables to be % of landunit
+
+Purpose of changes:
+
+Main purpose is to change pct_pft and related surface dataset variables to be %
+of landunit rather than % of grid cell. This is needed to support transient PFTs
+with dynamic landunits. This required substantial changes in both mksurfdata_map
+and CLM. This also required generating all new surface datasets.
+
+A very related change is the separation of PCT_PFT in the surface dataset into
+PCT_NAT_PFT and PCT_CFT; in addition to these two variables, there are also new
+PCT_NATVEG (% of natural veg landunit on the gridcell) and PCT_CROP (% of crop
+landunit on the gridcell) variables. Note that the separation of PCT_PFT into
+natural vs crop was only done on the surface dataset -- raw datasets to
+mksurfdata_map have not been changed, nor have most of the CLM data structures.
+
+In addition, this tag includes the following:
+
+(1) Renumbered landunits to (a) add separate landunit numbers for each urban
+landunit, (b) do away with the obsolete shallow lake, and (c) group together
+similar landunits
+
+(2) In any urban landunit, allocate space for ALL urban columns. Previously,
+there were some urban landunits with only one of the two road types. This change
+simplifies the code and only adds a relatively small number of columns in memory.
+
+(3) Modified interpinic, partly to have compatibility with (1), partly to fix
+urban bug (allowed by (1)), and partly to fix an unrelated bug
+
+(4) All new initial conditions for CLM4.5, to have compatibility with (1) and (2)
+
+(5) Check _OPENMP in initialization rather than driver
+
+(6) Tighten error check in reweightMod: checkWeights. It seems like this error
+check can be stricter with the new pct_pft formulation
+
+
+Requirements for tag: Standard test + tools
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+ 1621 (normalization issue in ne120np4 datasets and in CLM)
+ 1675 (need to relax error tolerance in reweightMod: weightsOkay) -- note
+ that I have actually TIGHTENED the tolerance, but that seems to be
+ okay now
+  1702 - PARTIAL FIX (clm4.5 interpinic doesn't work right for urban)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+ 1747 - need 1x1_tropicAtl surface dataset and pftdyn dataset for clm4_5_11 and later
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Changes to fsurdat and finidat for
+CLM4.5, as described below
+
+List any changes to the defaults for the boundary datasets:
+
+ For CLM4.5, all surface datasets and initial conditions files have been
+ recreated. For surface datasets, changes result in only roundoff-level
+ differences in the pct_* fields. For initial conditions, the new initial
+ conditions are effectively the same as the old, but bugs in interpinic
+ prevent them from being exactly the same.
+
+Describe any substantial timing or memory changes:
+
+ Slight (probably < 1%) increase in memory for all CLM4.5 cases, due to
+ allocation of ALL urban columns wherever there is an urban landunit
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): NONE
+
+List all files eliminated:
+
+========= Remove unneeded modules (iulog moved to fileutils.F90)
+D models/lnd/clm/tools/clm4_5/mksurfdata_map/src/clm_varpar.F90
+D models/lnd/clm/tools/clm4_5/mksurfdata_map/src/clm_varctl.F90
+
+========= Now differs for clm4_0 and clm4_5, so copied to those two places
+D models/lnd/clm/src/util_share/clm_varsur.F90
+
+========= Replaced with new file for testing interpinic
+D models/lnd/clm/tools/clm4_5/interpinic/clmi.BCN.1949-01-01_10x15_USGS_simyr1850_c121113.nc
+
+
+List all files added and what they do:
+
+========= New file for testing interpinic
+A models/lnd/clm/tools/clm4_5/interpinic/clmi.I2000CLM45BGC.2000-01-01.10x15_simyr2000_c130607.nc
+
+========= Add tests
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkpftMod.F90
+
+========= Moved from util_share
+A models/lnd/clm/src/clm4_0/main/clm_varsur.F90
+
+========= Moved from util_share, and modified extensively to support new surface
+ dataset format
+A models/lnd/clm/src/clm4_5/main/clm_varsur.F90
+
+========= Add module to do some initialization that doesn't fit well elsewhere,
+ and/or can't go elsewhere because of circular dependencies
+A models/lnd/clm/src/clm4_5/main/initParametersMod.F90
+
+
+List all existing files that have been modified, and describe the changes:
+
+========= Change pct_pft and related variables on surface dataset to be % of
+ landunit; this requires significant changes for mkpftMod, mkglcmecMod
+ and the error checks / corrections done in mksurfdat.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkutilsMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/fileutils.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkfileMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkglcmecMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkvarpar.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksurfdat.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Srcfiles
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkpftMod.F90
+
+========= Add tests
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkutilsMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/Srcfiles
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mksurfdata_map.F90
+
+========= Update crop landunit numbering, fix urban bug for column-level
+ variables, take code out of a conditional to prevent floating point
+ exceptions
+M models/lnd/clm/tools/clm4_5/interpinic/src/interpinic.F90
+
+========= New files for testing interpinic
+M models/lnd/clm/tools/clm4_5/interpinic/interpinic.runoptions
+
+========= Change landunit and column numbering; delete udenstype
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+
+========= Add variables for determining number of natural & crop PFTs
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+
+========= Major changes to handle pct_pft being specified as % of landunit
+ rather than % of gridcell
+M models/lnd/clm/src/clm4_5/main/surfrdMod.F90
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridMod.F90
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+
+========= Initialize new surface variables, check _OPENMP here instead of driver
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+
+========= Update comments, remove udenstype
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+
+========= Change 'use' statements, use ltype instead of udenstype, fix
+ initialization for 0-weight columns
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+
+========= Add some consistency checks (moved here from clmtypeInitMod), change others
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+
+========= Change 'use' statements; use urbpoi rather than isturb; remove
+ references to 'istslak'
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNBalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPrecisionControlMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/main/CNiniTimeVar.F90
+M models/lnd/clm/src/clm4_5/main/dynlandMod.F90
+M models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+ -- also, remove udenstype, and move some consistency checks elsewhere
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/restFileMod.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+ -- also, move _OPENMP check to initialization
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+M models/lnd/clm/src/clm4_5/main/CNiniSpecial.F90
+M models/lnd/clm/src/clm4_5/main/inicPerpMod.F90
+M models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+
+========= Tighten tolerance for error check
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+
+========= Use 'crop_prog' rather than the CROP CPP def
+M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+
+========= New surface datasets and initial conditions
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+========= Change environment variable in component_gen_comp command to something universal
+M .ChangeLog_template
+
+========= Restore a failing test (see bug 1658)
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: yes
+
+ All PASS or xFail except for the following expected baseline failures:
+ 418/444 < FAIL>
+ 423/444 < FAIL>
+ 428/444 < FAIL>
+ 433/444 < FAIL>
+ 438/444 < FAIL>
+ 443/444 < FAIL>
+
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes
+ Tests themselves: All PASS or xFail, except
+ ERB.ne30_g16.I_1948-2004.yellowstone_intel, which I have re-added to the
+ xFail list (see bugz 1658)
+
+ Comparisons: Some nlcomp and compare_hist failures, as expected
+
+ yellowstone/aux_clm pgi yes
+ All PASS or xFail except for some nlcomp & compare_hist failures (expected)
+
+ frankfurt/aux_clm intel yes
+ All PASS or xFail except for some nlcomp & compare_hist failures (expected)
+
+ frankfurt/aux_clm pgi NO
+
+ frankfurt/aux_clm nag yes
+ All PASS or xFail except for nlcomp failures (expected)
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $CESMDATAROOT/ccsm_baselines/ -model clm2)
+
+ yellowstone/aux_clm intel yes
+
+ Some compare_hist failures for CLM4.5 tests, as expected
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: yes
+ All PASS except for expected baseline failures:
+ 012 blg54 TBLtools.sh clm4_5 mksurfdata_map tools__s namelist ...................................rc=7 FAIL
+ 016 blh54 TBLtools.sh clm4_5 interpinic tools__ds runoptions ....................................rc=7 FAIL
+ 020 bli24 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_T31_crpglc_2000^tools__ds rc=7 FAIL
+ 022 bli53 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__o ....rc=7 FAIL
+ 024 bli54 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__ds ...rc=7 FAIL
+ 026 bli57 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__do ...rc=7 FAIL
+ 032 bliT4 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp24_2000^tools_rc=7 FAIL
+
+ and expected failures:
+ 027 smiS4 TSMscript_tools.sh shared ncl_scripts getregional_datasets.pl getregional .............rc=6 FAIL
+ 028 bliS4 TBLscript_tools.sh shared ncl_scripts getregional_datasets.pl getregional .............rc=4 FAIL
+
+
+ frankfurt interactive: NO
+
+ yellowstone/PTCLM: NO
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_10
+
+Changes answers relative to baseline: YES
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in (otherwise remove this section):
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: Changes in all CLM4.5 configurations. See below
+ for details
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ Mostly roundoff, but some larger than roundoff -- see below for details.
+
+ Changes are limited to CLM4.5. Where the below notes refer to "all
+ configurations", this is really limited to CLM4.5 configurations.
+
+ For all configurations, there are roundoff-level changes due to
+ roundoff-level differences in subgrid weights. These changes can quickly
+ grow to greater than roundoff (which I believe is due to nonlinear
+ feedbacks with snow variables), but as described below, I have verified
+ that the root cause of differences is this roundoff-level change.
+
+ For cases that use initial conditions, where these initial conditions were
+ previously interpinic'ed, there are greater-than-roundoff level changes
+ due to various bugs and limitations of interpinic (for example, some
+ fields, like tsai, are skipped). I took pains to ensure that, for cases
+ using original (non-interpinic'ed) initial conditions in clm4_5_10 and
+ prior, the new initial conditions are nearly identical to the old (but not
+ entirely identical, due to bug 1702 - see comment 2); this applies to most
+ f09 initial conditions. However, this was not practical for cases that
+ used interpinic'ed files; this applies to f19, ne30 and hcru initial
+ conditions, as well as f09 BGCDV initial conditions. So for this latter
+ set of cases, there can be large differences from clm4_5_10, especially at
+ the start of the simulation.
+
+ There are also greater than roundoff-level changes for some glc_mec
+ virtual columns, because we now use information on topo_glc_mec whenever
+ we can.
+
+ There are also greater than roundoff-level changes in subgrid weights in
+ virtual (0-weight) glc_mec and crop landunits, now that we no longer use
+ arbitrary subgrid weights there; I don't think this will affect anything
+ important, though.
+
+ Some tests that exhibited larger-than-usual changes from baseline, in cpl
+ hist and/or clm hist files, were the following (ignoring changes that can
+ be explained by the above notes):
+
+ ERS_Lm3.f19_g16.IGRCP60CLM45CN.yellowstone_intel
+ ERS_D.f19_g16.ICLM45GLCMEC.yellowstone_intel.clm-glcMEC
+ ERS_D.f10_f10.ICLM45BGCNoVS.yellowstone_intel.clm-rootlit
+ ERS_Ld3_D_P64x1.ne30_g16.ICLM45CN.yellowstone_intel.clm-default
+
+ From these results, it seems that large changes may occur more often in
+ glc_mec runs, even above and beyond the virtual column changes that are
+ expected, as noted above.
+
+ For the four above tests, I verified that differences were attributable to
+ the roundoff-level changes in subgrid weights, using the procedure
+ documented below.
+
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ In order to confirm that answers only differed by roundoff, I ran 7
+ additional tests (these tests all had nl_dirs, but that was not central to
+ these tests, so I'm not listing them here):
+
+ SMS_Lm1.f19_g16.I_1850_CLM45_BGC
+ SMS_Ly2.T31_g37.IG1850CLM45CN
+ SMS_Ly5.T31_g37.I20TRCRUCLM45BGC
+ SMS_Ly5.T31_g37.ICLM45BGCDV
+ SMS_Ly5.T31_g37.I_2000_CLM45_BGC
+ SMS_Ly5.f10_f10.ICLM45BGCDVCROP
+ SMS_Ly5.f10_f10.I_2000_CLM45_BGC_CROP
+
+ Each test was done as follows:
+
+ (1) Created baselines from clm4_5_04
+
+ (2) Ran tests from a branch (allocate_all_urban_cols), where all I changed
+ from the baseline was (a) wherever we have an urban landunit, create ALL
+ urban landunits, and (b) for f19, change finidat files to use a new,
+ interpinic'ed file based on the original. Confirmed that this was bfb with
+ (1) except for (a) 1-d history files (now have extra urban columns), and
+ (b) any CLM4.5 test that uses initial conditions, since interpinic is
+ currently broken for urban.
+
+ Side-note: I actually confirmed bfb behavior for the full yellowstone
+ aux_clm test suite, in addition to the above 7 tests
+
+ This extra branch was necessary because I cannot compare 1-d history files
+ directly between my main branch and the trunk, because of the extra urban
+ columns present in the new code.
+
+
+ (3) Ran these 7 tests from my main branch, off of clm4_5_04, comparing
+ with (2). For this comparison, I only confirmed that the subgrid weights
+ were the same within roundoff (up to about 1e-12 differences for the
+ transient case; smaller for other cases). Note that greater than
+ roundoff-level changes are seen in many other fields, presumably because
+ small differences in subgrid pft weights can cause differences in how
+ variables are averaged from pft to column. This, in turn, can lead to
+ larger changes due to nonlinearities in the system (e.g., snow). The
+ following steps were taken to confirm that other differences between my
+ branch and the trunk were only due to these small differences in subgrid
+ weights.
+
+ That is, I am confirming that:
+ (a) the only differences in the branch are subgrid weights
+ (b) these subgrid weights only differ by roundoff
+
+
+ (4) Reran (2), but with extra code to write out subgrid weights (including
+ writing these weights at every time step for pftdyn) -- from branch
+ allocate_all_urban_cols_writeWeights
+
+ (5) Reran (3), but with extra code to read the subgrid weights written in
+ (4). Confirmed that, with this one-off, my branch was bfb with (4).
+
+
+===============================================================
+===============================================================
+Tag name: clm4_5_10
+Originator(s): muszala (Stefan Muszala)
+Date: Mon Jun 10 13:10:31 MDT 2013
+One-line Summary: refactor clmtype
+
+Purpose of changes: Refactor clmtype so that there is only one level of indirection.
+
+ call hist_addfld1d (fname='SNOOCFRCL', units='W/m^2', &
+ avgflag='A', long_name='surface forcing of OC in snow (land) ', &
+- ptr_pft=clm3%g%l%c%p%pef%sfc_frc_oc, set_urb=spval)
++ ptr_pft=pef%sfc_frc_oc, set_urb=spval)
+
+There is a README (with more detailed information) and a script to help with future merges in:
+
+ models/lnd/clm/tools/clm4_5/refactorTools/clmType/{README & renameClmType.pl}
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do:
+
+- script and README for refactoring clmType
+A models/lnd/clm/tools/clm4_5/refactorTools
+A models/lnd/clm/tools/clm4_5/refactorTools/associate
+A models/lnd/clm/tools/clm4_5/refactorTools/clmType
+A models/lnd/clm/tools/clm4_5/refactorTools/clmType/renameClmType.pl
+A models/lnd/clm/tools/clm4_5/refactorTools/clmType/README
+
+List all existing files that have been modified, and describe the changes:
+
+- major refactor in these to flatten clmtype
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+
+- change derived type access to match those of clmtype
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNGRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CropRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/initch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNBalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate3Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNMRespMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_BGC.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4RestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNWoodProductsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPrecisionControlMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVLightMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/STATICEcosysDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate3Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEcosystemDynIniMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAnnualUpdateMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_CENTURY.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNVegStructUpdateMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+M models/lnd/clm/src/clm4_5/main/CNiniTimeVar.F90
+M models/lnd/clm/src/clm4_5/main/dynlandMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/pft2colMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+M models/lnd/clm/src/clm4_5/main/initSoilParVICMod.F90
+M models/lnd/clm/src/clm4_5/main/CNiniSpecial.F90
+M models/lnd/clm/src/clm4_5/main/inicPerpMod.F90
+M models/lnd/clm/src/clm4_5/main/reweightMod.F90
+M models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/FrictionVelocityMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/TridiagonalMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/ActiveLayerMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CLMVICMapMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SNICARMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/initSLakeMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/FracWetMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SLakeRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+
+Machines testing ran on: (Tests in priority order)
+ Standard Tag Pretag *** Standard Tag Posttag **
+
+ build-namelist unit tester: no
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel OK
+ yellowstone/aux_clm pgi OK
+ frankfurt/aux_clm intel OK
+ frankfurt/aux_clm pgi OK
+ frankfurt/aux_clm nag OK
+
+ CESM history file comparison:
+
+ yellowstone/aux_clm intel
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_09
+
+Changes answers relative to baseline: No. Everything in this refactor should be BFB.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_09
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Tue Jun 4 15:59:07 MDT 2013
+One-line Summary: volr and vic fix, update mct and rtm
+
+Purpose of changes: add volr area correction, minor vic fix from maoyi, update mct and
+ rtm externals
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, swenson
+
+List any svn externals directories updated (csm_share, mct, etc.):
+-models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_27
++models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_28
+
+-models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.3
++models/utils/mct https://github.com/quantheory/MCT/tags/compiler_fixes_n01_MCT_2.8.3
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+- fix for VIC hydrology
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+- volr area correction
+M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+- mct and rtm update
+M SVN_EXTERNAL_DIRECTORIES
+- clean up
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel OK
+ yellowstone/aux_clm pgi OK
+ frankfurt/aux_clm intel OK
+ frankfurt/aux_clm pgi OK
+ frankfurt/aux_clm nag OK
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_08
+
+Changes answers relative to baseline: only for VIC compsets. VOLR diagnostic changes.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_08
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Mon Jun 3 13:29:30 MDT 2013
+One-line Summary: port for NAG compiler
+
+Purpose of changes: Bring in Sean Santos mods, port clm4_5 and test with the NAG compiler on Frankfurt.
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): 1721 - Jim Edwards fixed problem in PIO
+
+Known bugs (include bugzilla ID): 1722 - Error in some VIC tests starting in clm4_5_07
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, Santos
+
+List any svn externals directories updated (csm_share, mct, etc.): PIO - update to pio1_7_2
+
+List all files eliminated:
+
+D models/lnd/clm/src/util_share/nanMod.F90
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/unit_testers/xFail/expectedFail.pm
+M models/lnd/clm/src/util_share/clm_time_manager.F90
+M models/lnd/clm/src/util_share/accumulMod.F90
+M models/lnd/clm/src/util_share/ndepStreamMod.F90
+M models/lnd/clm/src/util_share/ncdio_pio.F90
+M models/lnd/clm/src/util_share/spmdMod.F90
+M models/lnd/clm/src/util_share/domainMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4varcon.F90
+M models/lnd/clm/src/clm4_5/biogeochem/STATICEcosysDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_5/main/subgridMod.F90
+M models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_5/main/pftvarcon.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SNICARMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/CropRestMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/STATICEcosysDynMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/clm4_0/biogeochem/CNDVEstablishmentMod.F90
+M models/lnd/clm/src/clm4_0/main/clm_glclnd.F90
+M models/lnd/clm/src/clm4_0/main/subgridMod.F90
+M models/lnd/clm/src/clm4_0/main/accFldsMod.F90
+M models/lnd/clm/src/clm4_0/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_0/main/pftvarcon.F90
+M models/lnd/clm/src/clm4_0/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_0/main/histFileMod.F90
+M models/lnd/clm/src/clm4_0/main/clm_atmlnd.F90
+M models/lnd/clm/src/clm4_0/biogeophys/SNICARMod.F90
+M SVN_EXTERNAL_DIRECTORIES
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: - OK.
+
+ yellowstone/aux_clm intel - OK.
+ yellowstone/aux_clm pgi - OK.
+ One BFAIL for hcru_hcru which should pass next time around. Bug fixed when upgrading to pio1_7_2.
+ BFAIL ERS_D.hcru_hcru.I_2000_CRUFRC_CLM45_CN.yellowstone_pgi.GC.08testPgi.compare_hist.clm4_5_07
+ frankfurt/aux_clm intel - OK.
+ frankfurt/aux_clm pgi - OK.
+ frankfurt/aux_clm nag - OK. No baselines to compare against.
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_07
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_07
+Originator(s): erik (Erik Kluzek)
+Date: Fri May 31 02:49:45 MDT 2013
+One-line Summary: New spinup files for CLM45 AND RTM, work on PTCLM, turn drydep off by default, update externals
+
+Purpose of changes:
+
+ Bring in new spinup finidat files (f09_g16@1850 for SP and BGC). interpinic to 2deg, hcru_hcru and ne30.
+ New spinup finidat files for BGCCROP and BGCDV (f19 and f09 respectively)
+ New spinup finidat files for 2000 (f09_g16 for SP and BGC)
+ Update RTM to bring in finidat_rtm files for either 1850 or 2000.
+ Update scripts, Machines, pio
+ scripts includes update for CLM40CRU hybrid startup
+ Turn drydep namelist off by default
+ Do a lot of work on getting PTCLM working and tools working for single-point.
+
+Requirements for tag:
+ New spinup files, fix bugs: 1708, 1700
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+ 1715 (rmdups.ncl fails for no-overlap case)
+ 1714 (mkscripgrid.ncl doesn't calculate corners correctly.)
+ 1708 (Need Initial conditions for RTM)
+ 1706 (VIC tests fail)
+ 1700 (Memory leak in MPI layer on yellowstone)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: drydep namelist now OFF by default
+
+List any changes to the defaults for the boundary datasets: New initial conditions
+ Native initial conditions for f09 for:
+ I1850CLM45SP, I1850CLM45BGC, ICLM45SP, ICLM45BGC
+ Interpinic for:
+ I1850CLM45 & I1850CLM45BGC: f19, hcru_hcru, ne30
+ ICLM45BGCCROP @ f19
+ ICLM45BGCDB @ f09
+
+ ALSO NOTE THAT NOW RTM HAS INITIAL CONDITIONS FOR R05 -- SO RIVERFLOW CHANGES
+ FOR BOTH CLM45 AND CLM40
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): many
+ csm_share, pio, scripts, Machines, RTM, cprnc, mapping
+
+
+ scripts to scripts4_130529 (update PTCLM, send simyr to RTM, new IC for CLM40CRUCN)
+ csm_share to share3_130528
+ rtm to rtm1_0_27 (Set startup initial condition files by -simyr flag)
+ Machines to Machines_130529 (Set hcru_hcru PE-layout, and PE-layout on yellowstone for f09 I cases)
+ pio to pio1_7_1
+ cprnc to cprnc_130425
+ mapping to mapping_130509
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/tools/TBLCFGtools.sh --- Correctly point to TSMCFGtools rather than TSMtools.sh.
+
+ M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl - Add -usr_mapdir option
+
+ M models/lnd/clm/tools/shared/mkmapdata/rmdups.ncl ------ Exit early if n_s==0
+ M models/lnd/clm/tools/shared/mkmapdata/mkmapdata.sh ---- Skip if file already exists, give
+ directory for rmdups.ncl
+ M models/lnd/clm/tools/shared/mkmapdata/mknoocnmap.pl --- Don't hide NCL output
+ M models/lnd/clm/tools/shared/mkmapgrids/mkscripgrid.ncl Explicitly calculate corners
+
+ M models/lnd/clm/bld/build-namelist - Set drydep to off by default, check crop setting for finidat files
+
+ M models/lnd/clm/bld/clm.buildnml.csh - Add back logic in about ignoring IC year or date
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml - New initial conditions for:
+ Native initial conditions for f09 for:
+ I1850CLM45SP, I1850CLM45BGC, ICLM45SP, ICLM45BGC
+ Interpinic for:
+ I1850CLM45SP & I1850CLM45BGC: f19, hcru_hcru, ne30
+ ICLM45BGCCROP @ f19
+ ICLM45BGCDB @ f09
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml - Use surfdata_map rather than surfdata
+ for CLM_USRDAT_NAME fsurdat files
+
+ M models/lnd/clm/bld/namelist_files/use_cases/stdurbpt_pd.xml - Remove setting of dtime, adjust hist output
+
+ More work on readme files...
+
+ M README
+ M models/lnd/clm/doc/IMPORTANT_NOTES
+ M models/lnd/clm/doc/Quickstart.GUIDE
+ M models/lnd/clm/doc/Quickstart.userdatasets
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: yes
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes
+ yellowstone/aux_clm pgi yes
+ frankfurt/aux_clm intel yes
+ frankfurt/aux_clm pgi yes
+
+ CESM history file comparison:
+ (ccsm_utils/Tools/component_gen_comp -compare $oldtag -generate $newtag -testid $testid -baselineroot $GLDCSEG/ccsm_baselines/ -model clm2)
+
+ yellowstone/aux_clm intel yes
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: yes
+ frankfurt interactive: yes
+
+ yellowstone/PTCLM: yes!
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_06
+
+Changes answers relative to baseline: Yes -- due to new initial condition files
+ for I1850CLM45SP and I1850CLM45BGC @ f09, f19, hcru, ne30
+ ICLM45SP and ICLM45BGC @ f09
+ ICLM45BGCCROP @ f19 and ICLM45BGCDB @ f09
+ and ICLM40CRUCN @ f09
+
+ AND new initial conditions for RTM for ALL R05 grids
+
+ And turning drydep namelist off in the driver causes answers to appear to be different
+ when comparing coupler history files.
+
+===============================================================
+===============================================================
+Tag name: clm4_5_06
+Originator(s): erik (Erik Kluzek)
+Date: Wed May 15 13:52:43 MDT 2013
+One-line Summary: A few small bug fixes, more updates to README files
+
+Purpose of changes:
+ More work on README files and documentation.
+ Fix from Danica/Bill for transient simulations.
+ Fix from Zack for Lake output variables
+ Another multi-instance script fix.
+ Fix tropicAtl pftdyn filename.
+ Remove models/lnd/clm/bld/config_query as it doesn't work with new CESM scripts.
+
+Requirements for tag:
+ Requirements: Fix bug: 1697, 1691, 1675, fix tropicAtl fpftdyn file, minimal testing on frankfurt
+
+Test level of tag: critical
+
+Bugs fixed (include bugzilla ID):
+ 1675 (need to relax error tolerance in reweightMod: weightsOkay)
+ 1691 (Scripts issue for multi-instance for CLM/RTM)
+ 1697 (ZLAKE and DZLAKE are NOT set)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: Fix 1x1_tropicAtl fpftdyn file
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, sacks (transient fix), dlawren/subin (lake fix), jedwards (multi-instance scripts)
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: testing namelist files for old CLM standalone
+
+ D models/lnd/clm/bld/config_query --- no longer works with new CESM scripts
+
+ D models/lnd/clm/test/tools/nl_files/nl_ch4_set2_ciso
+ D models/lnd/clm/test/tools/nl_files/nl_ch4_set3_pftroot
+ D models/lnd/clm/test/tools/nl_files/nl_rootlit
+ D models/lnd/clm/test/tools/nl_files/nl_ciso
+ D models/lnd/clm/test/tools/nl_files/nl_anoxia_wtsat
+ D models/lnd/clm/test/tools/nl_files/nl_vrtlay
+ D models/lnd/clm/test/tools/nl_files/nl_oldhyd
+
+List all files added and what they do:
+
+ A models/lnd/clm/tools/clm4_5/interpinic/addmetadata --- Add script to add important meta-data to finidat files.
+
+List all existing files that have been modified, and describe the changes:
+
+---------------- Work on README files documentation
+ M models/lnd/clm/test/tools/config_files/README
+ M models/lnd/clm/test/tools/README
+ M models/lnd/clm/test/tools/README.testnames
+ M models/lnd/clm/tools/README
+ M models/lnd/clm/doc/IMPORTANT_NOTES
+ M models/lnd/clm/doc/Quickstart.GUIDE
+ M models/lnd/clm/doc/README
+
+----------------
+ M models/lnd/clm/bld/config_files/config_definition.xml ------------ Document experimental settings / fix syntax error
+ M models/lnd/clm/bld/clm.buildnml.csh ------------------------------ Multi-instance fix
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml --- Fix 1x1_tropicAtl fpftdyn filename
+ M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml - Document experimental settings
+
+----------------
+ M models/lnd/clm/src/clm4_5/main/histFileMod.F90 ---- ZLAKE/DZLAKE fix
+ M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90 -- ZLAKE/DZLAKE fix
+ M models/lnd/clm/src/clm4_5/main/reweightMod.F90 ---- Increase tolerance to 1.e-7 so transient
+ simulations can run their full course.
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: yes
+
+ CESM test lists: (limited testing on yellowstone/aux_clm/intel)
+
+ frankfurt/aux_clm pgi yes
+ frankfurt/aux_clm intel yes
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_04
+
+Changes answers relative to baseline: no (bit-for-bit)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_05
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Tue May 14 13:15:12 MDT 2013
+One-line Summary: hcru bug fixes
+
+Purpose of changes: update pio tag and nfire init. mod
+
+Requirements for tag: N/A
+
+Test level of tag: Only run hcru_hcru tests
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, Fang and Erik for nfire problem
+
+List any svn externals directories updated (csm_share, mct, etc.): update pio to 1_7_0
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+-- nfire init. changed from nan to spval to fix problem with hcru_hcru debug + intel runs
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+
+Machines testing ran on: Only testing hcru_hcru resolutions
+
+Yellowstone Tests:
+
+ The following were run with DEBUG=TRUE and for 1 day initial + 1 day restart
+ hcru_hcru_I_2000_CRUFRC_CLM45_CN_yellowstone_gnu_pioFixed/ PASS
+ hcru_hcru_I_2000_CRUFRC_CLM45_CN_yellowstone_intel_pioFixed/ PASS
+ hcru_hcru_I_2000_CRUFRC_CLM45_CN_yellowstone_pgi_pioFixed/ PASS
+
+ ERS_D.hcru_hcru.I_2000_CRUFRC_CLM45_CN.yellowstone_intel.125102 PASS
+ ERS_D.hcru_hcru.I_2000_CRUFRC_CLM45_CN.yellowstone_pgi.125128 PASS
+
+Frankfurt Tests:
+
+ The following were run with DEBUG=TRUE and for 1 day initial + 1 day restart
+ hcru_hcru_I_2000_CRUFRC_CLM45_CN_frankfurt_pgi_pioFixed/ PASS
+ hcru_hcru_I_2000_CRUFRC_CLM45_CN_frankfurt_intel_pioFixed/ FAIL initial run (this is
+ likely related to other existing MPI problems on Frankfurt).
+
+CLM tag used for the baseline comparison tests if applicable: N/A
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_04
+Originator(s): erik (Erik Kluzek)
+Date: Mon May 13 12:25:14 MDT 2013
+One-line Summary: Fix the previous broken tag
+
+Purpose of changes:
+
+Fix the problems in the clm4_5_03 untested tag.
+
+Requirements for tag: Fix bug 1692, 1693
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+ 1693 (Misc. issues with clm4_5_03)
+ 1692 (externals screwed up in clm4_5_03)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): externals updated to those in SVN_EXTERNAL_DIRECTORIES
+
+List all files eliminated: Remove test/system as replaced by CESM testing
+
+ models/lnd/clm/test/system -- Delete the whole directory tree
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/src/clm4_0/main/clm_initializeMod.F90 -- fixed screwed up code
+
+Machines testing ran on: (Tests in priority order)
+ Standard Tag Pretag *** Standard Tag Posttag **
+
+ build-namelist unit tester: yes
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes
+ yellowstone/aux_clm pgi yes
+ frankfurt/aux_clm intel yes
+ frankfurt/aux_clm pgi yes
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_01
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_5_03
+Originator(s): erik (Erik Kluzek)
+Date: Fri May 10 17:29:56 MDT 2013
+One-line Summary: Several bug fixes for release, urban and test single point surface datasets
+
+Purpose of changes:
+
+ Some work on IMPORTANT_NOTES file.
+ Fix PTS_MODE restarts from John Truesdale. (implemented, but there are still issues)
+ Fix history change number of tapes on startup issue.
+ Bring in urban single pt surface datasets and single pt test: mexicocityMEX, vancouverCAN , urbanc_alpha, 1x1_tropicAtl, 1x1_smallvilleIA
+ Drydep use before defined problem.
+ Always bypass first two time-steps for CN/BGC.
+ Fix gregorian calendar on history files.
+ Remove two fields on clm45 fpftdata file as per Gordon Bonan.
+ ncd_pio fix from Jim Edwards/Mariana V.
+ set nsegspc=20 for HOMME and high resolution grids.
+ Change documentation on CLM build-namelist -drydep, but keep it default on (will change to off in next tag)
+ Remove a bunch of datm/drv fields in namelist_definition.
+ Fix some issues with Crop and DV that Sam found.
+ Fix a scripts issue with multi-instance.
+ Update RTM (multi-instance fix, allow null grid).
+ Update test list so that CLM45/DV/CROP are exercised.
+ Update scripts/machines tag because of multiple problems.
+
+Requirements for tag: fix bug 1488, 1673, 1677, 1682, 1653, 1689, 1690, 1687, 1688, 1685, 1691
+
+Test level of tag: limited!
+
+Bugs fixed (include bugzilla ID):
+
+ 1025 (partial -- implement changes from John Truesdale so SCAM can read global IC files)
+ 1488 (HOMME grids can not use nsegspc=20)
+ 1653 (Calls to PIO are not properly done)
+ 1673 (B compset gregorian calendar not reflected in CLM history)
+ 1677 (Remove bypass_CN_balance_check_on_restart in CLM45)
+ 1682 (Problem starting up CLM with no history files)
+ 1685 (use before define issue in DryDeposition)
+ 1687 (SBN scripts bug)
+ 1688 (misc. issues with new create_test)
+ 1689 (CLM45 dgvm does not build)
+ 1690 (CLM45 CNDV lightning namelist is missing)
+ 1691 (Scripts issue for multi-instance)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist:
+ Set nsegspc=20 for HOMME and f05/f02 resolutions
+ Set stream_fldfilename_lightng for CLM45/CNDV
+
+List any changes to the defaults for the boundary datasets: New single-point test and urban datasets
+ New surface datasets for: mexicocityMEX, vancouverCAN, urbanc_alpha, 1x1_tropicAtl, 1x1_smallvilleIA
+ New fpftdyn for: 1x1_tropicAtl 1850-2005
+ New pft-physiology file for CLM45 with three fields removed that were NOT being read in (qe25, mp, and resist)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, jedwards/mvertens (fix for bug 1653), jet (fix for bug 1025), slevis (fixes for DV)
+
+List any svn externals directories updated (csm_share, mct, etc.): Machines, scripts, rtm
+ Machines to Machines_130509
+ scripts to scripts4_130510
+ rtm to rtm1_0_25
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/build-namelist ---- Document drydep as if it's off (will actually become off in next tag)
+ M models/lnd/clm/bld/clm.buildnml.csh -- Multi-instance bug fix.
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml ----- nsegspc for ALL grids is 20
+ M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml --- Remove datm/drv namelist crap
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml ----- nsegspc for ALL grids is 20
+ New pft-physiology file, new surface/fpftdyn datasets for single point test and urban
+ Set stream_fldfilename_lightng for CNDV.
+ M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml --- Remove datm/drv namelist crap
+
+ M models/lnd/clm/doc/IMPORTANT_NOTES -- updates
+
+ M models/lnd/clm/src/util_share/clm_time_manager.F90 - Set parameters for calendar type.
+ M models/lnd/clm/src/util_share/ncdio_pio.F90 -------- Fix so that type of data output on read is based
+ on the variable type of the data rather than the type of data on the input file. (from mvertens/jedwards)
+
+ M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90 ------ Fix so CNDV can build.
+ M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90 ------ Remove bypass_CN_balance_check_on_restart
+ M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90 - Fix use before define error.
+ M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90 ---- On restart set fieldlist and later compare to make sure
+ not screwed up.
+ M models/lnd/clm/src/clm4_5/main/histFileMod.F90 ---------- Make htapes_fieldlist public, check calendar for output files,
+ check that namelist didn't change number of tapes or fields on restart
+ M models/lnd/clm/src/clm4_5/main/clm_driver.F90 ----------- Remove bypass_CN_balance_check_on_restart, NEVER do balance check
+ on first time-step
+
+ M models/lnd/clm/src/clm4_0/biogeochem/DryDepVelocity.F90 - Fix use before define error.
+ M models/lnd/clm/src/clm4_0/main/histFileMod.F90 ---------- Make htapes_fieldlist public, check calendar for output files,
+ check that namelist didn't change number of tapes or fields on restart
+ M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90 ---- On restart set fieldlist and later compare to make sure
+ not screwed up.
+
+Machines testing ran on: Limited! (watch out for this tag!)
+
+ I ran preliminary testing, with versions on the cbugfixclm450 branch. We will fix other issues with the entire
+ package as we find them.
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_02
+
+Changes answers relative to baseline: No (bit-for-bit)
+
+===============================================================
+===============================================================
+Tag name: clm4_5_02
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Tue May 7 21:04:35 MDT 2013
+One-line Summary: make 'shared' tools directory, and other minor tools fixes
+
+Purpose of changes:
+
+- Make separate 'shared' tools directory, move some tools from the clm4_5
+ directory into there.
+
+- Change interpinic so that htop and hbot are skipped
+
+- Change Makefile.common files in tools to use ifort by default on yellowstone,
+ so users can just type 'gmake' without needing to do 'gmake USER_FC=ifort'.
+ For simplicity, this has been implemented by defaulting to ifort for ALL Linux
+ machines.
+
+- Fix minor mksurfdata.pl bugs (1669, 1681).
+
+Requirements for tag:
+- fix bug 1669, 1681*
+- only tools testing needed
+
+Test level of tag: tools only
+
+Bugs fixed (include bugzilla ID):
+- 1669: change needed for mksurfdata.pl for smallville (or crop PFT override anyway)
+- Changes to get mksurfdata.pl working with urban single point datasets
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None for CLM; tools builds changed to
+use ifort by default on Linux machines
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated:
+
+========= Moved to 'shared' directory
+D models/lnd/clm/tools/clm4_5/mkmapdata
+D models/lnd/clm/tools/clm4_5/mkprocdata_map
+D models/lnd/clm/tools/clm4_5/ncl_scripts
+D models/lnd/clm/tools/clm4_5/mkmapgrids
+
+List all files added and what they do:
+
+========= Tools moved from clm4_5 directory to shared directory
+A models/lnd/clm/tools/shared
+A models/lnd/clm/tools/shared/mkmapdata/mvNimport.sh
+A models/lnd/clm/tools/shared/mkmapdata/rmdups.ncl
+A models/lnd/clm/tools/shared/mkmapdata/regridbatch.sh
+A models/lnd/clm/tools/shared/mkmapdata/createXMLEntries.pl
+A models/lnd/clm/tools/shared/mkmapdata/mkmapdata.sh
+A models/lnd/clm/tools/shared/mkmapdata/mkunitymap.ncl
+A models/lnd/clm/tools/shared/mkmapdata/mknoocnmap.pl
+A models/lnd/clm/tools/shared/mkmapdata/README
+A models/lnd/clm/tools/shared/mkmapdata
+A models/lnd/clm/tools/shared/mkprocdata_map/clm4054_ne30g16_I2000.clm2.h0.2000-01_c121107.nc
+A models/lnd/clm/tools/shared/mkprocdata_map/map_ne30np4_nomask_to_fv1.9x2.5_nomask_aave_da_c121107.nc
+A models/lnd/clm/tools/shared/mkprocdata_map/mkprocdata_map_functions.bash
+A models/lnd/clm/tools/shared/mkprocdata_map/src/mkprocdata_map.F90
+A models/lnd/clm/tools/shared/mkprocdata_map/src/gridmapMod.F90
+A models/lnd/clm/tools/shared/mkprocdata_map/src/constMod.F90
+A models/lnd/clm/tools/shared/mkprocdata_map/src/Makefile.common
+A models/lnd/clm/tools/shared/mkprocdata_map/src/fmain.F90
+A models/lnd/clm/tools/shared/mkprocdata_map/src/shr_file_mod.F90
+A models/lnd/clm/tools/shared/mkprocdata_map/src/nanMod.F90
+A models/lnd/clm/tools/shared/mkprocdata_map/src/Mkdepends
+A models/lnd/clm/tools/shared/mkprocdata_map/src/Srcfiles
+A models/lnd/clm/tools/shared/mkprocdata_map/src/Filepath
+A models/lnd/clm/tools/shared/mkprocdata_map/src/Makefile
+A models/lnd/clm/tools/shared/mkprocdata_map/src/fileutils.F90
+A models/lnd/clm/tools/shared/mkprocdata_map/src/shr_kind_mod.F90
+A models/lnd/clm/tools/shared/mkprocdata_map/src
+A models/lnd/clm/tools/shared/mkprocdata_map/mkprocdata_map_in
+A models/lnd/clm/tools/shared/mkprocdata_map/mkprocdata_map_all
+A models/lnd/clm/tools/shared/mkprocdata_map/clm
+A models/lnd/clm/tools/shared/mkprocdata_map/mkprocdata_map_wrap
+A models/lnd/clm/tools/shared/mkprocdata_map/clm4054_f19g16_I2000.clm2.h0.2000-01_c121107.nc
+A models/lnd/clm/tools/shared/mkprocdata_map/README
+A models/lnd/clm/tools/shared/mkprocdata_map
+A models/lnd/clm/tools/shared/ncl_scripts/cprnc.pl
+A models/lnd/clm/tools/shared/ncl_scripts/getco2_historical.ncl
+A models/lnd/clm/tools/shared/ncl_scripts/cprnc.ncl
+A models/lnd/clm/tools/shared/ncl_scripts/getregional_datasets.pl
+A models/lnd/clm/tools/shared/ncl_scripts/getregional_datasets.ncl
+A models/lnd/clm/tools/shared/ncl_scripts/README
+A models/lnd/clm/tools/shared/ncl_scripts
+A models/lnd/clm/tools/shared/mkmapgrids/src/Makefile.common
+A models/lnd/clm/tools/shared/mkmapgrids/src/domainMod.F90
+A models/lnd/clm/tools/shared/mkmapgrids/src/shr_sys_mod.F90
+A models/lnd/clm/tools/shared/mkmapgrids/src/shr_file_mod.F90
+A models/lnd/clm/tools/shared/mkmapgrids/src/nanMod.F90
+A models/lnd/clm/tools/shared/mkmapgrids/src/shr_log_mod.F90
+A models/lnd/clm/tools/shared/mkmapgrids/src/Mkdepends
+A models/lnd/clm/tools/shared/mkmapgrids/src/Srcfiles
+A models/lnd/clm/tools/shared/mkmapgrids/src/mkmapgrids.F90
+A models/lnd/clm/tools/shared/mkmapgrids/src/Filepath
+A models/lnd/clm/tools/shared/mkmapgrids/src/Makefile
+A models/lnd/clm/tools/shared/mkmapgrids/src/shr_kind_mod.F90
+A models/lnd/clm/tools/shared/mkmapgrids/src
+A models/lnd/clm/tools/shared/mkmapgrids/mkmapgrids.namelist
+A models/lnd/clm/tools/shared/mkmapgrids/mkscripgrid.ncl
+A models/lnd/clm/tools/shared/mkmapgrids/mkmapgrids.csh
+A models/lnd/clm/tools/shared/mkmapgrids/README
+A models/lnd/clm/tools/shared/mkmapgrids
+
+========= Add test for mkmapdata using '-p clm4_0'
+A models/lnd/clm/test/tools/nl_files/mkmapdata_ne30np4_clm4_0
+
+List all existing files that have been modified, and describe the changes:
+
+========= Point to new 'shared' tools directory where appropriate
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl
+ - also fix mksurfdata.pl for crop PFT override (bug 1669)
+ - also changes to get mksurfdata.pl working with urban single point datasets (bug 1681)
+M models/lnd/clm/tools/clm4_0/mksurfdata_map/mksurfdata.pl
+ - also fix mksurfdata.pl for crop PFT override (bug 1669)
+M models/lnd/clm/test/tools/TBLCFGtools.sh
+M models/lnd/clm/test/tools/TOPtools.sh
+M models/lnd/clm/test/tools/TBLscript_tools.sh
+M models/lnd/clm/test/tools/TBLtools.sh
+M models/lnd/clm/test/tools/input_tests_master
+ - also add test for mkmapdata using '-p clm4_0'
+M models/lnd/clm/tools/README
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/README.developers
+
+========= Put gen_domain in 'shared' tools directory
+M SVN_EXTERNAL_DIRECTORIES
+
+========= Use ifort by default on yellowstone (and other Linux machines)
+M models/lnd/clm/tools/clm4_5/interpinic/src/Makefile.common
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Makefile.common
+M models/lnd/clm/tools/clm4_0/interpinic/src/Makefile.common
+M models/lnd/clm/tools/clm4_0/mksurfdata_map/src/Makefile.common
+
+========= Change interpinic so that htop and hbot are skipped
+M models/lnd/clm/tools/clm4_5/interpinic/src/interpinic.F90
+
+
+Machines testing ran on: (Tests in priority order)
+ build-namelist unit tester: no
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel no
+ frankfurt/aux_clm_int intel no
+ yellowstone/aux_clm pgi no
+ frankfurt/aux_clm intel no
+
+ test_driver.sh tools testing:
+
+ lynx interactive: no
+ yellowstone interactive: yes
+
+ All PASS except for the following expected failures (note that 006 & 008
+ baselines are expected to always fail):
+
+ 006 ble14 TBLCFGtools.sh shared gen_domain CFGtools__ds T31.runoptions .......................... rc=4 FAIL
+ 008 ble@4 TBLCFGtools.sh shared gen_domain CFGtools__ds ne30.runoptions ......................... rc=4 FAIL
+ 027 smiS4 TSMscript_tools.sh shared ncl_scripts getregional_datasets.pl getregional ............. rc=6 FAIL
+ 028 bliS4 TBLscript_tools.sh shared ncl_scripts getregional_datasets.pl getregional ............. rc=4 FAIL
+
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_5_01
+
+Changes answers relative to baseline: NO
+
+===============================================================
+===============================================================
+Tag name: clm4_5_01
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Mon May 6 16:52:27 MDT 2013
+One-line Summary: update externals
+
+Purpose of changes: update externals to alpha08b
+
+Requirements for tag: N/A
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+< scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_130502
+< scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130502
+< models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_33
+---
+> scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_130422
+> scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130412
+> models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_29
+13c13
+< models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_130502
+---
+> models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_130405
+16c16
+< models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_130423
+---
+> models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_130226
+18c18
+< models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_130417
+---
+> models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_130214
+20c20
+< models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_6_9/pio
+---
+> models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_6_7/pio
+23,25c23,25
+< tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_130425
+< tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130426a
+< models/lnd/clm/tools/clm4_5/gen_domain https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130426a/gen_domain_files
+---
+> tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_130411
+> tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130403
+> models/lnd/clm/tools/clm4_5/gen_domain https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130403/gen_domain_files
+
+List all files eliminated:N/A
+
+List all files added and what they do:N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+- clean up test list
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel OK (detail of fails that should pass next time)
+
+BFAIL ERI_D.f10_f10.I20TRCN.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+FAIL ERS.f19_g16_r01.I1850CLM45CN4Me.nldir_rtmOnFloodOnEffvelOff.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- No matching time found in cprnc? should pass next time.
+FAIL ERS_D.f19_g16.ICLM45GLCMEC.nldir_glcMEC.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- changes in g2x_Sg_frac01 and g2x_Sg_topo01
+FAIL ERS_D.f19_g16.ICLM45GLCMEC.nldir_glcMEC.yellowstone_intel.GC.170226.nlcomp
+ -- changes in cism namelist and cism config
+FAIL ERS_D.f19_g16.IGRCP26CLM45CN.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- changes in g2x_Sg_frac01 and g2x_Sg_topo01
+FAIL ERS_D.f19_g16.IGRCP26CLM45CN.yellowstone_intel.GC.170226.nlcomp
+ -- changes in cism namelist and cism config
+FAIL ERS_D.f19_g16.IGRCP26CN.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- changes in g2x_Sg_frac01 and g2x_Sg_topo01
+FAIL ERS_D.f19_g16.IGRCP26CN.yellowstone_intel.GC.170226.nlcomp
+ -- changes in cism namelist and cism config
+BFAIL ERS_Ld3_D_P64x1.ne30_g16.ICLM45CN.nldir_default.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+BFAIL ERS_Ld3_D_P64x16.ne30_g16.ICN.nldir_default.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+FAIL ERS_Lm3.f19_g16.IGRCP60CN.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- changes in g2x_Sg_frac01 and g2x_Sg_topo01
+FAIL ERS_Lm3.f19_g16.IGRCP60CN.yellowstone_intel.GC.170226.nlcomp
+ -- changes in cism namelist and cism config
+BFAIL PET_D_P1x30.ne30_g16.ICN.nldir_default.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+BFAIL SMS_RLA.f45_f45.I.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+BFAIL SMS_RLA.f45_f45.ICLM45.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+BFAIL SMS_ROA.f45_f45.I.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+BFAIL SMS_ROA.f45_f45.ICLM45.yellowstone_intel.GC.170226.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+
+ yellowstone/aux_clm pgi OK (detail of fails that should pass next time)
+
+FAIL ERI.f19_g16.IG1850.yellowstone_pgi.GC.170137.compare_hist.clm4_0_81
+ -- changes in g2x_Sg_frac01 and g2x_Sg_topo01
+FAIL ERI.f19_g16.IG1850.yellowstone_pgi.GC.170137.nlcomp
+ -- changes in cism namelist and cism_config
+FAIL ERI.f19_g16.IG1850CLM45.yellowstone_pgi.GC.170137.compare_hist.clm4_0_81
+ -- changes in g2x_Sg_frac01 and g2x_Sg_topo01
+FAIL ERI.f19_g16.IG1850CLM45.yellowstone_pgi.GC.170137.nlcomp
+ -- changes in cism namelist and cism config
+BFAIL ERI_D.f10_f10.I20TRCN.yellowstone_pgi.GC.170137.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+FAIL SMS.T31_g37.IG4804.yellowstone_pgi.GC.170137.compare_hist.clm4_0_81
+FAIL SMS.T31_g37.IG4804.yellowstone_pgi.GC.170137.nlcomp
+ -- changes in cism namelist and cism config
+ -- changes in drv_in (ocn_nthreads=2)
+FAIL SMS.T31_g37.IG4804CLM45.yellowstone_pgi.GC.170137.compare_hist.clm4_0_81
+ -- changes in g2x_Sg_frac01 and g2x_Sg_topo01
+BFAIL SMS_RLB.f45_f45.I.yellowstone_pgi.GC.170137.compare_hist.clm4_0_81
+ -- no baseline, should pass next round
+BFAIL SMS_RLB.f45_f45.ICLM45.yellowstone_pgi.GC.170137.compare_hist.clm4_0_8
+ -- no baseline, should pass next round
+
+ frankfurt/aux_clm intel OK
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_81
+
+Changes answers relative to baseline: yes
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: only changes in g2x_Sg_frac01 and g2x_Sg_topo01
+ - what platforms/compilers: all
+
+===============================================================
+===============================================================
+Tag name: clm4_5_00
+Originator(s): erik (Erik Kluzek)
+Date: Thu May 2 00:20:17 MDT 2013
+One-line Summary: Official end to CLM4.5 development for CLM offline
+
+Purpose of changes: Changes from clm4_0_54 to now...
+
+Compsets and Scripts Changes:
+
+Remove ability to set compset file on command line, and use a new expanded compset file definition that
+allows user to create many compsets on the fly by defining a long name with the "-user_compset" option
+to "create_newcase". "-user_compset" is in the form of...
+
+TIME_DATM[%phys]_CLM[40|45][%phys]_SICE_SOCN_RTM[%phys]_GLC[%phys]_SWAV[_BGC%phys]
+
+Where
+ TIME = Time period (e.g. 2000, 20TR, RCP8...)
+ GLC = [CISM1, SGLC]
+ BGC = optional BGC scenario
+The OPTIONAL %phys attributes specify submodes of the given system
+
+So for example
+
+./create_newcase -user_compset 1850_DATM%CRU_CLM45%BGC_SICE_SOCN_RTM_SGLC_SWAV -case cru1850 -res f19_g16 -mach yellowstone -compiler intel
+
+will setup a 1850 case at f19 resolution with CRUNCEP forcing with CLM4.5-BGC.
+
+Changes for both clm4.0 and clm4.5:
+
+* Bug fixes in MEGAN VOC emission fluxes and dry deposition velocities
+* CRUNCEP is now an option for atmospheric forcing
+* Change from Sam Levis for CROP to pft-physiology file so that CROP parameter is in Kelvin rather than Celsius.
+* Don't re-weight pftdyn if weights are essentially identical.
+
+CLM4.5 includes the following:
+
+* Bring in flood capability to RTM.
+* Bring LBNL-merge branch on with: vertical soil, Methane, CENTURY, split nitrification, new-lake model.
+* Modifications to GPP, on gppdev branch, multilayer canopy and then single-layer version that reproduces it.
+* Crop model updates. Irrigation included with crop model as an option. Fix CNDV-CROP.
+* Urban model updates, multi-density, urban depth separate from soil depth, wasteheat to zero.
+* Bring in permafrostsims09 branch with Sean Swenson's flooding changes.
+* Update pft-physiology file, change some CN defaults, change min flow slightly in RTM.
+* Set ponding to zero, acclimation mods from Keith Oleson, a hydrology change from Sean Swenson.
+* Add active flags, change subgrid weighting convention.
+* Turn off subgrid topography snow parameterization for glc_mec landunits.
+* Jinyun photosynthesis change impacting arid regions.
+* Keith Oleson's photosynthesis change, changes canopy top: triose phosphate util. rate to be dependent on vcmax.
+* VIC hydrology is an option.
+* Update mksurfdata_map for CLM4.5 (also add support for glc_nec=36 although we have no datasets for this).
+* Snow depth averaged over grid-cell (SNOWDP) on history file changed in favor of SNOW_DEPTH (averaged only over snow covered area).
+* Spinup changes from Charlie Koven from build-time to run-time (spinup now option added to CLM_BLDNML_OPTS as "-spinup on|off").
+* Bring the F. Li and S. Levis Fire model for CLMCN and CLMBGC based on Li et al. (2012a,b; 2013).
+* BSW calculation changed affecting drought phenology and frozen temperature sensitivity (SP, CN, and BGC as well as DV)
+
+Test level of tag: doc
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+ Changes to CLM configure:
+ -phys option to specify clm4_0 or clm4_5
+ -pergro and -c13 option removed
+ -spinup option removed for CLM4_0
+ New options for clm4_5: -clm4me, -vichydro, -exlaklayers, -vsoilc_centbgc
+
+Describe any changes made to the namelist:
+ For CLM4.0: WRF and 360x720cru resolutions added
+ For CLM4.5: new namelists: popd_streams light_streams clm_hydrology1_inparm clm_soilhydrology_inparm
+ irrigate is a namelist option rather than using different surface datasets
+ New namelist items for clm_inparm:
+ anoxia no_frozen_nitrif_denitrif
+ atm_c14_filename override_bgc_restart_mismatch_dump
+ cryoturb_diffusion_k perchroot
+ decomp_depth_efolding perchroot_altk
+ deepmixing_depthcrit pftspecific_rootingprofile
+ deepmixing_mixfact rootprof_exp
+ exponential_rooting_profile rootprof_exp
+ froz_q10 som_adv_flux
+ hist_wrtch4diag som_diffus
+ lake_melt_icealb spinup_state
+ max_altdepth_cryoturbation surfprof_exp
+ max_depth_cryoturb use_c13
+ more_vertlayers use_c14
+ nfix_timeconst use_c14_bombspike
+
+
+List any changes to the defaults for the boundary datasets:
+ All CLM4.5 datasets are new.
+ For CLM4.0, new ne120, ne240, and 360x720cru surface datasets (ne120 ne120 finidat files)
+ new pft-physiology file
+
+New history fields:
+ Dozens of new fields for clm4_5.
+ Three new fields for clm4_0:
+>>>>>>>>>>>> Set first and last pop-dens year, and do "arb_ic" rather than "startup"
+>>>>>>>>>>>> type for all transient cases (allow transient cases to do a cold-start)
+>>>>>>>>>>>> ALTHOUGH YOU SHOULD NEVER DO A COLD START FOR A TRANSIENT CASE!
+ M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/20thC_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/2000_glacierMEC_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850_glacierMEC_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_glacierMEC_transient.xml
+
+>>>>>>>>>>>> Some small changes to documentation about irrigation.
+ M models/lnd/clm/doc/UsersGuide/preface.xml
+ M models/lnd/clm/doc/UsersGuide/custom.xml
+
+ M models/lnd/clm/src/util_share/ndepStreamMod.F90 -- make default private, namelist data private, and clm_domain_mct public
+
+ M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90 --- add number of individuals
+ M models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate3Mod.F90 -- handle more impacts of fire
+ M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90 ----------- Fire module -- almost entirely replaced. Two new public
+ methods added:
++ public :: CNFireInit ! Initialization of CNFire
++ public :: CNFireInterp ! Interpolate fire data
+ M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90 -------- Handle more impacts of fire
+ M models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate3Mod.F90 -- Handle more impacts of fire
+ M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90 ----------- burndate, lfc, wf, btran2, col_ctrunc, totsomc added to restart
+                                                                 old fire fields removed, _vr fields required if expected.
+ M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90 --- Add CNFireInit, and update CNFireArea call.
+ M models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90 ------- Set fire variables.
+ M models/lnd/clm/src/clm4_5/biogeochem/CNVegStructUpdateMod.F90 farea_burned impacts SAI for stubble after harvest
+ M models/lnd/clm/src/clm4_5/main/CNiniTimeVar.F90 -------------- Initialize new fire variables
+ M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90 --------- Update CNEcosystemDynInit call
+ M models/lnd/clm/src/clm4_5/main/accFldsMod.F90 ---------------- Add prec10 and prec60 (10 and 60 day total precipitation)
+ M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90 ------------ Initialize new fire variables.
+ M models/lnd/clm/src/clm4_5/main/pftdynMod.F90 ----------------- Update lf_conv_cflux, make PFT weight check same as for surfrdMod.F90
+ M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90 -------------- Read in new fire data, abort if data needed NOT found on the
+ surface dataset.
+ M models/lnd/clm/src/clm4_5/main/findHistFields.pl ------------- Also read in CNFireMod for history fields.
+ M models/lnd/clm/src/clm4_5/main/clm_driver.F90 ---------------- Add CNFireInterp call.
+ M models/lnd/clm/src/clm4_5/main/CNiniSpecial.F90 -------------- Initialize lf_conv_cflux to zero.
+ M models/lnd/clm/src/clm4_5/main/pftvarcon.F90 ----------------- Read in new fire parameters
+ (no longer need "resist" on the pft-physiology file)
+ M models/lnd/clm/src/clm4_5/main/surfrdMod.F90 ----------------- Fix tolerances to match mksurfdata_map
+ bug fix for non-irrigated crop.
+ M models/lnd/clm/src/clm4_5/main/clmtype.F90 ------------------- New fire fields
+ M models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90 -------------- Initialize some new fire fields: tsoi17, fsat
+ M models/lnd/clm/src/clm4_5/main/histFldsMod.F90 --------------- New fire history fields
+ M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90 ----- Save btran2, smp_node_lf for fire
+ M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90 ------- Save wf2, tsoi17, h2osoi_liqice_10cm
+
+ M models/lnd/clm/src/clm4_0/main/surfrdMod.F90 ----------------- Fix tolerances to match mksurfdata_map
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: yes
+
+ CESM test lists:
+
+ yellowstone/aux_clm intel yes
+ yellowstone/aux_clm pgi yes
+ frankfurt/aux_clm_int intel yes
+ frankfurt/aux_clm intel yes
+
+ test_driver.sh tools testing:
+
+ lynx interactive: no
+ yellowstone interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_79
+
+Changes answers relative to baseline: YES!
+
+ Summarize any changes to answers:
+ - what code configurations: All with CLM45
+ - what platforms/compilers: All
+ - nature of change: new climate
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ /glade/p/cesm/lmwg/cases/cesm1/C20new -- on yellowstone
+ /glade/scratch/erik/archive/clm4079_NewFire10f19_CRU_20TR_CN4Me
+ /glade/scratch/erik/archive/clm4077_I1850CLM45CN4Me
+ /glade/scratch/erik/archive/clm4077_NewFire10f19_QIAN_20TR_CN4Me
+ hsi:/home/fangli/qian20
+
+===============================================================
+===============================================================
+Tag name: clm4_0_79
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Wed Apr 24 20:27:01 MDT 2013
+One-line Summary: pftdyn, pft-phys*.nc and datm8 update
+
+Purpose of changes: From Erik:: - update Don't re-weight pftdyn if weights are essentially identical (Both CLM40 AND CLM45).
+ - Turn wasteheat to "ON" in CLM45. (namelist change) (done)
+ - Change from Sam Levis for CROP to pft-physiology file so that CROP parameter is in Kelvin rather than Celsius (both CLM40 and CLM45).
+ - Change datm so that LWDN is NOT read from files for CRUNCEP (datm8_130424).
+
+Requirements for tag: fix bug 1621
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID): fixed 1621
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: turn waste heat on in clm4_5
+
+List any changes to the defaults for the boundary datasets: change pft-phys files for 4_0 and 4_5
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, Erik, Dave Lawrence
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+< models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_130424
+---
+> models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_130325
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+ update to datm8_130424
+
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml
+ - $CSMDATA/lnd/clm2/pftdata/pft-physiology.clm40.c130424.nc
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+ - change ON_WASTEHEAT to ON
+ - use $CSMDATA/lnd/clm2/pftdata/pft-physiology.c130424.nc
+
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+M models/lnd/clm/src/clm4_0/main/pftdynMod.F90
+ - change wtpfttot2 check
+
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+ - syntax clean up - caught by Ben Andre
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK. many expected failures due to new pft-physiology files. Should pass next time.
+
+ CESM test lists:
+ Many nlcomp failures:
+ clm4_5:
+ NEW: fpftcon = '/fs/cgd/csm/inputdata/lnd/clm2/pftdata/pft-physiology.c130424.nc'
+ BASELINE: fpftcon = '/fs/cgd/csm/inputdata/lnd/clm2/pftdata/pft-physiology.c121025.nc'
+ NEW: urban_hac = 'ON'
+ BASELINE: urban_hac = 'ON_WASTEHEAT'
+ clm4_0:
+ NEW: fpftcon = '/fs/cgd/csm/inputdata/lnd/clm2/pftdata/pft-physiology.clm40.c130424.nc'
+ BASELINE: fpftcon = '/fs/cgd/csm/inputdata/lnd/clm2/pftdata/pft-physiology.c110425.nc'
+
+ component_gen_comp: all PASS or BFAIL1
+ summarize_cprnc_diffs: differences in CLM files
+
+ yellowstone/CESM:
+ intel: OK. Expected failures for compare_hist (should pass next time)
+ FAIL ERS_D.f19_g16.IGRCP26CN.yellowstone_intel.GC.222079.compare_hist.clm4_0_78
+ FAIL ERS_D.f19_g16.IRCP85CN.yellowstone_intel.GC.222079.compare_hist.clm4_0_78
+ FAIL ERS_E.f19_g16.I1850CRUCLM45CN.yellowstone_intel.GC.222079.compare_hist.clm4_0_78
+ FAIL ERS_Lm3.f19_g16.IGRCP60CN.yellowstone_intel.GC.222079.compare_hist.clm4_0_78
+ FAIL NCK.f10_f10.ICRUCLM45.yellowstone_intel.GC.222079.compare_hist.clm4_0_78
+ FAIL PET_PT.f10_f10.I20TRCN.yellowstone_intel.GC.222079.compare_hist.clm4_0_78
+ FAIL SMS.f19_g16.IRCP45CN.yellowstone_intel.GC.222079.compare_hist.clm4_0_78
+ pgi : OK. Expected failures for compare_hist (should pass next time)
+ FAIL ERS_D.hcru_hcru.I_2000_CRUFRC_CLM45_CN.yellowstone_pgi.GC.111079.compare_hist.clm4_0_78
+ FAIL ERS_D.hcru_hcru.I_2000_CRUFRC_CN.yellowstone_pgi.GC.111079.compare_hist.clm4_0_78
+ FAIL PET_PT.f10_f10.I20TRCN.yellowstone_pgi.GC.111079.compare_hist.clm4_0_78
+
+ frankfurt/CESM:
+ intel: OK. Expected failures for compare_hist (should pass next time)
+ FAIL SMS.f10_f10.IRCP26CN.frankfurt_intel.GC.pft79.compare_hist.clm4_0_78
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_78
+
+Changes answers relative to baseline: yes
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: all
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate): same climate
+ -in coupler history files: l2x_Sl_*, l2x_Fall, x2l_Slrr* and some r2x_* fields change
+ -in clm history files: for these tests only differences seen in IGRCP60CN
+
+===============================================================
+===============================================================
+Tag name: clm4_0_78
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Tue Apr 23 19:25:18 MDT 2013
+One-line Summary: MEGAN fixes
+
+Purpose of changes:
+
+ - Bug fixes in MEGAN VOC emission fluxes and dry deposition velocities
+ - Remove the land fraction weighting from MEGAN history fields
+ - Added XPAN capability to dry deposition parametrization
+
+Requirements for tag: N/A
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, starting branch from Erik.
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+ - maximum string length of megan_specifier increased to 1024 characters
+
+ M models/lnd/clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+ M models/lnd/clm/src/clm4_0/biogeochem/VOCEmissionMod.F90
+ - land fraction weighting has been removed from the MEGAN diagnostics
+ - added initialization of the vocflx_meg array to zero to prevent
+ erroneous values from contributing to the MEGAN emissions
+
+ M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+ M models/lnd/clm/src/clm4_0/biogeochem/DryDepVelocity.F90
+ - corrected surface pressure
+ - added XPAN specification
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK
+
+ yellowstone/CESM:
+ -intel: OK
+ -component_gen_comp fails on a number of tests, but it is all expected. summarize_cprnc_diffs verifies that all fails in the
+ CLM history files is consistent.
+ -compare_hist failures are expected due to new dry deposition values going through coupler.
+ -pgi : OK
+ -compare_hist failures are expected
+
+ frankfurt/CESM:
+ -intel: OK.
+ -compare_hist failures are expected
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_77
+
+Changes answers relative to baseline: Yes. Changes in l2x_Sl_dd{001-035},l2x_Sl_dd{040,041,043} in coupler hist file.
+ Possible changes in *_voc fields in coupler hist files for certain configurations. Changes in 10 MEG_* fields and VOCFLXT in CLM history files.
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate): same climate. changes in CLM fields range from 1.e-9
+ (VOCFLXT) to 1.e-17 (MEG_thujene_a)
+
+===============================================================
+===============================================================
+Tag name: clm4_0_77
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Tue Apr 23 11:38:45 MDT 2013
+One-line Summary: fix carbon balance bug in transient runs with VERTSOI, and fix Soil Hydrology bug
+
+Purpose of changes:
+
+Fix two bugs:
+
+(1) In transient CLM45 runs with VERTSOI, a carbon balance error
+ occurred due to two routines being called with updated filters
+ when they should have been called with filters set at their values
+ from the previous time step. This bug has existed since clm4_0_62.
+
+(2) A potential for an array out-of-bounds error (which could show up
+ as garbage results if array bounds checking was off) which showed
+ up in rare circumstances (e.g., a single grid cell in Greenland in
+ a 1-year test run)
+
+Requirements for tag: Fix bugs 1663, 1664
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+
+ 1663 (array bounds error in SoilHydrologyMod)
+ 1664 (carbon balance errors)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+========= Fix for bug 1664
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+
+========= Fix for bug 1663
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+
+Machines testing ran on: (Tests in priority order)
+ Standard Tag Pretag *** Standard Tag Posttag **
+
+ build-namelist unit tester: no
+
+ CESM test lists:
+
+ yellowstone/CESM: yes
+
+ All PASS or xFAIL; all component_gen_comp comparisons pass or BFAIL1
+
+ yellowstone/CESM/allIcompsets: no
+
+ frankfurt/CESM: yes
+
+ All PASS or xFAIL
+
+ test_system testing:
+
+ yellowstone batch: no
+ frankfurt interactive: no
+ yellowstone interactive: no
+ lynx batch: no
+ lynx interactive: no
+ frankfurt batch: no
+
+ test_driver.sh tools testing:
+
+ lynx interactive: no
+ yellowstone interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_76
+
+Changes answers relative to baseline: YES, potentially (though none
+observed in standard testing)
+
+ Answer changes are possible in any CLM45 case due to the SoilHydrology
+ bug fix (1663). This changes answers only in rare situations where the
+ water table comes near the surface (e.g., in a 1-year test run, this only
+ happened in one grid cell in Greenland)
+
+ In addition, the following answer changes are expected due to the fix for
+ bug 1664:
+
+ (1) CLM45 transient with VERTSOI (i.e., BGC). Implementing this change in
+ clm4_0_62 (the first tag that exhibited bug 1664), clm4_0_62-withFix
+ was identical to clm4_0_61 for this configuration. But clm4_0_77 will
+ differ from clm4_0_76 for this configuration.
+
+ (2) Answers are changed for CLM45 CNDV with VERTSOI (i.e., BGCDV). It
+ appears that this configuration was buggy before this tag (e.g.,
+ restarts weren't exact), so this tag changes answers in a way that
+ seems to fix this configuration.
+
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? NOT DONE
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_0_76
+Originator(s): muszala (Stefan Muszala)
+Date: Mon Apr 22 13:36:26 MDT 2013
+One-line Summary: spinup changes from Charlie Koven (part 1)
+
+Purpose of changes:
+
+Remove SPINUP CPP tokens, in favor of a run-time namelist item that can be set to
+change the spinup mode on the fly. The state is stored on the restart file, and
+if the user changes the mode on the namelist -- the model will automatically do
+the "ENTER-SPINUP" or "EXIT-SPINUP" step as needed on the first time-step. The
+spinup options were thus removed from the CLM configure for CLM45 and moved to
+the build-namelist as option "-spinup" with values either "on" or "off".
+
+
+Add new history fields: TOTLITC_1m, TOTSOMC_1m, TOTLITN_1m, and TOTSOMN_1m.
+Remove the namelist item: reset_permafrost_c_n_pools. Removed default history
+output for decomposing C pool changes due to vertical transport, and for vertical
+ profiles for N Deposition and fixation.
+
+
+Answer Changes for C13: C13 and C14 Carbon isotopes are handled a bit differently
+ when they are NOT on the restart file. For C13 prior timestep's downregulation
+is used in calculating ci used for photosynthetic discrimination. This changes
+ answers when use_c13 is turned on.
+
+Requirements for tag: N/A
+
+Test level of tag: std-tag
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: Change the way spin up is handled
+
+Describe any changes made to the namelist: spinup now controlled in build-namelist
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: Dave L. checked spinup test comparing old and new method. Erik, Charlie Koven
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+-scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_130416a
++scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_130419a
+
+List all files eliminated: N/A
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/bld/configure
+M models/lnd/clm/bld/config_files/config_definition.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_BGC.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_CENTURY.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+M models/lnd/clm/src/clm4_5/main/CNiniTimeVar.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+M SVN_EXTERNAL_DIRECTORIES
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK.
+ some new failures that should be gone when compared to the next tag. This is due to the new namelist variable
+
+ < spinup_state = 0
+
+ 418/444 < FAIL>
+ 423/444 < FAIL>
+ 428/444 < FAIL>
+ 433/444 < FAIL>
+ 438/444 < FAIL>
+ 443/444 < FAIL>
+
+ CESM test lists:
+
+ yellowstone/CESM: SPM - tracking tputcomp failures
+ cesm intel: OK
+ FAIL ERS.f09_g16.ICLM45VIC.nldir_vic_vrtlay.yellowstone_intel.GC.222075.tputcomp.clm4_0_74
+ FAIL ERS_D.f10_f10.I.yellowstone_intel.GC.222075.tputcomp.clm4_0_74
+ FAIL ERS_D.f10_f10.ICLM45.yellowstone_intel.GC.222075.tputcomp.clm4_0_74
+ FAIL ERS_D.f10_f10.ICLM45CN4Me.nldir_ch4_set2_ciso.yellowstone_intel.GC.222075.tputcomp.clm4_0_74
+ FAIL ERS_D.f10_f10.ICLM45CN4Me.nldir_ch4_set3_pftroot.yellowstone_intel.GC.222075.tputcomp.clm4_0_74
+ FAIL ERS_D.f10_f10.ICLM45CN4MeNoVS.nldir_rootlit.yellowstone_intel.GC.222075.tputcomp.clm4_0_74
+ FAIL ERS_E.f19_g16.I1850.yellowstone_intel.GC.222075.tputcomp.clm4_0_74
+ FAIL ERS_Lm3.f19_g16.IGRCP60CN.yellowstone_intel.GC.222075.tputcomp.clm4_0_74
+ cesm pgi : OK
+ FAIL SMS.1x1_numaIA.ICNCROP.yellowstone_pgi.GC.111075.tputcomp.clm4_0_74
+
+ frankfurt/CESM:
+ cesm intel: OK
+ FAIL ERS.f45_g37.I1850CN.frankfurt_intel.GC.00075.tputcomp.clm4_0_74
+ FAIL SMS.f10_f10.IRCP26CN.frankfurt_intel.GC.00075.tputcomp.clm4_0_74
+ FAIL SMS_D.1x1_mexicocityMEX.I.frankfurt_intel.GC.00075.tputcomp.clm4_0_74
+
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_75
+
+Changes answers relative to baseline: some changes
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+Answer Changes for C13: C13 and C14 Carbon isotopes are handled a bit differently
+ when they are NOT on the restart file. For C13 prior timestep's downregulation
+is used in calculating ci used for photosynthetic discrimination. This changes
+ answers when use_c13 is turned on.
+
+===============================================================
+===============================================================
+Tag name: clm4_0_75
+Originator(s): muszala (Stefan Muszala)
+Date: Fri Apr 19 16:13:42 MDT 2013
+One-line Summary: run propset
+
+Purpose of changes: run propset so externals are updated
+
+Requirements for tag:N/A
+
+Test level of tag: critical
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:N/A
+
+Describe any changes made to the namelist:N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:N/A
+
+Machines testing ran on: no testing run
+===============================================================
+===============================================================
+Tag name: clm4_0_74
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Wed Apr 17 15:58:50 MDT 2013
+One-line Summary: snow_depth changes, major scripts overhaul, small fix for tools
+
+Purpose of changes: bring in snow_depth changes (bfb except for one field in clm hist files, SNOWDP)
+ update external to alpha06e and bring in scripts refactoring by mvertens. Bug fix for mksurfdata_map
+ by sacks. Some minor code cleanup by muszala.
+
+Requirements for tag: N/A
+
+Test level of tag: doc, std-test + tools
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): Bug in pio1_6_6 which kills mpi-serial runs, jedwards is working on fix.
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self, mvertens, swensoc
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+< scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_130416a
+> scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/newcompsets2_tags/newcompsets2_02_scripts4_130405a
+< scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130412
+> scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130403
+< models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_29
+> models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_26
+< models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_6_6/pio
+> models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_6_5/pio
+< tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_130411
+< tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130403
+> mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130308
+< models/lnd/clm/tools/clm4_5/gen_domain https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130403/gen_domain_files
+> models/lnd/clm/tools/clm4_5/gen_domain https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130308/gen_domain_files
+
+List all files eliminated: models/lnd/clm/tools/SVN_EXTERNAL_DIRECTORIES (moved cprnc to common location).
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+SVN_EXTERNAL_DIRECTORIES
+
+--small fix from sacks.
+models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkglcmecMod.F90
+models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkglcmecMod.F90
+
+--update xFail list since we now run test_system tests out of cesm and scripts
+--test_system now uses create_test
+models/lnd/clm/test/system/test_system
+models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+--minor clean up
+models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+models/lnd/clm/src/util_share/organicFileMod.F90
+models/lnd/clm/src/util_share/decompInitMod.F90
+
+--snow depth changes
+models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+models/lnd/clm/src/clm4_5/biogeochem/STATICEcosysDynMod.F90
+models/lnd/clm/src/clm4_5/biogeochem/CNVegStructUpdateMod.F90
+models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+models/lnd/clm/src/clm4_5/main/clm_driver.F90
+models/lnd/clm/src/clm4_5/main/inicPerpMod.F90
+models/lnd/clm/src/clm4_5/main/clmtype.F90
+models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90
+models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/SLakeFluxesMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/SLakeHydrologyMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/initSLakeMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK.
+
+ CESM test lists: (this now includes tests from test_system batch from yellowstone).
+
+yellowstone::
+ CESM intel: OK
+ CESM pgi: OK
+frankfurt:
+ CESM intel: OK. just ran generate. run these instead of test_system interactive tests from now on.
+
+Tool testing: OK. This was to double check my merge since the branch I started with was in clm4_0_68.
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_73
+
+Changes answers relative to baseline: Only change is in the clm history field, SNOWDP. Everything else is bfb.
+
+
+===============================================================
+===============================================================
+Tag name: clm4_0_73
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Mon Apr 15 09:48:03 MDT 2013
+One-line Summary: update mksurfdata_map for CLM4.5, and other misc. updates, mainly to tools
+
+Purpose of changes:
+
+Main purpose is to add a bunch of new fields to the CLM4.5 mksurfdata_map:
+- SLOPE, STD_ELEV
+- LAKEDEPTH
+- peatf, abm, gdp (for fire)
+- binfl, Ws, Dsmax, Ds (for VIC)
+- F0, P3, ZWT0 (for methane)
+
+Also, other miscellaneous changes:
+
+- some refactoring of mksurfdata_map, and get more routines under unit test
+
+- for CLM4.5 mksurfdata_map, always use hires datasets, except for pft
+
+- add support for 36 glc_mec elevation classes (though there are currently
+ no surface datasets for this option)
+
+- add support for 1-d domain files in mksurfdata_map
+
+- add createXMLEntries.pl for creating xml entries for new mapping files
+
+- change default behavior of new_woodharv for clm4.0 (default is true now),
+ and only support new_woodharv=true for clm4.5
+
+- allow global & regional map generation in a single submission of
+ mkmapdata/regridbatch.sh
+
+- handle clm4_0 vs clm4_5 distinction in mkmapdata.sh
+
+- handle large file support more robustly in mkmapdata.sh
+
+- refactored mkscripgrid.ncl to use built-in ESMF utility
+
+- remove 0.47x0.63 support for CLM4.5, since we don't have a good scrip
+ grid file for that resolution
+
+- in some files in bld/namelist_files, fix some resolutions listed as
+ 360x720 to be 360x720cru
+
+- a few other minor changes, as noted below
+
+
+Requirements for tag: Requirements: tools tests, and build-namelist
+test (to catch any accidental changes to CLM's namelist), fix bug:
+1641. Also ran standard tests to cover all bases.
+
+Test level of tag: standard + tools
+
+Bugs fixed (include bugzilla ID):
+ - 1641 (RCP6 and RCP8.5 used old bad wood harvest for 2006 and 2007)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None to CLM namelist, but many
+changes to mksurfdata_map namelist.
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated:
+
+========= Move to new inputs directory
+D models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_lookup_2d_netcdf.nc
+
+List all files added and what they do:
+
+========= Create xml entries and commands to move files to inputdata for
+ a bunch of mapping files
+A models/lnd/clm/tools/clm4_5/mkmapdata/createXMLEntries.pl
+
+========= Guide for how to add new fields to mksurfdata_map
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/README.developers
+
+========= Pull out shared mksurfdata_map code into new, shared modules
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkdiagnosticsMod.F90
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkchecksMod.F90
+
+========= Regrid new fields for mksurfdata_map
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mktopostatsMod.F90
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkgdpMod.F90
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkagfirepkmonthMod.F90
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkpeatMod.F90
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkVICparamsMod.F90
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkCH4inversionMod.F90
+
+========= Get more of mksurfdata_map code under unit tests
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkchecksMod.F90
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkgridmapMod.F90
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkdomainMod.F90
+
+========= Add inputs for new mksurfdata_map unit tests
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/inputs
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/inputs/test_domain_read_dims__lsmlon.nc
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/inputs/test_domain_read_dims__lon.nc
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/inputs/test_domain_read_dims__lon_and_num_pixels.nc
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/inputs/test_domain_read_dims__lon_and_ni.nc
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/inputs/test_domain_read_dims__num_pixels.nc
+
+========= Move to inputs directory
+A models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/inputs/test_lookup_2d_netcdf.nc
+
+
+List all existing files that have been modified, and describe the changes:
+
+========= Add new mksurfdata_map variables and mapping files
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkvarctl.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksurfdat.F90
+ - also add no_inlandwet option
+ - also handle the case where special landunits sum to a
+ tiny bit more than 100% and thus give negative pct_pft
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mklanwatMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkfileMod.F90
+ - also add some other global attributes
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Srcfiles
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl
+ - also add merge_gis and inlandwet options, remove ngwh
+ option, remove hires option (instead use hirespft)
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata_map.namelist
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+ - also add support for 36 glc_mec columns, and remove 0.47x0.63
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5_tools.xml
+ - also remove 0.47x0.63, remove coarse-res lake, change logic for
+ determining glacier dataset, remove ngwh=off rcp6 and rcp8.5 datasets
+ - also add xml file support for more scrip grid file info
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+ - also add support for 36 glc_mec columns, no_inlandwet option,
+ remove a duplicate section, remove 0.47x0.63
+ - also add xml file support for more scrip grid file info
+M models/lnd/clm/bld/namelist_files/checkmapfiles.ncl
+M models/lnd/clm/doc/UsersGuide/tools.xml
+
+========= Add nodata argument to gridmap_areaave
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkgridmapMod.F90
+ - also add gridmap_areastddev and gridmap_check routines
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mklaiMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksoilMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkharvestMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkurbanparCommonMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkvocefMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkpftMod.F90
+
+========= Add support for 36 glc_mec elevation classes
+M models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkglcmecMod.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkglcmecMod.F90
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0_tools.xml
+ - also fix ngwh default for rcp6 for 2006 and 2007
+ - also add xml file support for more scrip grid file info
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml
+ - also add xml file support for more scrip grid file info
+M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+
+========= Change default behavior of new_woodharv for clm4.0
+M models/lnd/clm/tools/clm4_0/mksurfdata_map/mksurfdata.pl
+
+========= Add new test routines
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mksurfdata_map.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/Srcfiles
+
+========= Change location of input files for unit testing
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkncdio.F90
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkindexmapMod.F90
+
+========= Add support for 1-d domain files; allow larger diffs in
+ domain_checksame
+M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkdomainMod.F90
+
+========= Allow global & regional map generation in a single submission
+M models/lnd/clm/tools/clm4_5/mkmapdata/regridbatch.sh
+
+========= Fix yellowstone ESMF path, add new grids, remove bluefire,
+ no longer make atm-ocn and RTM mapping files, handle large
+ file support and other grid-specific flags in a more robust
+ way, add option to differentiate between clm4_0 vs 4_5
+M models/lnd/clm/tools/clm4_5/mkmapdata/mkmapdata.sh
+
+========= Refactored to use built-in ESMF utility
+M models/lnd/clm/tools/clm4_5/mkmapgrids/mkscripgrid.ncl
+
+========= Remove 0.47x0.63 for CLM4.5, since we don't have a good scrip grid
+ file for that resolution
+M models/lnd/clm/tools/clm4_5/mkmapgrids/mkmapgrids.csh
+
+========= Renumber build-namelist unit tests due to removing a
+ resolution. Also cleaned up expectedFails list, mostly removing
+ tests that now pass, changing failure types, and adding
+ ERB.ne30_g16.I_1948-2004, which failed in clm4_0_72, too
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+========= Fix some paths in tools test scripts
+M models/lnd/clm/test/tools/TBLCFGtools.sh
+M models/lnd/clm/test/tools/TBLscript_tools.sh
+M models/lnd/clm/test/tools/test_driver.sh
+M models/lnd/clm/test/tools/TBLtools.sh
+M models/lnd/clm/test/tools/TOPtools.sh
+
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: yes
+ All PASS or xFAIL
+
+ CESM test lists:
+
+ yellowstone/CESM: yes
+ All PASS or xFAIL except:
+
+ ***** Not listed in xFAIL list, but failed in clm4_0_72, so I'm
+ adding it to the xFAIL list
+ FAIL ERB.ne30_g16.I_1948-2004.yellowstone_intel
+
+ yellowstone/CESM/allIcompsets: no
+
+ test_system testing:
+
+ yellowstone batch: yes
+ All PASS or xFAIL except:
+
+ ***** No baselines
+ BFAIL ERS_D.f10_f10.I_2000_CLM45_CN4MeNoVSoil.yellowstone_intel_rootlit.GC.142502.compare_hist.clm4_0_72
+
+ From component_gen_comp, all PASS or BFAIL1 except:
+
+ ****** No baselines
+ BFAIL2 ERS_D.f10_f10.I_2000_CLM45_CN4MeNoVSoil.yellowstone_intel_rootlit.compare_hist.clm4_0_72.clm2.h0 (baseline history file does not exist)
+ BFAIL2 ERS_D.f10_f10.I_2000_CLM45_CN4MeNoVSoil.yellowstone_intel_rootlit.compare_hist.clm4_0_72.clm2.h1 (baseline history file does not exist)
+
+
+ frankfurt interactive: yes
+ All PASS or xFAIL (including component_gen_comp)
+
+
+ yellowstone interactive: no
+ lynx batch: no
+ lynx interactive: no
+ frankfurt batch: no
+
+ test_driver.sh tools testing:
+
+ frankfurt interactive: no
+ Couldn't get tools to build on frankfurt, from either my branch or
+ the clm4_0_72 trunk tag
+
+ yellowstone interactive: yes
+ All PASS except:
+
+ ********* These seem to be expected failures, based on the fact that they fail in clm4_0_72. Note that the
+ ********* gen_domain tests themselves pass, but the baseline comparisons fail, even if I compare clm4_0_72
+ ********* against itself
+ 006 ble14 TBLCFGtools.sh clm4_5 gen_domain CFGtools__ds T31.runoptions ..........................rc=4 FAIL
+ 008 ble@4 TBLCFGtools.sh clm4_5 gen_domain CFGtools__ds ne30.runoptions .........................rc=4 FAIL
+ 027 smiS4 TSMscript_tools.sh clm4_5 ncl_scripts getregional_datasets.pl getregional .............rc=6 FAIL
+ 028 bliS4 TBLscript_tools.sh clm4_5 ncl_scripts getregional_datasets.pl getregional .............rc=4 FAIL
+
+ ********* Expected baseline failures due to changes in default behavior of CLM4_5 mksurfdata_map
+ ********* (see notes on answer changes, below, for what changed; I have rerun these tests with some
+ ********* changes on my branch and in the trunk tag to confirm that baseline comparisons pass when I
+ ********* revert the differences noted there)
+ 012 blg54 TBLtools.sh clm4_5 mksurfdata_map tools__s namelist ...................................rc=7 FAIL
+ 020 bli24 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_T31_crpglc_2000^tools__ds rc=7 FAIL
+ 022 bli53 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__o ....rc=7 FAIL
+ 024 bli54 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__ds ...rc=7 FAIL
+ 026 bli57 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_10x15_1850^tools__do ...rc=7 FAIL
+ 030 bli74 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools_ rc=7 FAIL
+ 032 bliT4 TBLscript_tools.sh clm4_5 mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp24_2000^tools_ rc=7 FAIL
+
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_72
+
+Changes answers relative to baseline: NO. However, changes behavior of
+these offline tools:
+
+mksurfdata_map for clm4.5: changes the following defaults:
+- use hires raw datasets (where available) for everything except pctpft
+- zeroes out inland wetland areas
+- changes default glacier dataset for glc_mec surface datasets
+- uses correct ngwh dataset for rcp6.0 2006 & 2007
+
+mksurfdata_map for clm4.0: changes the following defaults:
+- uses correct ngwh dataset for rcp6.0 2006 & 2007
+
+mkmapdata.sh:
+- no longer generates ocean-atmosphere and RTM mapping files
+
+mkscripgrid.ncl:
+- roundoff-level changes in coordinates
+- grid_dims is fixed (now correctly nx by ny, rather than ntot by ntot)
+
+===============================================================
+===============================================================
+Tag name: clm4_0_72
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Thu Apr 11 15:13:40 MDT 2013
+One-line Summary: maoyi bug fix for vic hydro
+
+Purpose of changes: Bring in changes from Maoyi that fix a few bugs in the VIC hydrology code. Make a small change in
+ scripts that fixes NoVS runs.
+
+Requirements for tag: N/A
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID): 1648
+
+Known bugs (include bugzilla ID): 1658 - ERB problem with clm4_0.
+ 1659 - RTM restart problem when under a day boundary
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: changed NoVSBGC to NoVS in scripts branch_tag
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): changed scripts branch tag to
+
+-scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/newcompsets2_tags/newcompsets2_01_scripts4_130405a
++scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/newcompsets2_tags/newcompsets2_02_scripts4_130405a
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+from Maoyi Huang:
+
+The variable cps%ws in my original codes overlaps with another variable in the SLAKE option. So I renamed it to cps%Wsvic.
+wtsub in SoilHydrologyMod.F90 when VICHYDRO was on was not initialized. To avoid any potential conflicts, I renamed it to wtsub_vic and initialized it to 0._r8.
+cleaned up the codes a little bit by taking out all variables that were not used.
+
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/initSoilParVICMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/biogeophys/CLMVICMapMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+
+Machines testing ran on: (Tests in priority order)
+
+- updated xFail list and slightly modified test_system yellowstone.batch to reflect ERS_D and ERS_Ln48_D changes.
+- tracking tputcomp and memcomp changes in advance of refactoring modifications.
+
+ build-namelist unit tester: All OK. All Failures in clm4_0_71 now pass.
+
+ CESM test lists:
+
+ cesm intel: PID: 163148: OK. a number of tputcomp failures:
+ FAIL ERS_D.f10_f10.I.yellowstone_intel.GC.163148.tputcomp.clm4_0_71
+ FAIL ERS_Lm3.1x1_vancouverCAN.I1PT.yellowstone_intel.GC.163148.tputcomp.clm4_0_71
+ FAIL ERS_Lm3.f19_g16.IGRCP60CN.yellowstone_intel.GC.163148.tputcomp.clm4_0_71
+ FAIL SMS_RLA.f45_f45.ICLM45.yellowstone_intel.GC.163148.tputcomp.clm4_0_71
+ FAIL SMS.1x1_mexicocityMEX.I1PTCLM45.yellowstone_intel.GC.163148.tputcomp.clm4_0_71
+
+ cesm pgi: PID: 163101 OK. one memcomp failure
+ FAIL ERS.f19_g16.ICNCROP.yellowstone_pgi.GC.163101
+
+ test_system testing:
+
+ yellowstone batch: OK. Fixed a problem with NoVS, found bug in
+ FAIL ERS.f19_g16.I_1850_CLM45_CN4Me.yellowstone_intel_user_nl_dirs.GC.162650.tputcomp.clm4_0_71
+ FAIL ERS_D.f10_f10.I_2000_CLM45_CN4Me.yellowstone_intel_ch4_set2_ciso.GC.162650.memcomp.clm4_0_71
+ FAIL ERS_D.f10_f10.I_2000_CLM45_CN4Me.yellowstone_intel_ch4_set2_ciso.GC.162650.tputcomp.clm4_0_71
+ FAIL ERS_D.f10_f10.I_2000_CLM45_CN4Me.yellowstone_intel_ch4_set3_pftroot.GC.162650.tputcomp.clm4_0_71
+
+ frankfurt interactive: OK. generate and tputcomp sub-tests failed.
+ FAIL SMS_D_Mmpi-serial.1x1_vancouverCAN.I1PTCLM45.frankfurt_intel_user_nl_dirs.GC.165025.generate.clm4_0_72
+ FAIL SMS_D_Mmpi-serial.1x1_vancouverCAN.I1PTCLM45.frankfurt_intel_user_nl_dirs.GC.165025.tputcomp.clm4_0_71
+ FAIL ERS_Mmpi-serial.1x1_mexicocityMEX.I1PTCLM45.frankfurt_intel_user_nl_dirs.GC.165025.generate.clm4_0_72
+ FAIL ERS_Mmpi-serial.1x1_mexicocityMEX.I1PTCLM45.frankfurt_intel_user_nl_dirs.GC.165025.tputcomp.clm4_0_71
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_71
+
+Changes answers relative to baseline: only for VIC. Original implementation broken. Consider this tag the new baseline against which to test for VIC.
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_0_71
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Wed Apr 10 08:43:00 MDT 2013
+One-line Summary: compsets refactoring by mvertens
+
+Purpose of changes: Bring in externals that refactor the compset handling. Update CLM to
+ work with the new compsets. Compsets are now extensible and easier to
+ modify and work with.
+
+Requirements for tag: N/A
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): N/A
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: modify CLM to work with new compset refactor
+
+Describe any changes made to the namelist: modify CLM to work with new compset refactor
+
+List any changes to the defaults for the boundary datasets: N/A Levy's new files will come in later.
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: mvertens, erik
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/newcompsets2_tags/newcompsets2_01_scripts4_130405a
+https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130403
+https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_26
+https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_130325
+https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_02/socn
+https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_02/sice
+https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_02/sglc
+https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_02/swav
+https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_130405
+http://parallelio.googlecode.com/svn/trunk_tags/pio1_6_5/pio
+https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130308
+https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130308/gen_domain_files
+
+List all files eliminated:
+
+D models/lnd/clm/test/system/tests_posttag_lynx_nompi
+D models/lnd/clm/test/system/mirage.interactive
+D models/lnd/clm/test/system/TCBCFGtools.sh
+D models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+D models/lnd/clm/test/system/config_files
+D models/lnd/clm/test/system/config_files/gen_domain
+D models/lnd/clm/test/system/config_files/tools__do
+D models/lnd/clm/test/system/config_files/tools__s
+D models/lnd/clm/test/system/config_files/CFGtools__ds
+D models/lnd/clm/test/system/config_files/tools__ds
+D models/lnd/clm/test/system/config_files/README
+D models/lnd/clm/test/system/config_files/config_CLMtestCompsets.xml
+D models/lnd/clm/test/system/config_files/tools__o
+D models/lnd/clm/test/system/get_cprnc_diffs.sh
+D models/lnd/clm/test/system/TSMncl_tools.sh
+D models/lnd/clm/test/system/CLM_compare.sh
+D models/lnd/clm/test/system/TBLCFGtools.sh
+D models/lnd/clm/test/system/README.testnames
+D models/lnd/clm/test/system/tests_posttag_yong
+D models/lnd/clm/test/system/TCBtools.sh
+D models/lnd/clm/test/system/test_driver.sh
+D models/lnd/clm/test/system/lynx.interactive
+D models/lnd/clm/test/system/tests_pretag_yellowstone_nompi
+D models/lnd/clm/test/system/bluefire.batch
+D models/lnd/clm/test/system/Makefile
+D models/lnd/clm/test/system/TSMscript_tools.sh
+D models/lnd/clm/test/system/tests_posttag_mirage
+D models/lnd/clm/test/system/tests_posttag_frankfurt_nompi
+D models/lnd/clm/test/system/gen_test_table.sh
+D models/lnd/clm/test/system/TOPtools.sh
+D models/lnd/clm/test/system/input_tests_master
+D models/lnd/clm/test/system/TSMtools.sh
+D models/lnd/clm/test/system/TBLscript_tools.sh
+D models/lnd/clm/test/system/tests_posttag_nompi_regression
+D models/lnd/clm/test/system/TBLtools.sh
+D models/lnd/clm/test/system/show_var_diffs.sh
+D models/lnd/clm/test/system/TSMCFGtools.sh
+
+List all files added and what they do:
+
+A + models/lnd/clm/test/tools
+A + models/lnd/clm/test/tools/TSMscript_tools.sh
+A + models/lnd/clm/test/tools/TCBCFGtools.sh
+A + models/lnd/clm/test/tools/tests_posttag_frankfurt_nompi
+A + models/lnd/clm/test/tools/config_files
+A + models/lnd/clm/test/tools/config_files/gen_domain
+A + models/lnd/clm/test/tools/config_files/tools__do
+A + models/lnd/clm/test/tools/config_files/tools__s
+A + models/lnd/clm/test/tools/config_files/CFGtools__ds
+A + models/lnd/clm/test/tools/config_files/tools__ds
+A + models/lnd/clm/test/tools/config_files/README
+A + models/lnd/clm/test/tools/config_files/tools__o
+A + models/lnd/clm/test/tools/get_cprnc_diffs.sh
+A + models/lnd/clm/test/tools/gen_test_table.sh
+A + models/lnd/clm/test/tools/TSMncl_tools.sh
+A + models/lnd/clm/test/tools/CLM_compare.sh
+A + models/lnd/clm/test/tools/nl_files
+A + models/lnd/clm/test/tools/nl_files/nl_ch4_set2_ciso
+A + models/lnd/clm/test/tools/nl_files/nl_ch4_set3_pftroot
+A + models/lnd/clm/test/tools/nl_files/gen_domain.ne30.runoptions
+A + models/lnd/clm/test/tools/nl_files/mksrfdt_1x1_brazil_1850
+A + models/lnd/clm/test/tools/nl_files/nl_rootlit
+A + models/lnd/clm/test/tools/nl_files/gen_domain.T31.runoptions
+A + models/lnd/clm/test/tools/nl_files/mksrfdt_10x15_1850
+A + models/lnd/clm/test/tools/nl_files/nl_ciso
+A + models/lnd/clm/test/tools/nl_files/nl_anoxia_wtsat
+A + models/lnd/clm/test/tools/nl_files/clm4_0_mksrfdt_1x1_numaIA_mp20irrcr_2000
+A + models/lnd/clm/test/tools/nl_files/mksrfdt_T31_crpglc_2000
+A + models/lnd/clm/test/tools/nl_files/clm4_0_mksrfdt_10x15_irr_1850
+A + models/lnd/clm/test/tools/nl_files/getregional
+A + models/lnd/clm/test/tools/nl_files/mksrfdt_1x1_numaIA_mp24_2000
+A + models/lnd/clm/test/tools/nl_files/mksrfdt_1x1_brazil_1850-2000
+A + models/lnd/clm/test/tools/nl_files/mkprocdata_ne30_to_f19_I2000
+A + models/lnd/clm/test/tools/nl_files/mkmapdata_ne30np4
+A + models/lnd/clm/test/tools/nl_files/nl_vrtlay
+A + models/lnd/clm/test/tools/nl_files/nl_oldhyd
+A + models/lnd/clm/test/tools/nl_files/mksrfdt_1x1_vancouverCAN_2000
+A + models/lnd/clm/test/tools/nl_files/mkmapdata_if10
+A + models/lnd/clm/test/tools/TBLCFGtools.sh
+A + models/lnd/clm/test/tools/input_tests_master
+A + models/lnd/clm/test/tools/TOPtools.sh
+A + models/lnd/clm/test/tools/README
+A + models/lnd/clm/test/tools/TSMtools.sh
+A + models/lnd/clm/test/tools/README.testnames
+A + models/lnd/clm/test/tools/TBLscript_tools.sh
+A + models/lnd/clm/test/tools/tests_posttag_yong
+A + models/lnd/clm/test/tools/TCBtools.sh
+A + models/lnd/clm/test/tools/test_driver.sh
+A + models/lnd/clm/test/tools/tests_posttag_nompi_regression
+A + models/lnd/clm/test/tools/tests_pretag_yellowstone_nompi
+A + models/lnd/clm/test/tools/TBLtools.sh
+A + models/lnd/clm/test/tools/show_var_diffs.sh
+A + models/lnd/clm/test/tools/TSMCFGtools.sh
+A + models/lnd/clm/test/tools/Makefile
+A + models/lnd/clm/test/system/yellowstone.namelist
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/test/system/test_system
+M models/lnd/clm/test/system/yellowstone.interactive
+M models/lnd/clm/test/system/frankfurt.interactive
+M models/lnd/clm/test/system/README
+M models/lnd/clm/test/system/yellowstone.batch
+M models/lnd/clm/bld/user_nl_clm
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/clm.buildnml.csh
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml
+MM models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5_tools.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+MM models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+MM models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+M SVN_EXTERNAL_DIRECTORIES
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester:
+
+These should pass next time around:
+-these two are due to megan now being on by default:
+ 030/449 < FAIL>
+ 037/449 < FAIL>
+-these four should pass next time...no baselines in clm4_0_70
+ 108/449 < FAIL>
+ 109/449 < FAIL>
+ 443/449 < FAIL>
+ 444/449 < FAIL>
+
+ CESM test lists:
+
+ yellowstone/CESM:
+intel: other than our expected fail list, current failures should pass during the next round of testing
+pgi : see intel
+
+note for intel and pgi: nlcomp fails should not be considered truth or otherwise. There is a bug in compare_namelist.
+lots of BFAILS when comparing to clm4_0_70 and some differences in coupler hist. vars.
+These are expected due to a new CISM and DATM.
+
+ test_system testing:
+
+ yellowstone batch: OK. See explanation for yellowstone/CESM tests above.
+ frankfurt interactive: OK. After modifying the frankfurt compset for 1PT.
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_70
+
+Changes answers relative to baseline: yes, due to CISM, but not due to any science changes in CLM itself. There will be
+changes in some coupler history files.
+
+===============================================================
+===============================================================
+Tag name: clm4_0_70
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Mon Apr 1 15:58:23 MDT 2013
+One-line Summary: bring in vic hydrology
+
+Purpose of changes: Merge in VIC hydrology. This is an isolated option that stands on its
+own and does not affect existing code. Added tests with and without vrtlay = .true..
+
+Requirements for tag: Add vic tests for CLM45 and CLM45-vrtlay, normal testing protocol
+
+Test level of tag: standard + I_compsets + yellowstone_rtm batch
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID): VIC crashes when run in debug mode (bug 1648)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: Add namelist option for vichydro
+
+Describe any changes made to the namelist: Add namelist option for vichydro
+
+List any changes to the defaults for the boundary datasets: Using temporary surface data sets. New datasets
+ will come in at a later tag. The temporary data sets do not affect normal CLM runs.
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: Erik,Maoyi Huang
+
+List any svn externals directories updated (csm_share, mct, etc.):N/A
+
+List all files eliminated:N/A
+
+List all files added and what they do:
+
+- For new VIC tests:
+A models/lnd/clm/test/system/user_nl_dirs/vic/vic_f09/user_nl_clm
+A models/lnd/clm/test/system/user_nl_dirs/vic/vic_f09
+A models/lnd/clm/test/system/user_nl_dirs/vic/vic_f19/user_nl_clm
+A models/lnd/clm/test/system/user_nl_dirs/vic/vic_f19
+A models/lnd/clm/test/system/user_nl_dirs/vic/vic_vrtlay/user_nl_clm
+A models/lnd/clm/test/system/user_nl_dirs/vic/vic_vrtlay
+A models/lnd/clm/test/system/user_nl_dirs/vic
+- For VIC implementation:
+A models/lnd/clm/src/clm4_5/main/initSoilParVICMod.F90
+A models/lnd/clm/src/clm4_5/biogeophys/CLMVICMapMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+- For VIC namelist functionality
+M models/lnd/clm/test/system/config_files/config_CLMtestCompsets.xml
+M models/lnd/clm/bld/configure
+M models/lnd/clm/bld/config_files/config_definition.xml
+- For new VIC tests
+M models/lnd/clm/test/system/yellowstone.batch
+- VIC implementation
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: OK
+
+ CESM test lists:
+
+ yellowstone/CESM:
+ cesm intel: OK. some tputcomp FAILs, but main tests pass
+ cesm pgi: OK. one tputcomp FAIL, main tests pass
+ yellowstone/CESM/allIcompsets: OK.
+
+ test_system testing:
+ yellowstone rtm batch: OK.
+ yellowstone batch: OK.
+
+ new VIC tests:
+ ERS.f09_g16.I_2000_CLM45_VIC -user_nl_dir ../models/lnd/clm/test/system/user_nl_dirs/vic/vic_f09
+ SMS.f19_g16.I_2000_CLM45_VIC -user_nl_dir ../models/lnd/clm/test/system/user_nl_dirs/vic/vic_f19
+ ERS.f09_g16.I_2000_CLM45_VIC -user_nl_dir ../models/lnd/clm/test/system/user_nl_dirs/vic/vic_vrtlay
+ ERS_D.f09_g16.I_2000_CLM45_VIC -user_nl_dir ../models/lnd/clm/test/system/user_nl_dirs/vic/vic_vrtlay
+ this last one expected to Fail.
+
+ frankfurt interactive: OK.
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_69
+
+Changes answers relative to baseline: No. The VIC hydro option, if turned on
+ does change answers, but the use of this code is isolated from the rest of CLM.
+
+===============================================================
+===============================================================
+Tag name: clm4_0_69
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Tue Mar 26 16:24:54 MDT 2013
+One-line Summary: remove hydro reorder, volr and esmf mods
+
+Purpose of changes: fix volrlnd init. from SPVAL to 0.0 so TWS in CLM looks correct.
+ modify esmf interfaces for volr. remove hydrology reordering due to nasty bug in
+ restart.
+
+Requirements for tag: fix bug 1644
+
+Test level of tag: critical
+
+Bugs fixed (include bugzilla ID): 1644
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist:N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: Dave L. and Jinyun Tang. Minor review by Erik and Bill.
+
+List any svn externals directories updated (csm_share, mct, etc.): update RTM to 1_0_22
+
+List all files eliminated: N/A
+
+List all files added and what they do:N/A
+
+List all existing files that have been modified, and describe the changes:
+M models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+M SVN_EXTERNAL_DIRECTORIES
+
+Machines testing ran on: (Tests in priority order)
+
+ only run CESM tests:
+
+=== CESM Yellowstone PGI tests: OK. All BFAILS occur in compare_hist which is expected due to the removal of the hydro. reordering.
+Status with expected failures removed:
+./cs.status.114300.yellowstone | grep -v PET_PT.f19_g16.I1850 | grep -v SMS.1x1_numaIA.ICN_CROP | grep -v PET_PT.f10_f10.I20TRCN | grep -v PET_PT.f19_g16.ICLM451850 | grep -v SMS.T31_g37.IG4804CLM45 | grep -v SMS.1x1_numaIA.ICLM45CNCROP | grep -v PET_PT.f10_f10.I20TRCLM45CN | grep -v PASS
+Possible test result outcomes:
+...
+BFAIL SMS_RLB.f45_f45.I.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL ERS_D.hcru_hcru.I_2000_CRUFRC_CN.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL ERS.f19_g16.ICNCROP.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL ERI.f19_g16.IG1850.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL SMS.T31_g37.IG4804.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL SMS.1x1_numaIA.ICNCROP.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL SMS_RLB.f45_f45.ICLM45.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL ERS_D.hcru_hcru.I_2000_CRUFRC_CLM45_CN.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL ERS.f19_g16.ICLM45CNCROP.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+BFAIL ERI.f19_g16.IG1850CLM45.yellowstone_pgi.GC.114300.compare_hist.clm4_0_68
+
+=== CESM Yellowstone INTEL tests: OK. All BFAILS occur in compare_hist which is expected due to the removal of the hydro. reordering.
+Status with expected failures removed:
+>>./cs.status.114247.yellowstone | grep -v ERH_D.f19_g16.I1850CLM45CN | grep -v ERB.ne30_g16.I_1948-2004_CLM45 | grep -v ERS_E.f19_g16.I1850CRUCLM45CN | grep -v CME.f10_f10.ICN | grep -v ERS_D.f10_f10.ICLM45 | grep -v PET_PT.f19_g16.I1850CN | grep -v ERB.ne30_g16.I_1948-2004 | grep -v PET_PT.f10_f10.I20TRCN | grep -v PET_PT.f19_g16.I1850CLM45CN | grep -v ERS_E.f19_g16.ICLM451850 | grep -v ERS_D.f19_g16.IGRCP26CLM45CN | grep -v ERS_Lm3.f19_g16.IGRCP60CLM45CN | grep -v PET_PT.f10_f10.I20TRCLM45CN | grep -v SMS.f19_g16.IRCP45CLM45CN | grep -v ERS_D.f19_g16.IRCP85CLM45CN | grep -v PASS | grep -v COMMENT
+...
+FAIL ERS_D.f10_f10.I.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+FAIL ERS_D.f10_f10.I.yellowstone_intel.GC.114247.tputcomp.clm4_0_68
+FAIL NCK.f10_f10.I.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+FAIL ERS_E.f19_g16.I1850.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+FAIL ERH_D.f19_g16.I1850CN.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+FAIL ERS_D.f19_g16.IGRCP26CN.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+FAIL ERS_Lm3.f19_g16.IGRCP60CN.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+FAIL ERS_Lm3.f19_g16.IGRCP60CN.yellowstone_intel.GC.114247.tputcomp.clm4_0_68
+FAIL SMS.f19_g16.IRCP45CN.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+FAIL ERS_D.f19_g16.IRCP85CN.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+FAIL ERS_D.f19_g16.IRCP85CN.yellowstone_intel.GC.114247.memcomp.clm4_0_68
+FAIL SMS_ROA.f45_f45.ICLM45.yellowstone_intel.GC.114247.tputcomp.clm4_0_68
+FAIL NCK.f10_f10.ICRUCLM45.yellowstone_intel.GC.114247.compare_hist.clm4_0_68
+
+=== test_system yellowstone.rtm.batch: OK. All compare_hist failures are expected due to changes in photosynthesis
+
+CLM tag used for the baseline comparison tests if applicable: For Cesm intel and pgi tests- clm4_0_68
+ for rtm tests, against clm4_0_66.
+ for science validation, clm4_0_66
+
+Changes answers relative to baseline: yes
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ obvious changes in GPP,FPSN and FCTR. Other fields changed as well.
+ ran the following and had Dave L. and Jinyun Tang look at output.
+ f19_g16_I_1850_CLM45_CN_yellowstone_intel_photo_clm4_0_66/
+ f19_g16_I_1850_CLM45_CN_yellowstone_intel_photo_clm4_0_68/
+ f19_g16_I_1850_CLM45_CN_yellowstone_intel_photo_clm4_0_69/
+ f19_g16_ICLM45_yellowstone_intel_photo_clm4_0_66/
+ f19_g16_ICLM45_yellowstone_intel_photo_clm4_0_68/
+ f19_g16_ICLM45_yellowstone_intel_photo_clm4_0_69/
+
+===============================================================
+===============================================================
+Tag name: clm4_0_68
+Originator(s): erik (Erik Kluzek)
+Date: Sat Mar 16 16:03:14 MDT 2013
+One-line Summary: Fix mksurfdata_map for ne120np. Error out if SUM(weights)/=100. Photosynthesis change for CLM45.
+
+Purpose of changes:
+
+Bring in ne120fix branch to trunk. This fixes some issues in mksurfdata_map for generation
+of ne120np surface data file. Put error back in CLM if weights don't sum to 100. Add in
+Keith Oleson's photosynthesis change. This changes canopy top: triose phosphate utilization rate at 25C to
+be dependent on vcmax25top ( maximum rate of carboxylation) rather than jmax25top (maximum electron
+transport rate). Update getco2_historical.ncl script to be able to handle rcp files as well.
+
+Update scripts so that I1PT settings for urban single-point files will be used, and IRCP
+will properly do a hybrid startup. And separate out intel/pgi test lists.
+
+Update datasets for ne120np4 and ne240np4 (CLM40), with updated mksurfdata_map.
+
+Requirements for tag:
+ Tools test, yellowstone batch, fix bug 1632/1643
+
+Test level of tag: critical
+
+Bugs fixed (include bugzilla ID): 1632 (ne120np4 mksurfdata problem)
+ 1643 (Fix RES_COMPSET_MATCH for I1PT, IRCP*)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ New surfdata and pftdyn files for ne120np4 (CLM40)
+ New surfdata files for ne240np4 (CLM40)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, sacks (mksrfdata changes)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+ scripts updated to scripts4_130315c
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/yellowstone.batch --- Fix some compset names
+
+ M models/lnd/clm/tools/clm4_5/ncl_scripts/getco2_historical.ncl - Handle rcp CO2 files
+ M models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksurfdat.F90 -- Clean out small PFT values
+ M models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mksurfdat.F90 -- Clean out small PFT values
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml ------- Update ne120/ne240 surfdata/pftdyn datasets
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml ------- Delete ALL finidat files as none compatible
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5_tools.xml - Add rcp CO2 datasets: rcp2.6/4.5/6/8.5
+
+ M models/lnd/clm/src/clm4_5/main/surfrdMod.F90 - Add back abort if surfdata weights don't sum to 100%
+ M models/lnd/clm/src/clm4_0/main/surfrdMod.F90 - Add back abort if surfdata weights don't sum to 100%
+ M models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90 - change in photosynthesis
+
+Changes in expected fails for testing:
+
+ build-namelist unit-test: Following fail because of new datasets will pass next tag
+ 203 ne120
+ 208 ne240
+ 306 ne120 20th Century
+ 428 48x96 for CLM45 (remove finidat)
+ failType="FAIL">answers change on restart
+
+
+ Changes to expected fail:
+
+
+
+
++ answers change on restart
++ answers change on restart
++ answers change on restart
++ answers change on restart
++ answers change on restart
++ answers change on restart
++ answers change on restart
++ answers change on restart
+
+
+
+
+ +Restart difference
+ +Restart difference
+ +Restart difference
+ +Restart difference
+ +Restart difference
+
+Machines testing ran on: (Tests in priority order)
+ build-namelist unit tester: yes
+
+ CESM test lists:
+
+ yellowstone/CESM: yes
+ yellowstone/CESM/allIcompsets: yes
+
+ test_system testing:
+
+ yellowstone batch: yes
+ frankfurt interactive: yes
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: yes
+ frankfurt interactive: yes
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_68
+
+Changes answers relative to baseline: Yes!
+
+ - what code configurations:
   All CLM45 change because of change in photosynthesis
+ I1PT compsets change because of scripts bug
+ IRCP compsets now startup with new initial conditions
+ - what platforms/compilers: All
+ - nature of change: similar climate
+
+===============================================================
+===============================================================
+Tag name: clm4_0_67
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Tue Mar 12 11:14:59 MDT 2013
+One-line Summary: Jinyun photosynthesis and hydrology reorder
+
+Purpose of changes: Bring in mods that reorder hydrology code and mods that
+ address photosynthesis CN code. This tag is F90 code only.
+
+Requirements for tag: N/A
+
+Test level of tag: std-test + ICompset tests + yellowstone interactive
+
+Bugs fixed (include bugzilla ID): N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist:N/A
+
+List any changes to the defaults for the boundary datasets:N/A
+
+Describe any substantial timing or memory changes:N/A
+
+Code reviewed by: Dave L, S. Swenson, self
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated:N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+Major changes in CNEcosystemDynMod, SoilHydrologyMod and Hydrology2Mod. Also did some dead-code removal (unused pointer
+assignments, unused modules, and local variables).
+
+ M biogeochem/CNEcosystemDynMod.F90 - affects CN. Split out CNEcosystemDynA and CNEcosystemDynB
+
+ M main/CNiniTimeVar.F90 - removed some commented out code
+ M main/clmtypeInitMod.F90 - removed commented out init_gridcell_pstate_type
+ M main/pftdynMod.F90
+ M main/initSurfAlbMod.F90 - affects CN. For photosynthesis.
+ M main/clm_driver.F90
+ M main/CNiniSpecial.F90
+ M main/clmtype.F90
+ M main/histFldsMod.F90
+
+ M biogeophys/Hydrology2Mod.F90 - split out 2A and 2B subroutines
+ M biogeophys/SoilHydrologyMod.F90 - split out new WaterTable routine from existing Drainage routine
+ M biogeophys/BareGroundFluxesMod.F90
+ M biogeophys/CanopyFluxesMod.F90 - For photosynthesis.
+
+
+Machines testing ran on: (Tests in priority order)
+
+Dave Lawrence looked at 1 year runs to make sure behavior looked OK before and after mods.
+S. Swenson looked at short simulations to make sure reordering worked correctly in the hydrology code.
+Expect changes in the following fields (depending on compset and test type)
+
+roff Flrl_rofliq
+roff Flrl_rofliq
+lnd Flrl_rofliq
+lnd Flrl_rofliq
+roff Forr_roff
+roff Forr_roff
+roff Flrl_rofliq
+roff Flrl_rofliq
+lnd Fall_lat
+lnd Fall_lat
+lnd Fall_sen
+lnd Fall_sen
+lnd Fall_evap
+lnd Fall_evap
+lnd Flrl_rofliq
+lnd Flrl_rofliq
+roff Forr_roff
+roff Forr_roff
+roff Flrl_rofliq
+roff Flrl_rofliq
+lnd Sl_fv
+lnd Sl_f
+
+l2x_Sl_avsdr
+l2x_Sl_anidr
+l2x_Sl_avsdf
+l2x_Sl_anidf
+l2x_Sl_tref
+l2x_Sl_qref
+l2x_Sl_t
+l2x_Sl_fv
+l2x_Sl_ram1
+l2x_Sl_snowh
+l2x_Sl_u10
+l2x_Fall_swnet
+l2x_Fall_taux
+l2x_Fall_tauy
+l2x_Fall_lat
+l2x_Fall_sen
+l2x_Fall_lwup
+l2x_Fall_evap
+l2x_Fall_flxdst1
+l2x_Fall_flxdst2
+l2x_Fall_flxdst3
+l2x_Fall_flxdst4
+l2x_Flrl_rofliq
+x2l_Slrr_volr
+r2x_Slrr_volr
+r2x_Forr_roff
+r2x_Forr_ioff
+x2r_Flrl_rofliq
+
+ build-namelist unit tester: yes - OK
+
+ CESM test lists:
+
+ yellowstone/CESM: yes - OK. Fails are due to new code. These should pass next tag.
+
+ FAIL ERS.f19_g16.I_1850_CLM45_CN4Me.yellowstone_intel_user_nl_dirs.GC.113407
+ FAIL ERS_D.f19_g16.I_1850_CLM45_CNCENTNoMe.yellowstone_intel_user_nl_dirs.GC.113407
+ FAIL ERS.f19_g16.I_2000_CLM45_CN4Me_CROP.yellowstone_intel_user_nl_dirs.GC.113407
+ FAIL ERS_Ld211.f10_f10.ICLM45CNADSPIN.yellowstone_intel_monthly.GC.113407
+ FAIL ERS_Ld211.f10_f10.ICLM45CNADSPIN.yellowstone_intel_monthly.GC.113407.compare_hist.clm4_0_66
+ FAIL ERS_Ln48_D_P64x1.ne30_g16.ICLM45CN.yellowstone_intel_user_nl_dirs.GC.113407.compare_hist.clm4_0_66
+ FAIL ERS_Ld211_D_P224x1.f10_f10.ICLM45CNCROP.yellowstone_intel_crop.GC.113407
+ FAIL ERS_Ld211_P384x1.f19_g16.ICLM45CNDVCROP.yellowstone_intel_crop.GC.113407
+ FAIL ERS_Ld211.f10_f10.I_2000_CLM45_VOC_CN.yellowstone_intel_voc.GC.113407
+ FAIL ERS_Ln48_D.f10_f10.I_2000_CLM45_CN.yellowstone_intel_ciso.GC.113407.compare_hist.clm4_0_66
+ FAIL ERS_D.f19_g16.ICLM45GLCMEC.yellowstone_intel_glcMEC.GC.113407
+
+ yellowstone/CESM/allIcompsets: yes - OK
+
+ test_system testing:
+
+ yellowstone batch: yes - OK. Fails are due to new code. These should pass next tag.
+
+ FAIL ERS.f19_g16.I_1850_CLM45_CN4Me.yellowstone_intel_user_nl_dirs.GC.113407
+ FAIL ERS_D.f19_g16.I_1850_CLM45_CNCENTNoMe.yellowstone_intel_user_nl_dirs.GC.113407
+ FAIL ERS.f19_g16.I_2000_CLM45_CN4Me_CROP.yellowstone_intel_user_nl_dirs.GC.113407
+ FAIL ERS_Ld211.f10_f10.ICLM45CNADSPIN.yellowstone_intel_monthly.GC.113407
+ FAIL ERS_Ln48_D_P64x1.ne30_g16.ICLM45CN.yellowstone_intel_user_nl_dirs.GC.113407.compare_hist.clm4_0_66
+ FAIL ERS_Ld211_D_P224x1.f10_f10.ICLM45CNCROP.yellowstone_intel_crop.GC.113407
+ FAIL ERS_Ld211_P384x1.f19_g16.ICLM45CNDVCROP.yellowstone_intel_crop.GC.113407
+ FAIL ERS_Ld211.f10_f10.I_2000_CLM45_VOC_CN.yellowstone_intel_voc.GC.113407
+ FAIL ERS_Ln48_D.f10_f10.I_2000_CLM45_CN.yellowstone_intel_ciso.GC.113407.compare_hist.clm4_0_66
+ FAIL ERS_D.f19_g16.ICLM45GLCMEC.yellowstone_intel_glcMEC.GC.113407
+
+ frankfurt interactive: yes - OK
+ yellowstone interactive: yes - reasonably OK. Added a few tests to xFail list that need new
+ surface data sets.
+ The following fail due to new code and should pass next round:
+
+ FAIL ERS_D_Mmpi-serial.CLM_USRDAT.ICLM45USUMB.yellowstone_intel_user_nl_dirs.GC.075359.compare_hist.clm4_0_66b
+ FAIL ERS_D_P1x1_Mmpi-serial.f19_g16.I20TR_CLM45VSCN.yellowstone_intel_voc.GC.075359
+ FAIL ERS_D_P1x1_Mmpi-serial.f19_g16.I20TR_CLM45VSCN.yellowstone_intel_voc.GC.075359.compare_hist.clm4_0_66b
+ FAIL ERS_Ly3_Mmpi-serial.1x1_brazil.I_2000_CLM45.yellowstone_intel_monthly.GC.075359.compare_hist.clm4_0_66b
+ FAIL ERS_Ld211_Mmpi-serial.1x1_brazil.I_2000_CLM45_CN.yellowstone_intel_monthly.GC.075359.compare_hist.clm4_0_66b
+ FAIL ERS_Ly3_Mmpi-serial.1x1_brazil.I_2000_CLM45_CNDV.yellowstone_intel_monthly.GC.075359.compare_hist.clm4_0_66b
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_66
+
+Changes answers relative to baseline: Photosynthesis mods and reordering will change answers
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate): hydrology reordering will introduce very small changes due to the process of moving around, but not changing code.
+ photosynthesis mods are major changes that affect science
+
+
+===============================================================
+===============================================================
+Tag name: clm4_0_66
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Thu Mar 7 11:54:05 MST 2013
+One-line Summary: turn off subgrid topography snow parameterization for glc_mec landunits
+
+Purpose of changes:
+
+Change from Sean Swenson to turn off subgrid topography snow
+parameterization over glc_mec landunits: ice_mec columns already account
+for subgrid topographic variability through their use of multiple elevation
+classes; thus, to avoid double-accounting for topographic variability in
+these columns, we ignore topo_std and use a value of n_melt that assumes
+little topographic variability within the column.
+
+Requirements for tag: yellowstone cesm tests, make sure GLC test goes
+
+Test level of tag: critical
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, Sean Swenson
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: no
+
+ CESM test lists:
+
+ yellowstone/CESM: yes
+ All PASS or xFail (ignoring tput failures); only baseline failure is
+ the following, which is an expected failure:
+ FAIL ERI.f19_g16.IG1850CLM45.yellowstone_pgi.C.113330.compare_hist.clm4_0_65
+
+ yellowstone/CESM/allIcompsets: no
+
+ test_system testing:
+
+ yellowstone batch: no
+ frankfurt interactive: no
+ yellowstone interactive: no
+ lynx batch: no
+ lynx interactive: no
+ frankfurt batch: no
+
+ test_driver.sh tools testing:
+
+ lynx interactive: no
+ yellowstone interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_65
+
+Changes answers relative to baseline: Yes, just for CLM4.5 with glc_mec
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM4.5 with glc_mec
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate): new climate
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: N/A
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_0_65
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Thu Mar 7 09:53:31 MST 2013
+One-line Summary: back out Machines external to get more tests to pass, especially IG
+
+Purpose of changes:
+
+Some tests - particularly IG - became broken in clm4_0_64. This tag rolls
+back the Machines external so that GLC compiles properly.
+
+Requirements for tag:
+
+Test level of tag: only yellowstone CESM tests
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+-scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130304b
++scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130301
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+Difference in expected fails: this brings the xfail list back to what it
+was in clm4_0_63, with the exception of some SBN IcompsetTests that were
+added to xFail in clm4_0_64:
+@@ -154,12 +154,6 @@
+ scripts issue component not threaded
+ missing finidat file
+ missing finidat file
+-
+- No rule to make target `glc_constants.o/glade/scratch/muszala/ERI.f19_g16.IG1850.yellowstone_pgi.GC.161430/bld/glc/lib/libglimmercismfortran.a', needed by `glc_constants.mod'
+- No rule to make target `glc_constants.o/glade/scratch/muszala/ERI.f19_g16.IG1850.yellowstone_pgi.GC.161430/bld/glc/lib/libglimmercismfortran.a', needed by `glc_constants.mod'
+-
+- ERROR: lnd_prognostic but num_inst_lnd not num_inst_max
+- ERROR: lnd_prognostic but num_inst_lnd not num_inst_max
+
+
+ scripts issue with ocean not threaded
+@@ -170,10 +164,6 @@
+ checkWeights error, probably due to old-format urban on surface dataset
+ Bad compset name: ICNCROP
+ scripts issue with ocean not threaded
+-
+- No rule to make target `glc_constants.o/glade/scratch/muszala/ERI.f19_g16.IG1850.yellowstone_pgi.GC.161430/bld/glc/lib/libglimmercismfortran.a', needed by `glc_constants.mod'
+- No rule to make target `glc_constants.o/glade/scratch/muszala/ERI.f19_g16.IG1850.yellowstone_pgi.GC.161430/bld/glc/lib/libglimmercismfortran.a', needed by `glc_constants.mod'
+- No rule to make target `glc_constants.o/glade/scratch/muszala/ERI.f19_g16.IG1850.yellowstone_pgi.GC.161430/bld/glc/lib/libglimmercismfortran.a', needed by `glc_constants.mod'
+
+
+
+
+
+Machines testing ran on: (Tests in priority order)
+ Standard Tag Pretag *** Standard Tag Posttag **
+
+ build-namelist unit tester: no
+
+ CESM test lists:
+
+ yellowstone/CESM: yes
+ All PASS or xFail (ignoring tput failures)
+ (note that baselines didn't exist in clm4_0_64 for some tests,
+ particularly IG)
+
+ yellowstone/CESM/allIcompsets: no
+
+ test_system testing:
+
+ yellowstone batch: no
+ frankfurt interactive: no
+ yellowstone interactive: no
+ lynx batch: no
+ lynx interactive: no
+ frankfurt batch: no
+
+ test_driver.sh tools testing:
+
+ lynx interactive: no
+ yellowstone interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_64
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm4_0_64
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Wed Mar 6 12:41:26 MST 2013
+One-line Summary: update externals. fixes 40/45 initial condition problem
+
+Purpose of changes: Main purpose is to bring in scripts4_130227b so that
+ CLM45 compsets do not use CLM40 initial conditions. Put in
+ PTCLM fix. Secondary purpose is to update other externals.
+ NOTE: This tag only changes externals. No clm
+ code, scripts or xml files were touched.
+
+Requirements for tag: N/A
+
+Test level of tag: critical (only yellowstone, Icompset and aux 40/45 aux tests)
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: Changes in processor count for certain resolutions.
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: the 40/45 fix in scripts4_130227b will fix initial condition problems.
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: muszala, Erik
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ cprnc_120828 -> cprnc_130301
+ scripts4_130207 -> scripts4_130304
+ Machines_130214 -> Machines_130304b
+ rtm1_0_19 -> rtm1_0_20
+ share3_130220 -> share3_130226
+ esmf_wrf_timemgr_120427 -> esmf_wrf_timemgr_130213
+ timing_120731 -> timing_130214
+ mapping_121113b -> mapping_130222
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes: N/A
+
+ M models/lnd/clm/tools/SVN_EXTERNAL_DIRECTORIES
+ M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+ M SVN_EXTERNAL_DIRECTORIES
+
+Machines testing ran on: (Tests in priority order)
+ yellowstone/CESM: yes - looks decent, but not great. There are no new test failures and new tests that do fail are
+ most likely due to new testlists that weren't tested in a clm tag.
+ A few nl comp failures since number of pes changed (expected).
+ Many comparisons failed due to baselines not existing.
+ 5 IG compsets fail due to a linking error.
+ 2 NCK.F10_f10 tests die with "ERROR: lnd_prognostic but num_inst_lnd not num_inst_max"
+
+ yellowstone/CESM/allIcompsets: yes - looks OK. Transient runs added to xFail list as well as SBN.1x1_smallvilleIA.ICLM45CNCROP
+ and SBN.f09_g16.IGCLM45IS2
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_63
+
+Changes answers relative to baseline: Yes. PE counts change plus the initial condition fixes will change answers compared
+ to existing baselines
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ The 40/45 fix should bring this back to being correct (ie. the state before clm4_0_60).
+
+===============================================================
+===============================================================
+Tag name: clm4_0_63
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Mon Mar 4 13:50:15 MST 2013
+One-line Summary: bug 1635 fix - 4_0 CN bug
+
+Purpose of changes: Put back some removed code. This allows CN to run with 4_0
+ beyond one year
+
+Requirements for tag:
+
+Test level of tag: critical
+
+Bugs fixed (include bugzilla ID): 1635
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: Dave Lawrence, Sam Levis
+
+List any svn externals directories updated (csm_share, mct, etc.): N/A
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/src/clm4_0/biogeochem/CNAnnualUpdateMod.F90
+--- models/lnd/clm/src/clm4_0/biogeochem/CNAnnualUpdateMod.F90 (revision 44311)
++++ models/lnd/clm/src/clm4_0/biogeochem/CNAnnualUpdateMod.F90 (working copy)
+@@ -183,6 +183,12 @@
+ call p2c(num_soilc, filter_soilc, annavg_t2m, cannavg_t2m)
+ end if
+
+ + ! column loop
+ + do fc = 1,num_soilc
+ + c = filter_soilc(fc)
+ + if (annsum_counter(c) >= get_days_per_year() * secspday) annsum_counter(c) = 0._r8
+ + end do
+ +
+ end subroutine CNAnnualUpdate
+ !-----------------------------------------------------------------------
+
+
+Machines testing ran on: (Tests in priority order)
+ yellowstone/CESM: yes only 40 list - OK. Matches xFail list
+ yellowstone/CESM/allIcompsets: yes - OK. Matches xFail list
+
+ Also had D. Lawrence look at one 45 run and a 40 run from this tag compared to one from
+ clm4_0_58 (the tag just before this bug was introduced). All three runs were 2 years long.
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_62
+
+Changes answers relative to baseline: No
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+===============================================================
+Tag name: clm4_0_62
+Originator(s): sacks (Bill Sacks,UCAR/CGD,303-497-1762)
+Date: Sun Feb 24 15:27:09 MST 2013
+One-line Summary: add active flags, change subgrid weighting convention, other misc fixes
+
+Purpose of changes:
+
+Main set of changes involves adding 'active' flags at the pft, column & landunit
+levels, saying whether computations should be run over a given point. This
+change involved many changes throughout the code, changing conditionals like 'if
+(pwtgcell(p) > 0)' to 'if (pactive(p))'. The purpose of this change was
+two-fold: (1) make these conditionals less error-prone and more robust to future
+changes in the code: currently, the 'active' condition is: weight > 0 OR type =
+glc_mec -- but sometimes people forgot to include the latter condition, and it
+could get worse moving forwards; (2) make it easy to change the 'active'
+condition in the future -- this now just has to be done in one place, in
+reweightMod.
+
+In changing these conditionals to use the new 'active' flags, I also added or
+removed conditionals in a few places -- see notes below on the individual file
+modifications.
+
+Also, changed subgrid weighting convention, so that the sum of weights always
+adds to 1 at all levels. Previously, there was no fixed convention for the
+weights of, e.g., pfts on a 0-weight column. Now, even on a 0-weight column, the
+sum of pft weights on the column will still add to 1.
+
+Also a number of other miscellaneous fixes:
+- bug-fix in handling of unstructured grids in determining new vs old urban format
+- add some new surface datasets with new urban format
+- other misc. fixes noted below
+
+Requirements for tag:
+ Testing: build-namelist unit tests, yellowstone cesm, yellowstone
+ test_system batch, frankfurt test_system interactive; include
+ component_gen_comp for test_system tests
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: A few new surface datasets (see below)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: erik
+
+List any svn externals directories updated (csm_share, mct, etc.):
+Main purpose is to update cism (along with necessary scripts & machines
+updates), but also updated other externals to their cesm1_2_beta02 versions:
+- scripts
+- machines
+- cism
+- mct
+- pio
+- csm_share (includes scam update from Erik)
+
+List all files eliminated:
+
+List all files added and what they do:
+
+======= Handles modifications and error-checks related to changing subgrid weights
+======= (note that direct calls to setFilters should no longer be made --
+======= instead, call reweightWrapup in this new module). This adds a routine that confirms
+======= that all subgrid weights add to 1 (from Zack Subin).
+A models/lnd/clm/src/clm4_5/main/reweightMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+======= add 'active' flags; replace use of things like 'if (pwtgcell(p) > 0)'
+======= with 'if (pactive(p))'
+M models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90 ---------- also removed unnecessary conditional
+M models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90 ------------ also added a pactive check
+M models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+M models/lnd/clm/src/clm4_5/main/dynlandMod.F90 --------------- also added a new conditional
+M models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+M models/lnd/clm/src/clm4_5/main/histFileMod.F90
+M models/lnd/clm/src/clm4_5/main/controlMod.F90
+M models/lnd/clm/src/clm4_5/main/filterMod.F90
+M models/lnd/clm/src/clm4_5/main/subgridAveMod.F90 ------------ also added a new conditional in p2c_2d_filter
+ (similar to existing conditional in p2c_1d_filter)
+M models/lnd/clm/src/clm4_5/main/clmtype.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90 --- also remove ^M line endings accidentally added in clm4_0_61
+M models/lnd/clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+
+======= replace calls to setFilters with calls to reweightWrapup; in driver,
+======= moved these calls based on an analysis of where they are needed
+M models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+M models/lnd/clm/src/clm4_5/main/clm_driver.F90
+
+======= change subgrid weighting convention; remove duplicated code in setting
+======= up urban landunits. Note that, in a few places (marked by "TODO WJS") I
+======= assumed an arbitrary weighting for, e.g., pft weights in a 0-weight
+======= landunit. This can be changed in the future once we change how weights
+======= are defined on the surface dataset (using weights on the landunit rather
+======= than on the grid cell).
+M models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+
+======= declare some parameters as 'parameter' (needed in order to use them in
+======= select case statements)
+M models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+
+======= make check for new vs old format more robust; in particular, fix
+======= handling of unstructured grids (before, these were deemed to be
+======= old-format urban files by accident)
+M models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+
+======= use nlevurb=5 even for more_vertlayers (based on suggestion from Keith
+======= and Erik)
+M models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+
+======= use new surface datasets for glcmec 1.9x2.5 1850&2000, and f10 1850, in
+======= order to have valid urban data for some tests to pass
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+
+======= use cism rather than sglc for test_system tests, because sglc means no
+======= sno fields are sent to the coupler, which leads to ERS test failures and
+======= generally weaker tests
+M models/lnd/clm/test/system/config_files/config_CLMtestCompsets.xml
+
+======= add cism1 distinction in compset name
+M models/lnd/clm/test/system/yellowstone.interactive
+M models/lnd/clm/test/system/bluefire.interactive
+M models/lnd/clm/test/system/lynx.batch
+
+======= add call to component_gen_comp
+M models/lnd/clm/test/system/test_system
+
+======= Removed some now-passing tests, including some that were passing earlier
+======= but still remained in this file
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+xFAIL differences:
+Index: models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+===================================================================
+--- models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml (revision 44092)
++++ models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml (working copy)
+@@ -113,8 +113,7 @@
+
+
+
+-
+-
++
+
+
+
+@@ -137,7 +136,6 @@
+
+ scripts issue with ocean not threaded
+ Restart difference
+- Soil balance error on restart
+ scripts issue with ocean not threaded
+
+ scripts issue with ocean not threaded
+@@ -149,20 +147,12 @@
+ missing finidat file
+
+
+- problem building with mpi-serial with pgi compiler
+- missing LAPACK symbol dgbsv
+ scripts issue with ocean not threaded
+- Need LAPACK for PGI (dgbsv)
+- Need LAPACK for PGI (dgbsv)
+ Bad compset name: ICNCROP
+ scripts issue with ocean not threaded
+
+- problem building with mpi-serial with pgi compiler
+- missing LAPACK symbol dgbsv
+- missing LAPACK symbol dgbsv
+ scripts issue with ocean not threaded
+- Need LAPACK for PGI (dgbsv)
+- Need LAPACK for PGI (dgbsv)
++ checkWeights error, probably due to old-format urban on surface dataset
+ Bad compset name: ICNCROP
+ scripts issue with ocean not threaded
+
+
+
+Machines testing ran on: (Tests in priority order)
+ Standard Tag Pretag *** Standard Tag Posttag **
+
+NOTE: Ignoring throughput fails
+
+
+ build-namelist unit tester: yes
+ All PASS or xFAIL
+
+ CESM test lists:
+
+ Note: the following change was made after running the CESM test list (just
+ reran the one affected case: ERI.f19_g16.IG1850CLM45.yellowstone_pgi):
+ In bld/namelist_files/namelist_defaults_clm4_5.xml:
+ -lnd/clm2/surfdata_map/surfdata_1.9x2.5_simyr1850_glcmec10_c120927.nc
+ +lnd/clm2/surfdata_map/surfdata_1.9x2.5_simyr1850_glcmec10_c130221.nc
+
+ yellowstone/CESM: yes
+ All PASS or xFAIL except:
+
+ ***** Expected failure due to urban bug-fix for unstructured grids
+ FAIL ERB.ne30_g16.I_1948-2004_CLM45.yellowstone_intel.GC.051632.compare_hist.clm4_0_61
+
+ ***** memcomp failures probably due to using cism2 code
+ FAIL ERS_D.f19_g16.IGRCP26CN.yellowstone_intel.GC.051626.memcomp.clm4_0_61
+ FAIL ERS_Lm3.f19_g16.IGRCP60CN.yellowstone_intel.GC.051626.memcomp.clm4_0_61
+
+ ***** memcomp failures with unknown cause
+ FAIL ERS_D.f19_g16.IRCP85CN.yellowstone_intel.GC.051626.memcomp.clm4_0_61
+ COMMENT pesmaxmem_incr = 28.2
+
+ yellowstone/CESM/allIcompsets: no
+
+ test_system testing:
+
+ yellowstone batch: yes, including component_gen_comp
+ All PASS or xFAIL except:
+
+ ***** Expected failure due to new surface dataset
+ FAIL ERS_Ld211.f10_f10.ICLM45CNADSPIN.yellowstone_intel_monthly.GC.051756.compare_hist.clm4_0_61_test_system
+
+ ***** Expected failure due to urban bug-fix for unstructured grids
+ FAIL ERS_Ln48_D_P64x1.ne30_g16.ICLM45CN.yellowstone_intel_user_nl_dirs.GC.051756.compare_hist.clm4_0_61_test_system
+
+ ***** memcomp failures with unknown cause
+ FAIL ERS_Ld211.f10_f10.I_2000_CLM45_VOC_CN.yellowstone_intel_voc.GC.051756.memcomp.clm4_0_61_test_system
+ FAIL ERS_Ln48_D_P64x16.ne30_g16.ICN.yellowstone_intel_user_nl_dirs.GC.051756.memcomp.clm4_0_61_test_system
+
+
+ ----- COMPONENT_GEN_COMP RESULTS ---
+ All comparisons PASS except:
+
+ ****** Expected failures due to new surface dataset and fix in urban for
+ ****** ne30 These failures all go away when I compare against one-offs
+ ****** from clm4_0_61 with fixes in surface datasets and the urban ne30
+ ****** bug. However, there is then a diff in the h1 file for the GLCMEC
+ ****** test: diffs just in cols1d_wtlunit & pfts1d_wtlunit, and this is
+ ****** just over glc_mec columns -- this is expected due to changes in
+ ****** subgrid weighting convention
+ FAIL ERS_D.f19_g16.ICLM45GLCMEC.yellowstone_intel_glcMEC.compare_hist.clm4_0_61.clm2.h0
+ FAIL ERS_Ld211.f10_f10.ICLM45CNADSPIN.yellowstone_intel_monthly.compare_hist.clm4_0_61.clm2.h0
+ FAIL ERS_Ld211.f10_f10.ICLM45CNADSPIN.yellowstone_intel_monthly.compare_hist.clm4_0_61.clm2.h1
+ FAIL ERS_Ln48_D_P64x1.ne30_g16.ICLM45CN.yellowstone_intel_user_nl_dirs.compare_hist.clm4_0_61.clm2.h0
+ FAIL ERS_Ln48_D_P64x1.ne30_g16.ICLM45CN.yellowstone_intel_user_nl_dirs.compare_hist.clm4_0_61.clm2.h1
+
+ ****** Differences just over crop landunits: RMS diffs in pft weights on
+ ****** col and landunit, and col weights on landunit; and FILLDIFFs in 12
+ ****** column-level variables (now _FillValue in 0-weight places). These
+ ****** differences aren't surprising given the changes in subgrid weight
+ ****** convention and the fact that inactive points are now given spval in
+ ****** 1-d output
+ FAIL ERS.f19_g16.I_2000_CLM45_CN4Me_CROP.yellowstone_intel_user_nl_dirs.compare_hist.clm4_0_61.clm2.h1
+
+
+ frankfurt interactive: yes, including component_gen_comp
+ All PASS or xFAIL
+
+ yellowstone interactive: no
+ lynx batch: no
+ lynx interactive: no
+ frankfurt batch: no
+
+ test_driver.sh tools testing:
+
+ lynx interactive: no
+ yellowstone interactive: no
+
+ yellowstone/PTCLM: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_61
+
+Changes answers relative to baseline: yes, in limited cases - see below
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CLM4.5 with the following:
+ - unstructured grids, due to urban bug fix
+ - glcmec @ 1.9x2.5, due to new surface datasets with new urban
+ - 1850 @ f10, due to new surface dataset
+ - what platforms/compilers: ALL
+ - nature of change: larger than roundoff/same climate OR new climate (not
+ investigated carefully)
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: NOT DONE
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_0_61
+Originator(s): muszala (Stefan Muszala,UCAR/CGD,303-497-1320)
+Date: Wed Feb 20 15:53:38 MST 2013
+One-line Summary: rtm, drv and clm mods: tws, Volr, r01 rdirc file and SoilHydroMod
+
+Purpose of changes: Bring Volr from RTM to CLM.
+ New ne120 files.
+ New SoilHydrologyMod file for 45 (not bit-for-bit)
+ Bring tws in.
+ Sacks test list change and test_system change.
+ Add yellowstone to xFail options.
+ Added RTM test list for test_system tests (yellowstone.rtm.batch)
+ DEPRECATE WT in 4_5 code. WT and the variable wt are left in since they are used in other
+ portions of the code, but they are marked as deprecated since we now have TWS.
+
+Requirements for tag:
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: muszala, swenson, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ drv: update from drvseq4_2_20 to drvseq4_2_22
+ rtm: update from rtm1_0_18 to rtm1_0_19
+ csm_share: update from share3_130213 to share3_130131
+
+List all files eliminated:
+D https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOn
+D https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmR01
+
+List all files added and what they do:
+ Added RTM rtm test_system tests
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOnFloodOnEffvelOff/user_nl_rtm
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOnFloodOnEffvelOff
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOnFloodOnEffvelOn/user_nl_rtm
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOnFloodOnEffvelOn
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOnIceOff/user_nl_rtm
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOnIceOff
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOnIceOn/user_nl_rtm
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOnIceOn
+A https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/yellowstone.rtm.batch
+
+List all existing files that have been modified, and describe the changes:
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/test_system
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/config_files/config_CLMtestCompsets.xml
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/yellowstone.interactive
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOff/user_nl_rtm
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/README
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/test/system/yellowstone.batch
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/bld/unit_testers/xFail/expectedFail.pm
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/doc/ChangeLog
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/doc/ChangeSum
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/cpl_share/clm_cpl_indices.F90
+MM https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+MM https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/clm4_5/main/clmtype.F90
+MM https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/models/lnd/clm/src/clm4_0/main/clm_atmlnd.F90
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/SVN_EXTERNAL_DIRECTORIES
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/ChangeLog
+M https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_60/ChangeSum
+
+
+Machines testing ran on: (Tests in priority order)
+ NOTE: Lots of throughput, NLComp and memcomp fails. Also ran rtm test_system tests
+
+ build-namelist unit tester: OK (yellowstone) - added two tests that were missing from xFail list
+
+ CESM test lists:
+
+ yellowstone/CESM:
+ -> 4_0 testing: OK. Removed tests in xFail file and ignoring NLComp tests. Remaining Fails (tputcomp and memcomp) will be ignored since test tolerances are too narrow.
+
+ -> 4_5 testing: OK. There will be B4B differences due to a new SoilHydrologyMod which are listed below
+
+ FAILs due to new SoilHydroMod and r2x_Slrr_volr in coupler
+
+ FAIL SMS_RLA.f45_f45.ICLM45.yellowstone_intel.GC.161028.compare_hist.clm4_0_60
+ FAIL ERS_D.f10_f10.ICLM45.yellowstone_intel.GC.161028.compare_hist.clm4_0_60
+ FAIL NCK.f10_f10.ICLM45.yellowstone_intel.GC.161028.compare_hist.clm4_0_60
+ FAIL ERB.ne30_g16.I_1948-2004_CLM45.yellowstone_intel.GC.161028.compare_hist.clm4_0_60
+ FAIL ERH_D.f19_g16.I1850CLM45CN.yellowstone_intel.GC.161028.compare_hist.clm4_0_60
+ FAIL SMS.1x1_mexicocityMEX.I1PTCLM45.yellowstone_intel.GC.161028.compare_hist.clm4_0_60
+ FAIL ERS_Lm3.1x1_vancouverCAN.I1PTCLM45.yellowstone_intel.GC.161028.compare_hist.clm4_0_60
+
+ yellowstone/CESM/allIcompsets: OK
+
+ test_system testing:
+
+ yellowstone batch:
+ OK. FAILs due to new SoilHydroMod and r2x_Slrr_volr in coupler
+
+ FAIL ERS.f19_g16.I_1850_CLM45_CN4Me.user_nl_dirs.GC.114029.compare_hist.clm4_0_60
+ FAIL ERS_D.f19_g16.I_1850_CLM45_CNCENTNoMe.user_nl_dirs.GC.114029.compare_hist.clm4_0_60
+ FAIL ERS.f19_g16.I_2000_CLM45_CN4Me_CROP.user_nl_dirs.GC.114029.compare_hist.clm4_0_60
+ FAIL ERS_Ld211.f10_f10.ICLM45CNADSPIN.monthly.GC.114029.compare_hist.clm4_0_60
+ FAIL ERS_Ln48_D_P64x1.ne30_g16.ICLM45CN.user_nl_dirs.GC.114029.compare_hist.clm4_0_60
+ FAIL ERS_Ld211_D_P224x1.f10_f10.ICLM45CNCROP.crop.GC.114029.compare_hist.clm4_0_60
+ FAIL ERS_Ld211_P384x1.f19_g16.ICLM45CNDVCROP.crop.GC.114029.compare_hist.clm4_0_60
+ FAIL ERS_Ld211.f10_f10.I_2000_CLM45_VOC_CN.voc.GC.114029.compare_hist.clm4_0_60
+ FAIL ERS_Ln48_D.f10_f10.I_2000_CLM45_CN.ciso.GC.114029.compare_hist.clm4_0_60
+
+ frankfurt interactive:
+ OK. FAILs due to new SoilHydroMod and r2x_Slrr_volr in coupler
+
+ FAIL SMS_D_Mmpi-serial.1x1_vancouverCAN.ICLM451PT.frankfurt_intel_user_nl_dirs.GC.104908.compare_hist.clm4_0_60
+ FAIL ERS_Mmpi-serial.1x1_mexicocityMEX.ICLM451PT.frankfurt_intel_user_nl_dirs.GC.104908.compare_hist.clm4_0_60
+
+ yellowstone interactive:
+ OK. FAILs due to new SoilHydroMod and r2x_Slrr_volr in coupler
+
+ FAIL ERS_D_Mmpi-serial.CLM_USRDAT.ICLM45alaskaCN.yellowstone_intel_user_nl_dirs.GC.114053.compare_hist.clm4_0_60
+ FAIL ERS_D_Mmpi-serial.CLM_USRDAT.ICLM45USUMB.yellowstone_intel_user_nl_dirs.GC.114053.compare_hist.clm4_0_60
+ FAIL ERS_D_P1x1_Mmpi-serial.f19_g16.I20TR_CLM45VSCN.yellowstone_intel_voc.GC.114053.compare_hist.clm4_0_60
+ FAIL ERS_D_P1x1_Mmpi-serial.5x5_amazon.I_2000_CLM45.yellowstone_intel_user_nl_dirs.GC.114053.compare_hist.clm4_0_60
+ FAIL ERS_Ln48_D_P1x1_Mmpi-serial.f45_g37.ICLM45VOC.yellowstone_intel_voc.GC.114053.compare_hist.clm4_0_60
+ FAIL ERS_Ly3_Mmpi-serial.1x1_brazil.I_2000_CLM45.yellowstone_intel_monthly.GC.114053.compare_hist.clm4_0_60
+ FAIL ERS_Ld211_Mmpi-serial.1x1_brazil.I_2000_CLM45_CN.yellowstone_intel_monthly.GC.114053.compare_hist.clm4_0_60
+ FAIL ERS_Ly3_Mmpi-serial.1x1_brazil.I_2000_CLM45_CNDV.yellowstone_intel_monthly.GC.114053.compare_hist.clm4_0_60
+ FAIL ERS_Ld211_Mmpi-serial.1x1_brazil.IVSCN.yellowstone_intel_voc.GC.114053.compare_hist.clm4_0_60
+
+ frankfurt batch:
+ OK. FAILs due to new SoilHydroMod and r2x_Slrr_volr in coupler
+
+ FAIL ERS_D_P16x1.f19_g16.I_1850_CLM45_CN4Me.frankfurt_intel_user_nl_dirs.GC.104516.compare_hist.clm4_0_60
+ FAIL ERI_P16x1.f19_g16.I_1850_CLM45_CNCENTNoMe.frankfurt_intel_user_nl_dirs.GC.104516.compare_hist.clm4_0_60
+ FAIL ERS_Ld211_P16x1.f10_f10.ICLM45CNADSPIN.frankfurt_intel_monthly.GC.104516.compare_hist.clm4_0_60
+ FAIL ERS_P16x1.f19_g16.I_1850_CLM45_CN4Me_LessSPIN.frankfurt_intel_user_nl_dirs.GC.104516.compare_hist.clm4_0_60
+ FAIL SMS_D_P16x1.f19_g16.I_1850_CLM45_CN4Me_EXLessSPIN.frankfurt_intel_user_nl_dirs.GC.104516.compare_hist.clm4_0_60
+ FAIL SMS_D_P16x1.f10_f10.I_2000_CLM45_CN4Me.frankfurt_intel_vrtlay.GC.104516.compare_hist.clm4_0_60
+ FAIL ERS_D_P16x1.f19_g16.I_2000_CLM45_CN.frankfurt_intel_user_nl_dirs.GC.104516.compare_hist.clm4_0_60
+ FAIL ERI_P16x1.f19_g16.I_2000_CLM45_CN.frankfurt_intel_user_nl_dirs.GC.104516.compare_hist.clm4_0_60
+ FAIL SMS_D_P16x1.f19_g16.ICLM45CNEXSPIN.frankfurt_intel_user_nl_dirs.GC.104516.compare_hist.clm4_0_60
+
+CLM tag used for the baseline comparison tests if applicable: CLM4_0_60
+
+Changes answers relative to baseline:
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: 4_5 code
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ 1) SoilHydrologyMod.F90 mods from Swenson affect soil moisture, which is affecting certain l2x coupler fields
+ 2) bringing VOLR from RTM through the coupler adds one more field to coupler history files.
+
+===============================================================
+===============================================================
+Tag name: clm4_0_60
+Originator(s): erik (Erik Kluzek)
+Date: Mon Feb 11 03:55:56 MST 2013
+One-line Summary: Bring CLM4.5 code from clm45sci branch to trunk as an option set at configure time
+
+Purpose of changes:
+
+Bring in CLM4.5 branch as additional directories. Change directory structure, so there are shared files
+and utilities for both CLM4.0 and CLM4.5 and files that are different for each. Update compsets in
+scripts in order to work in this paradigm. Move clm45sci15_clm4_0_58 code to trunk under clm4_5 phys.
+
+clm4.5 includes the following:
+
+* Bring LBNL-merge branch on with: vertical soil, Methane, CENTURY, split nitrification, new-lake model.
+* Modifications to GPP, on gppdev branch, multilayer canopy and then single-layer version that reproduces it.
+* Crop model updates. Irrigation included with crop model as an option. Fix CNDV-CROP.
+* Urban model updates, multi-density, urban depth separate from soil depth, wasteheat to zero
+* Bring in permafrostsims09 branch with Sean Swenson's flooding changes.
+* Update pft-physiology file, change some CN defaults, change min flow slightly in RTM
+* Set ponding to zero, acclimation mods from Keith Oleson, a hydrology change from Sean Swenson.
+
+Requirements for tag: clm40/clm45 code/tools work/tested, answers same, complete move from bluefire to yellowstone
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+ 1621 (normalization issue in mksurfdata_map and clm -- partial)
+ 1604 (The -co2_type flag in the CLM namelist is not set correct.)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+ Compsets to run CLM4.5 added.
+
+I_2000_CLM45 (ICLM45)
+I_2000_1PTFRC_CLM45 (I1PTCLM45)
+I_2000_GLC_CLM45_CISM1 (IGCLM45)
+I_2000_GLC_CLM45_CISM2P (IGCLM45IS2)
+I_2000_CLM45_CN (ICLM45CN)
+I_2000_CLM45_CN_CROP (ICLM45CNCROP)
+I_2000_CLM45_CN_4Me (ICLM45CN4Me)
+I_2000_CRUFRC_CLM45 (ICRUCLM45)
+I_2000_CRUFRC_CLM45_CN (ICRUCLM45CN)
+I_2000_CRUFRC_CLM45_CN_4Me (ICRUCLM45CN4Me)
+I_2000_CLM45_CN_GLC_CISM1 (IGCLM45CN)
+I_1850_CLM45 (I1850CLM45)
+I_1850_CLM45_CN_4Me (I1850CLM45CN4Me)
+I_1850_CRUFRC_CLM45 (I1850CRUCLM45)
+I_1850_CRUFRC_CLM45_CN (I1850CRUCLM45CN)
+I_1850_CRUFRC_CLM45_CN_4Me (I1850CRUCLM45CN4Me)
+I_1850_CLM45_GLC_CISM1 (IG1850CLM45)
+I_1850_CLM45_CN (I1850CLM45CN)
+I_1850-2000_CLM45 (I20TRCLM45)
+I_1850-2000_CLM45_CN (I20TRCLM45CN)
+I_1850-2000_CRUFRCCLM45 (I20TRCRUCLM45)
+I_1850-2000_CRUFRC_CLM45_CN (I20TRCRUCLM45CN)
+I_1850-2000_CRUFRC_CLM45_CN_4Me (I20TRCRU4MeCLM45)
+I_1850-2000_CLM45_GLC_CISM1 (IG20TRCLM45)
+I_1850-2000_CLM45_CN_GLC_CISM1 (IG20TRCLM45CN)
+I_1948-2004_CLM45 (I4804CLM45)
+I_1948-2004_CLM45_GLC_CISM1 (IG4804CLM45)
+I_1948-2004_CLM45_CN_GLC_CISM1 (IG4804CLM45CN)
+I_RCP8.5_CLM45_CN_GLC_CISM1 (IGRCP85CLM45CN)
+I_RCP6.0_CLM45_CN (IRCP60CLM45CN)
+I_RCP6.0_CLM45_CN_GLC_CISM1 (IGRCP60CLM45CN)
+I_RCP4.5_CLM45_CN (IRCP45CLM45CN)
+I_RCP4.5_CLM45_CN_GLC_CISM1 (IGRCP45CNCLM45)
+I_RCP2.6_CLM45_CN (IRCP26CLM45CN)
+I_RCP2.6_CLM45_CN_GLC_CISM1 (IGRCP26CLM45CN)
+I_RCP8.5_CLM45_CN (IRCP85CLM45CN)
+I_1850_SPINUP_3HrWx_CLM45_CN_4Me
+
+ CLM configure changes:
+
+ Add physics option to determine if CLM4.0 or CLM4.5 physics is used:
++ -phys Value of clm4_0 or clm4_5 (default is clm4_0)
+
+ Options removed
+
+ -pergro
+ -c13
+
+ Options added for CLM4.5 physics:
+
++ -clm4me Turn Methane model: [on | off]
++ Requires bgc=cn/cndv (Carbon Nitrogen model)
++ (ONLY valid for CLM4.5!)
++ -exlaklayers Turn on extra lake layers (25 layers instead of 10) [on | off]
++ (ONLY valid for CLM4.5!)
++ -vsoilc_centbgc Turn on vertical soil Carbon profile, CENTURY model decomposition,
++ split Nitrification/de-Nitrification into two mineral
++ pools for NO3 and NH4 (requires clm4me Methane model), and
++ eliminate inconsistent duplicate soil hydraulic
++ parameters used in soil biogeochem.
++ (requires either CN or CNDV)
++ (ONLY valid for CLM4.5!)
++ [on,off or colon delimited list of no options] (default off)
++ no-vert Turn vertical soil Carbon profile off
++ no-cent Turn CENTURY off
++ no-nitrif Turn the Nitrification/denitrification off
++ no-stnd-bsw Turn the standard BSW for soil psi off
++ [no-vert,no-cent,no-nitrif,no-stnd-bsw,
++ no-vert:no-cent,no-nitrif:no-stnd-bsw,
++ no-vert:no-cent:no-stnd-bsw]
+
+ New spinup options added for CLM4.5 physics (but are now deprecated and NOT recommended for use)
+
++ Enter-AD Turn on Accelerated Decomposition from (6)
++ existing initial conditions (optional) (deprecated)
++ (ONLY valid for CLM4.5!)
++ AD2Lesser Jump from full AD to lesser AD spinup (optional) (4)
++ (deprecated) (ONLY valid for CLM4.5!)
++ LesserAD Lesser Accelerated Decomposition mode (3)
++ (deprecated) (ONLY valid for CLM4.5!)
++ LesserAD-exit Jump from lesser AD to normal mode (1)
++ (deprecated) (ONLY valid for CLM4.5!)
++ Two sequences are valid: 6-5-4-3-1-0 or 6-5-2-0 (where 6 and 4 are optional)
++ The recommended sequence is 5-2-0
+
+
+
+Describe any changes made to the namelist:
+ Extensive list of new namelist options for CLM4.5 physics
+
+List any changes to the defaults for the boundary datasets:
+ Extensive list of new datasets for CLM4.5
+ Add 360x720 grid (hcru_hcru) for CLM4.0 physics
+
+Describe any substantial timing or memory changes: CLM4.0 -- identical to clm4_0_59
+ CLM4.5 -- identical to clm45sci15_clm4_0_58
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): to cesm1_2_alpha02a versions
+
+ scripts to scripts4_130204
+ Machines to Machines_130204
+ drv to drvseq4_2_18
+ datm to datm8_130130
+ rtm to rtm1_0_18
+ cism to 45merge_02_cism1_121114
+ csm_share to share3_130131
+ pio to pio1_6_1
+ mapping/gen_domain to mapping_121113b
+
+List all files eliminated:
+
+============== Eliminate PERGRO option, remove duplicated tools from clm4_0, change names to include clm4_0
+D models/lnd/clm/test/system/nl_files/mksrfdt_1x1_numaIA_mp20irrcr_2000
+D models/lnd/clm/test/system/nl_files/mksrfdt_10x15_irr_1850
+D models/lnd/clm/tools/mkmapdata/* ---------> remove
+D models/lnd/clm/tools/mkprocdata_map/* ----> remove
+D models/lnd/clm/tools/ncl_scripts/* -------> remove
+D models/lnd/clm/tools/interpinic/* --------> move to under clm4_0
+D models/lnd/clm/tools/mkmapgrids/* --------> remove
+D models/lnd/clm/tools/mksurfdata_map/* ----> move to under clm4_0
+D models/lnd/clm/bld/namelist_files/namelist_definition.xml ---> use clm4_5 version
+D models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -> move to name with clm4_0
+D models/lnd/clm/bld/namelist_files/use_cases/pergro_pd.xml ---> Remove PERGRO option
+D models/lnd/clm/bld/namelist_files/use_cases/pergro0_pd.xml --> Remove PERGRO option
+D models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml -> move to name with clm4_0
+
+List all files added and what they do:
+
+============== testing for clm4_5 and change name of some tests to include clm4_0
+A + models/lnd/clm/test/system/config_files/gen_domain
+A + models/lnd/clm/test/system/tests_posttag_frankfurt_nompi
+A + models/lnd/clm/test/system/user_nl_dirs/anoxia_wtsat
+A + models/lnd/clm/test/system/user_nl_dirs/anoxia_wtsat/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/vrtlay
+A + models/lnd/clm/test/system/user_nl_dirs/vrtlay/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/oldhyd
+A + models/lnd/clm/test/system/user_nl_dirs/oldhyd/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/ch4_set2_ciso
+A + models/lnd/clm/test/system/user_nl_dirs/ch4_set2_ciso/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/ch4_set3_pftroot
+A + models/lnd/clm/test/system/user_nl_dirs/ch4_set3_pftroot/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/rootlit
+A + models/lnd/clm/test/system/user_nl_dirs/rootlit/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/ciso
+A + models/lnd/clm/test/system/user_nl_dirs/ciso/user_nl_clm
+A + models/lnd/clm/test/system/nl_files/nl_ch4_set2_ciso
+A + models/lnd/clm/test/system/nl_files/nl_ch4_set3_pftroot
+A + models/lnd/clm/test/system/nl_files/mksrfdt_10x15_1850
+A + models/lnd/clm/test/system/nl_files/nl_rootlit
+A + models/lnd/clm/test/system/nl_files/nl_ciso
+A + models/lnd/clm/test/system/nl_files/nl_anoxia_wtsat
+A + models/lnd/clm/test/system/nl_files/clm4_0_mksrfdt_1x1_numaIA_mp20irrcr_2000
+A + models/lnd/clm/test/system/nl_files/clm4_0_mksrfdt_10x15_irr_1850
+A + models/lnd/clm/test/system/nl_files/mksrfdt_1x1_numaIA_mp24_2000
+A + models/lnd/clm/test/system/nl_files/nl_vrtlay
+A + models/lnd/clm/test/system/nl_files/nl_oldhyd
+============== clm4_5 version of tools (from clm45sci15_clm4_0_58)
+A + models/lnd/clm/tools/clm4_5
+A + models/lnd/clm/tools/clm4_5/mkmapdata
+A + models/lnd/clm/tools/clm4_5/mkmapdata/mvNimport.sh
+A + models/lnd/clm/tools/clm4_5/mkmapdata/rmdups.ncl
+A + models/lnd/clm/tools/clm4_5/mkmapdata/regridbatch.sh
+A + models/lnd/clm/tools/clm4_5/mkmapdata/mkmapdata.sh
+A + models/lnd/clm/tools/clm4_5/mkmapdata/mkunitymap.ncl
+A + models/lnd/clm/tools/clm4_5/mkmapdata/mknoocnmap.pl
+A + models/lnd/clm/tools/clm4_5/mkmapdata/README
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/clm4054_ne30g16_I2000.clm2.h0.2000-01_c121107.nc
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/map_ne30np4_nomask_to_fv1.9x2.5_nomask_aave_da_c121107.nc
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/mkprocdata_map_functions.bash
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/mkprocdata_map.F90
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/gridmapMod.F90
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/constMod.F90
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/Makefile.common
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/fmain.F90
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/shr_file_mod.F90
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/nanMod.F90
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/Mkdepends
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/Srcfiles
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/Filepath
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/Makefile
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/fileutils.F90
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/src/shr_kind_mod.F90
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/mkprocdata_map_in
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/mkprocdata_map_all
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/clm
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/mkprocdata_map_wrap
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/clm4054_f19g16_I2000.clm2.h0.2000-01_c121107.nc
+A + models/lnd/clm/tools/clm4_5/mkprocdata_map/README
+A + models/lnd/clm/tools/clm4_5/ncl_scripts
+A + models/lnd/clm/tools/clm4_5/ncl_scripts/cprnc.pl
+A + models/lnd/clm/tools/clm4_5/ncl_scripts/getco2_historical.ncl
+A + models/lnd/clm/tools/clm4_5/ncl_scripts/cprnc.ncl
+A + models/lnd/clm/tools/clm4_5/ncl_scripts/getregional_datasets.pl
+A + models/lnd/clm/tools/clm4_5/ncl_scripts/getregional_datasets.ncl
+A + models/lnd/clm/tools/clm4_5/ncl_scripts/README
+A + models/lnd/clm/tools/clm4_5/interpinic
+A + models/lnd/clm/tools/clm4_5/interpinic/clmi.BCN.1949-01-01_10x15_USGS_simyr1850_c121113.nc
+A + models/lnd/clm/tools/clm4_5/interpinic/interpinic.runoptions
+A + models/lnd/clm/tools/clm4_5/interpinic/src
+A + models/lnd/clm/tools/clm4_5/interpinic/src/interpinic.F90
+A + models/lnd/clm/tools/clm4_5/interpinic/src/shr_infnan_mod.F90
+A + models/lnd/clm/tools/clm4_5/interpinic/src/Makefile.common
+A + models/lnd/clm/tools/clm4_5/interpinic/src/shr_isnan.c
+A + models/lnd/clm/tools/clm4_5/interpinic/src/shr_sys_mod.F90
+A + models/lnd/clm/tools/clm4_5/interpinic/src/fmain.F90
+A + models/lnd/clm/tools/clm4_5/interpinic/src/shr_log_mod.F90
+A + models/lnd/clm/tools/clm4_5/interpinic/src/Mkdepends
+A + models/lnd/clm/tools/clm4_5/interpinic/src/Srcfiles
+A + models/lnd/clm/tools/clm4_5/interpinic/src/shr_isnan.h
+A + models/lnd/clm/tools/clm4_5/interpinic/src/Filepath
+A + models/lnd/clm/tools/clm4_5/interpinic/src/Makefile
+A + models/lnd/clm/tools/clm4_5/interpinic/src/shr_kind_mod.F90
+A + models/lnd/clm/tools/clm4_5/interpinic/src/shr_const_mod.F90
+A + models/lnd/clm/tools/clm4_5/interpinic/README
+A + models/lnd/clm/tools/clm4_5/mkmapgrids
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/Makefile.common
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/domainMod.F90
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/shr_sys_mod.F90
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/shr_file_mod.F90
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/nanMod.F90
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/shr_log_mod.F90
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/Mkdepends
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/Srcfiles
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/mkmapgrids.F90
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/Filepath
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/Makefile
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/src/shr_kind_mod.F90
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/mkmapgrids.namelist
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/mkscripgrid.ncl
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/mkmapgrids.csh
+A + models/lnd/clm/tools/clm4_5/mkmapgrids/README
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkvarctl.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkncdio.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/clm_varpar.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkutilsMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/shr_file_mod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/shr_timer_mod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/shr_log_mod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkindexmapMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mklaiMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksoilMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/fileutils.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/shr_const_mod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkharvestMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkfileMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkurbanparCommonMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Makefile
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/shr_string_mod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkglcmecMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkvarpar.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/clm_varctl.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/shr_sys_mod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkvocefMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkdomainMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Filepath
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mksurfdat.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/shr_kind_mod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkurbanparMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Makefile.common
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkgridmapMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/nanMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Srcfiles
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/Mkdepends
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mklanwatMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/src/mkpftMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_lookup_2d_netcdf.nc
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkncdio.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkutilsMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkindexmapMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/Srcfiles
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mksurfdata_map.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/Filepath
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/Makefile
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/README
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/unit_testers/test_mkurbanparMod.F90
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata_map.namelist
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/mksurfdata.pl
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/README
+A + models/lnd/clm/tools/clm4_5/mksurfdata_map/pftdyn_hist_simyr1850-2005.txt
+============== clm4_0 version of tools
+A + models/lnd/clm/tools/clm4_0
+A + models/lnd/clm/tools/clm4_0/interpinic
+A + models/lnd/clm/tools/clm4_0/interpinic/clmi.BCN.1949-01-01_10x15_USGS_simyr1850_c100322.nc
+A + models/lnd/clm/tools/clm4_0/interpinic/interpinic.runoptions
+A + models/lnd/clm/tools/clm4_0/interpinic/src
+A + models/lnd/clm/tools/clm4_0/interpinic/src/interpinic.F90
+A + models/lnd/clm/tools/clm4_0/interpinic/src/Makefile.common
+A + models/lnd/clm/tools/clm4_0/interpinic/src/shr_sys_mod.F90
+A + models/lnd/clm/tools/clm4_0/interpinic/src/fmain.F90
+A + models/lnd/clm/tools/clm4_0/interpinic/src/shr_log_mod.F90
+A + models/lnd/clm/tools/clm4_0/interpinic/src/Mkdepends
+A + models/lnd/clm/tools/clm4_0/interpinic/src/Srcfiles
+A + models/lnd/clm/tools/clm4_0/interpinic/src/Filepath
+A + models/lnd/clm/tools/clm4_0/interpinic/src/Makefile
+A + models/lnd/clm/tools/clm4_0/interpinic/src/shr_kind_mod.F90
+A + models/lnd/clm/tools/clm4_0/interpinic/src/shr_const_mod.F90
+A + models/lnd/clm/tools/clm4_0/interpinic/README
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkvarctl.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkncdio.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/clm_varpar.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkutilsMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/shr_file_mod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/shr_timer_mod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/shr_log_mod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkindexmapMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mklaiMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mksoilMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/fileutils.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/shr_const_mod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkurbanparDomMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkharvestMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkfileMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkurbanparCommonMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/Makefile
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/shr_string_mod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkglcmecMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkvarpar.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/clm_varctl.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/shr_sys_mod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkvocefMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkdomainMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/Filepath
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mksurfdat.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkurbanparAvgMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/shr_kind_mod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/Makefile.common
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkgridmapMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/nanMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/Mkdepends
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/Srcfiles
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mklanwatMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/src/mkpftMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/test_lookup_2d_netcdf.nc
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/test_mkncdio.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/test_mkutilsMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/test_mkindexmapMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/Srcfiles
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/test_mksurfdata_map.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/test_mod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/Filepath
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/Makefile
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/README
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/unit_testers/test_mkurbanparDomMod.F90
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/mksurfdata_map.namelist
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/mksurfdata.pl
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/README
+A + models/lnd/clm/tools/clm4_0/mksurfdata_map/pftdyn_hist_simyr1850-2005.txt
+A + models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0_tools.xml
+A + models/lnd/clm/bld/namelist_files/namelist_definition_clm4_0.xml
+A + models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5_tools.xml
+A + models/lnd/clm/bld/namelist_files/namelist_definition_clm4_5.xml
+A + models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_0.xml
+A + models/lnd/clm/bld/namelist_files/namelist_defaults_clm4_5.xml
+============== clm4_5 version of source (from clm45sci15_clm4_0_58)
+A + models/lnd/clm/src/clm4_5
+A + models/lnd/clm/src/clm4_5/biogeochem
+A + models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate2Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNGapMortalityMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNNitrifDenitrifMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNGRespMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CropRestMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/initch4Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate1Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNBalanceCheckMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate3Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNFireMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNMRespMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_BGC.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/MEGANFactorsMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNVerticalProfileMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/ch4RestMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNWoodProductsMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNCIsoFluxMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNPrecisionControlMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/ch4Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNSummaryMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/DUSTMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDVLightMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNPhenologyMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/ch4varcon.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/STATICEcosysDynMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDecompMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate1Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDVMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNCStateUpdate3Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/VOCEmissionMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNrestMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDVEcosystemDynIniMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNAnnualUpdateMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNNStateUpdate2Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDecompCascadeMod_CENTURY.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNNDynamicsMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNAllocationMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/DryDepVelocity.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNEcosystemDynMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNSetValueMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNVegStructUpdateMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNSoilLittVertTranspMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNDVEstablishmentMod.F90
+A + models/lnd/clm/src/clm4_5/biogeochem/CNC14DecayMod.F90
+A + models/lnd/clm/src/clm4_5/main
+A + models/lnd/clm/src/clm4_5/main/clm_varcon.F90
+A + models/lnd/clm/src/clm4_5/main/clm_varpar.F90
+A + models/lnd/clm/src/clm4_5/main/CNiniTimeVar.F90
+A + models/lnd/clm/src/clm4_5/main/dynlandMod.F90
+A + models/lnd/clm/src/clm4_5/main/clm_initializeMod.F90
+A + models/lnd/clm/src/clm4_5/main/subgridRestMod.F90
+A + models/lnd/clm/src/clm4_5/main/clm_glclnd.F90
+A + models/lnd/clm/src/clm4_5/main/subgridMod.F90
+A + models/lnd/clm/src/clm4_5/main/accFldsMod.F90
+A + models/lnd/clm/src/clm4_5/main/clmtypeInitMod.F90
+A + models/lnd/clm/src/clm4_5/main/pftdynMod.F90
+A + models/lnd/clm/src/clm4_5/main/iniTimeConst.F90
+A + models/lnd/clm/src/clm4_5/main/histFileMod.F90
+A + models/lnd/clm/src/clm4_5/main/pft2colMod.F90
+A + models/lnd/clm/src/clm4_5/main/clm_atmlnd.F90
+A + models/lnd/clm/src/clm4_5/main/findHistFields.pl
+A + models/lnd/clm/src/clm4_5/main/restFileMod.F90
+A + models/lnd/clm/src/clm4_5/main/controlMod.F90
+A + models/lnd/clm/src/clm4_5/main/initSurfAlbMod.F90
+A + models/lnd/clm/src/clm4_5/main/filterMod.F90
+A + models/lnd/clm/src/clm4_5/main/clm_varctl.F90
+A + models/lnd/clm/src/clm4_5/main/clm_driver.F90
+A + models/lnd/clm/src/clm4_5/main/subgridAveMod.F90
+A + models/lnd/clm/src/clm4_5/main/initGridCellsMod.F90
+A + models/lnd/clm/src/clm4_5/main/CNiniSpecial.F90
+A + models/lnd/clm/src/clm4_5/main/pftvarcon.F90
+A + models/lnd/clm/src/clm4_5/main/surfrdMod.F90
+A + models/lnd/clm/src/clm4_5/main/inicPerpMod.F90
+A + models/lnd/clm/src/clm4_5/main/clmtype.F90
+A + models/lnd/clm/src/clm4_5/main/histFldsMod.F90
+A + models/lnd/clm/src/clm4_5/main/mkarbinitMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys
+A + models/lnd/clm/src/clm4_5/biogeophys/BalanceCheckMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SoilTemperatureMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SLakeFluxesMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/UrbanInputMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SnowHydrologyMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics1Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/FrictionVelocityMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/TridiagonalMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SLakeHydrologyMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/Hydrology1Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/BiogeophysRestMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/UrbanMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/ActiveLayerMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/QSatMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/clm_driverInitMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SLakeTemperatureMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/BareGroundFluxesMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SNICARMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/CanopyFluxesMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SurfaceRadiationMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/initSLakeMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/H2OSfcMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/Biogeophysics2Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/FracWetMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/UrbanInitMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SLakeRestMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SLakeCon.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SurfaceAlbedoMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/Hydrology2Mod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/BandDiagonalMod.F90
+A + models/lnd/clm/src/clm4_5/biogeophys/SoilHydrologyMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+M models/lnd/clm/test/system/TCBCFGtools.sh
+M models/lnd/clm/test/system/frankfurt.batch
+M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+M models/lnd/clm/test/system/config_files/config_CLMtestCompsets.xml
+M models/lnd/clm/test/system/TSMncl_tools.sh
+M models/lnd/clm/test/system/TBLCFGtools.sh
+M models/lnd/clm/test/system/README.testnames
+M models/lnd/clm/test/system/tests_posttag_yong
+M models/lnd/clm/test/system/yellowstone.interactive
+M models/lnd/clm/test/system/TCBtools.sh
+M models/lnd/clm/test/system/test_driver.sh
+M models/lnd/clm/test/system/shortlist.interactive
+M models/lnd/clm/test/system/tests_pretag_yellowstone_nompi
+M models/lnd/clm/test/system/bluefire.batch
+M models/lnd/clm/test/system/frankfurt.interactive
+M models/lnd/clm/test/system/TSMscript_tools.sh
+M models/lnd/clm/test/system/tests_posttag_mirage
+M models/lnd/clm/test/system/gen_test_table.sh
+M models/lnd/clm/test/system/nl_files/gen_domain.ne30.runoptions
+M models/lnd/clm/test/system/nl_files/gen_domain.T31.runoptions
+M models/lnd/clm/test/system/TOPtools.sh
+M models/lnd/clm/test/system/input_tests_master
+M models/lnd/clm/test/system/TSMtools.sh
+M models/lnd/clm/test/system/TBLscript_tools.sh
+M models/lnd/clm/test/system/yellowstone.batch
+M models/lnd/clm/test/system/tests_posttag_nompi_regression
+M models/lnd/clm/test/system/lynx.batch
+M models/lnd/clm/test/system/TBLtools.sh
+M models/lnd/clm/test/system/shortlist.batch
+M models/lnd/clm/test/system/TSMCFGtools.sh
+
+M models/lnd/clm/bld/configure
+M models/lnd/clm/bld/queryDefaultNamelist.pl
+M models/lnd/clm/bld/config_files/config_definition.xml
+M models/lnd/clm/bld/listDefaultNamelist.pl
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/clm.buildnml.csh
+M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl
+
+MM models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+MM models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+
+Difference in expected Fails:
+
+
+
+-
+-
+-
+-
+-
+- missing datasets for us20
+- ne16 missing finidat file for 1850
+- ne60 missing finidat file for 1850
+- 1x1_tropicAtl missing finidat file for 1850
+-
+-
+-
+
+
+
+@@ -25,10 +13,17 @@
+
+
+
++
+
++ missing datasets for us20
++ ne16 missing finidat file for 1850
++ ne16 missing finidat file for 1850
++ ne60 missing finidat file for 1850
++ ne60 missing finidat file for 1850
++ 1x1_tropicAtl missing finidat file for 1850
++ 1x1_tropicAtl missing finidat file for 1850
+
+
+- missing datasets for wus12
+ missing datasets for us20
+ ne16 missing finidat file for 1850
+ ne60 missing finidat file for 1850
+@@ -38,17 +33,6 @@
+
+
+
+-
+-
+-
+- cprnc showing diffs are not b4b
+- problem configuring
+- problem configuring
+- clm stand-alone can no longer work
+- clm stand-alone can no longer work
+-
+-
+-
+
+
+
+@@ -62,24 +46,6 @@
+
+
+
+-
+-
+-
+- Ignore. Will be moved to CESM tests.
+- Ignore. Will be moved to CESM tests.
+- Ignore. Will be moved to CESM tests.
+- Ignore. Will be moved to CESM tests.
+- Failing for long time. endrun initiated from CNBalanceCheckMod.F90.
+- Failing for long time. __cnbalancecheckmod_NMOD_cbalancecheck.
+- Failing for long time. Fail because erU61 fails.
+- Failing for long time. Fail because erU61 fails.
+- Have been failing for a long time .
+- Have been failing for a long time.
+- Have been failing for a long time.
+- Have been failing for a long time.
+-
+-
+-
+
+
+
+@@ -93,91 +59,108 @@
+
+
+
+-
+-
+- Initial simulation fails
+- Initial simulation fails
+- History files are different
+- Initial simulation fails
+- Initial simulation fails
+- History files are different on restart (known problem
+ restarting mid-day with _GLC: bug 1557)
+- Initial simulation fails
+- build error?
+-
+-
+
+
+- ????
+
+
+
+
+
+
+- ????
+
+
+
+
+- Initial simulation fails
+- Initial simulation fails case name too
+ long
+- History files are different on restart (known
+ problem restarting mid-day with _GLC: bug 1557)
++
++
++
++
++
++
++
++
++
+
+
+
+
+
+-
+-
+- T62 not working
+- Dies early with a floating point trap
+-
+- Baseline comp. test will always fail
+- build error?
+-
+-
+
+
+ History files are different on restart (known problem
+restarting mid-day with _GLC: bug 1557)
+
+
++
++
++
++
+
+
+ History files are different on restart (known problem restarting
+mid-day with _GLC: bug 1557)
++
++
++
++
++
++
++
++
++
++
++
+
+
+-
++
+
+
+-
+-
++
++
++
+ missing datasets
+ missing datasets
+- missing datasets
+-
+-
++ missing datasets
++ missing 0.1 mapping dataset (for RTM at R01)
++
++
++
++
++
++
+
++
++ scripts issue with ocean not threaded
++ Restart difference
++ Soil balance error on restart
++ scripts issue with ocean not threaded
++
++ scripts issue with ocean not threaded
++ bad compset name
++ surfdata and pftdyn file mismatched
++ Soil balance error on restart
++ scripts issue component not threaded
++ missing finidat file
++ missing finidat file
++
+
+- ????
+- ????
++ problem building with mpi-serial with pgi compiler
++ missing LAPACK symbol dgbsv
++ scripts issue with ocean not threaded
++ Need LAPACK for PGI (dgbsv)
++ Need LAPACK for PGI (dgbsv)
++ Bad compset name: ICNCROP
++ scripts issue with ocean not threaded
++
++ problem building with mpi-serial with pgi compiler
++ missing LAPACK symbol dgbsv
++ missing LAPACK symbol dgbsv
++ scripts issue with ocean not threaded
++ Need LAPACK for PGI (dgbsv)
++ Need LAPACK for PGI (dgbsv)
++ Bad compset name: ICNCROP
++ scripts issue with ocean not threaded
+
+
+
+Machines testing ran on: (Tests in priority order)
+
+ build-namelist unit tester: yes
+
+ CESM test lists:
+
+ yellowstone/CESM: yes
+ yellowstone/CESM/allIcompsets: yes
+
+ test_system testing:
+
+ yellowstone batch: yes
+ frankfurt interactive: yes
+ yellowstone interactive: yes
+ frankfurt batch: yes
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: yes
+ frankfurt interactive: yes
+
+ yellowstone/PTCLM: no (PTCLM still doesn't quite work)
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0 compared to clm4_0_59; clm4_5 compared to clm45sci15_clm4_0_58
+
+Changes answers relative to baseline: Yes, for some resolutions for clm4_0 -- because of new default initial condition files in compsets
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ For clm4_0...
+ New initial conditions for: f09/f19-I_2000, f09/f19-I_1850, f09/f19-I_1850-2000
+ f09/f19-I_1948-2004, f09/f19-I_1850_SPINUP_3HrWx
+ f09/f19-I_RCP
+
+===============================================================
+===============================================================
+Tag name: clm4_0_59
+Originator(s): mvertens (Mariana Vertenstein) / erik
+Date: Thu Dec 20 09:24:16 MST 2012
+One-line Summary: restructure clmtype and all pointer references, new directory structure
+
+Purpose of changes:
+ Restructure trunk directory tree to prepare for incorporation of clm4_5
+ Move all cpp-ifdefs to clm_varctl and introduce new logical variables in their place
+ Restructure clmtype to remove nesting - and also redo all the pointer references
+ All together the code can then move to having no cpp-ifdefs
+
+Test level of tag:
+ std-test
+
+Bugs fixed (include bugzilla ID):
+ None
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+ 1598 (non-repeatable results when running with CNDV and/or CROP)
+
+ Threading seems to work for all cases where CROP and/or CNDV
+ is not on
+
+Describe any changes made to build system:
+ New directory structure
+
+Describe any changes made to the namelist:
+ variables use_c13 and use_c14 added to namelist_definition.xml file
+
+List any changes to the defaults for the boundary datasets:
+ No
+
+Describe any substantial timing or memory changes:
+ Currently more memory for compsets without CN, etc - less memory
+ when CN, CNDV, etc are activated. This will be fixed in clm4_0_60.
+
+Code reviewed by:
+ self (proposed changes reviewed by Erik, Bill and Stefan)
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated:
+ src/biogeochem -> src/clm4_0/biogeochem
+ src/biogeophys -> src/clm4_0/biogeophys
+ src/main -> src/clm4_0/main
+
+
+List all files added and what they do:
+ None
+
+List all existing files that have been modified, and describe the changes:
+
+ All files in src/clm4_0 have been modified relative to their
+ original versions to remove the cpp-ifdefs and to adjust pointer
+ references to new names
+
+ M src/cpl_share/clm_cpl_indices.F90
+ M src/cpl_mct/lnd_comp_mct.F90
+ M src/cpl_esmf/lnd_comp_esmf.F90
+
+ M bld/configure
+ M bld/namelist_files/namelist_definition.xml
+ M test/system/yellowstone.interactive
+ M test/system/yellowstone.batch
+
+ Add in 360x720_cruncep datasets (from Erik).
+
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl --- correct number of tests
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml
+
+Machines testing ran on:
+
+ build-namelist unit tester: yes
+
+ CESM test lists:
+
+ yellowstone/CESM: yes
+ yellowstone/CESM/allIcompsets: no
+
+ PTCLM
+ yellowstone: no
+
+ test_system testing:
+
+ yellowstone batch: yes
+ yellowstone interactive: no
+ frankfurt batch: no
+ frankfurt interactive: yes
+
+ test_driver.sh tools testing:
+
+ yellowstone interactive: no
+ frankfurt interactive: no
+
+Difference in expected fails from testing:
+
++ Numbers change for build-namelist unit tests
+
+
+- ????
+
+
+
+
+- ????
+- ????
+- ????
+
+-
+- ????
+-
+
+
+CLM tag used for the baseline comparison tests if applicable:
+ clm4_0_58
+
+Changes answers relative to baseline:
+ no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_58
+Originator(s): erik (Erik Kluzek)
+Date: Fri Dec 14 05:13:33 MST 2012
+One-line Summary: Uncomment us20 and wus12 datasets, more testing to: bluefire, yellowstone, frankfurt
+
+Purpose of changes:
+
+Uncomment WRF grids in namelist xml files. Fix mkprocdata bug on lynx.
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+ 1601 (mkprocdata seg faults on lynx)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: No
+
+Describe any changes made to the namelist: No
+
+List any changes to the defaults for the boundary datasets: Yes
+ uncomment out wus12 and us20 WRF datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, Machines, csm_share
+
+ scripts to: scripts4_121207b
+ Machines to: Machines_121207
+ csm_share to: share3_121204a
+
+List all files eliminated: None
+
+List all files added and what they do: Add frankfurt test lists
+
+>>>>>>>>>>>>>>>> Tests for frankfurt
+ A models/lnd/clm/test/system/frankfurt.interactive
+ A models/lnd/clm/test/system/frankfurt.batch
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/test_system
+ M models/lnd/clm/test/system/config_files/config_CLMtestCompsets.xml
+
+ M models/lnd/clm/test/system/bluefire.batch
+ M models/lnd/clm/test/system/yellowstone.batch
+ M models/lnd/clm/test/system/nl_files/mkprocdata_ne30_to_f19_I2000
+
+>>>>>>>>>>>>>>>> Fix mkprocdata and allow it to run from a different exe directory for testing
+ M models/lnd/clm/tools/mkprocdata_map/src/mkprocdata_map.F90
+ M models/lnd/clm/tools/mkprocdata_map/src/gridmapMod.F90
+ M models/lnd/clm/tools/mkprocdata_map/src/fmain.F90
+ M models/lnd/clm/tools/mkprocdata_map/src/shr_file_mod.F90
+ M models/lnd/clm/tools/mkprocdata_map/src/fileutils.F90
+ M models/lnd/clm/tools/mkprocdata_map/mkprocdata_map_all
+ M models/lnd/clm/tools/mkprocdata_map/mkprocdata_map_wrap
+
+>>>>>>>>>>>>>>>> Fix bug in unit-tester
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+
+>>>>>>>>>>>>>>>> Uncomment WRF files
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+
+Machines testing ran on:
+
+ build-namelist unit tester: yes
+
+ CESM test lists:
+
+ bluefire/CESM: yes
+ bluefire/CESM/allIcompsets: yes
+ lynx/CESM: no
+
+ bluefire/PTCLM: no
+
+ test_system testing:
+
+ bluefire batch: yes
+ bluefire interactive: yes
+ yellowstone batch: yes
+ yellowstone interactive: yes
+ frankfurt batch: yes
+ frankfurt interactive: yes
+
+ test_driver.sh tools testing:
+
+ bluefire interactive: yes
+ lynx interactive: yes
+ yellowstone interactive: yes
+ frankfurt interactive: yes
+
+Difference in expected fails from testing:
+
+Index: expectedClmTestFails.xml
+===================================================================
+--- expectedClmTestFails.xml (revision 42691)
++++ expectedClmTestFails.xml (working copy)
+@@ -5,12 +5,12 @@
+
+
+
+-
+
+
+- ne16 missing finidat file for 1850
+- ne60 missing finidat file for 1850
+- 1x1_tropicAtl missing finidat file for 1850
++ missing datasets for us20
++ ne16 missing finidat file for 1850
++ ne60 missing finidat file for 1850
++ 1x1_tropicAtl missing finidat file for 1850
+
+
+
+@@ -24,6 +24,17 @@
+
+
+
++
++
++
++
++ missing datasets for wus12
++ missing datasets for us20
++ ne16 missing finidat file for 1850
++ ne60 missing finidat file for 1850
++ 1x1_tropicAtl missing finidat file for 1850
++
++
+
+
+
+@@ -101,6 +112,19 @@
+
+
+
++
++
++ ????
++ ????
++
++
++
++
++ Initial simulation fails
++ Initial simulation fails case name too
+long
++ History files are different on restart (known
+problem restarting mid-day with _GLC: bug 1557)
++
++
+
+
+
+@@ -118,6 +142,11 @@
+ History files are different on restart (known problem
+restarting mid-day with _GLC: bug 1557)
+
+
++
++
++ History files are different on restart (known problem restarting
+mid-day with _GLC: bug 1557)
++
++
+
+
+
+@@ -128,6 +157,18 @@
+ missing datasets
+
+
++
++
++ ????
++ ????
++ ????
++ ????
++ ????
++
++
++ ????
++
++
+
+
+
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_57
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+===============================================================
+Tag name: clm4_0_57
+Originator(s): muszala (Muszala Stefan 303-497-1320 CGD)
+Date: Fri Nov 30 14:20:13 MST 2012
+One-line Summary: update trunk with release mods, some rtm fixes
+
+Purpose of changes:
+
+CLM: Merge the changes Erik made in the release branch tags to trunk.
+RTM: Add effective velocity as a namelist variable.
+ Change rdirc file.
+ Add RTM tests to test_system batch CLM tests.
+ Clean up logic in RtmFloodInit so R01 works without SLOPE and MAX_VOLR.
+ Change rdirc file to rdirc_0.5x0.5_simyr2000_slpmxvl_c120717.nc which is
+ correct and contains FLOOD and MAX_VOLR. This fixes an error in choice
+ of rdirc file from clm4_0_55 / rtm1_0_10
+
+Requirements for tag:
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: Changed RTM rdirc file.
+
+Describe any substantial timing or memory changes: None Known
+
+Code reviewed by: Erik, Tony, Mariana (in progress)
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ - rtm1_0_13
+ - scripts4_121127
+ - Machines_121126
+ - drvseq4_2_13
+ - datm8_121123
+ - cism1_121114
+
+List all files eliminated:
+
+ - Deleted during release tag cleanup
+D models/lnd/clm/test/system/TCB.sh
+D models/lnd/clm/test/system/tests_pretag_bluefire
+D models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dh
+D models/lnd/clm/test/system/config_files/17p_cndvsc_m
+D models/lnd/clm/test/system/config_files/21p_cndvcrpsc_m
+D models/lnd/clm/test/system/config_files/17p_cndvsc_o
+D models/lnd/clm/test/system/config_files/21p_cndvcrpsc_o
+D models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dm
+D models/lnd/clm/test/system/config_files/_persc_dh
+D models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_do
+D models/lnd/clm/test/system/config_files/17p_cndvsc_s
+D models/lnd/clm/test/system/config_files/21p_cndvcrpsc_s
+D models/lnd/clm/test/system/config_files/_persc_dm
+D models/lnd/clm/test/system/config_files/_persc_do
+D models/lnd/clm/test/system/config_files/_scnv_ds
+D models/lnd/clm/test/system/config_files/_persc_ds
+D models/lnd/clm/test/system/config_files/17p_sc_dh
+D models/lnd/clm/test/system/config_files/17p_sc_dm
+D models/lnd/clm/test/system/config_files/17p_sc_do
+D models/lnd/clm/test/system/config_files/_sc_dh
+D models/lnd/clm/test/system/config_files/17p_sc_ds
+D models/lnd/clm/test/system/config_files/_sc_dm
+D models/lnd/clm/test/system/config_files/21p_cncrpsc_h
+D models/lnd/clm/test/system/config_files/17p_cnsc_h
+D models/lnd/clm/test/system/config_files/_sc_do
+D models/lnd/clm/test/system/config_files/21p_cncrpsc_dh
+D models/lnd/clm/test/system/config_files/17p_cnsc_dh
+D models/lnd/clm/test/system/config_files/21p_cncrpsc_m
+D models/lnd/clm/test/system/config_files/17p_cnsc_m
+D models/lnd/clm/test/system/config_files/_sc_ds
+D models/lnd/clm/test/system/config_files/21p_cncrpsc_o
+D models/lnd/clm/test/system/config_files/17p_cnsc_o
+D models/lnd/clm/test/system/config_files/17p_cnsc_dm
+D models/lnd/clm/test/system/config_files/21p_cncrpsc_dm
+D models/lnd/clm/test/system/config_files/17p_cnsc_do
+D models/lnd/clm/test/system/config_files/17p_cnc13sc_dh
+D models/lnd/clm/test/system/config_files/21p_cncrpsc_do
+D models/lnd/clm/test/system/config_files/21p_cncrpsc_s
+D models/lnd/clm/test/system/config_files/17p_sc_h
+D models/lnd/clm/test/system/config_files/17p_cnsc_ds
+D models/lnd/clm/test/system/config_files/21p_cncrpsc_ds
+D models/lnd/clm/test/system/config_files/17p_cnc13sc_dm
+D models/lnd/clm/test/system/config_files/_mexsc_ds
+D models/lnd/clm/test/system/config_files/17p_cnc13sc_do
+D models/lnd/clm/test/system/config_files/17p_sc_m
+D models/lnd/clm/test/system/config_files/17p_sc_o
+D models/lnd/clm/test/system/config_files/_sc_h
+D models/lnd/clm/test/system/config_files/17p_cnnfsc_dh
+D models/lnd/clm/test/system/config_files/_sc_m
+D models/lnd/clm/test/system/config_files/17p_cnnfsc_dm
+D models/lnd/clm/test/system/config_files/_sc_o
+D models/lnd/clm/test/system/config_files/17p_cndvsc_dh
+D models/lnd/clm/test/system/config_files/21p_cndvcrpsc_dh
+D models/lnd/clm/test/system/config_files/17p_cnnfsc_do
+D models/lnd/clm/test/system/config_files/_sc_s
+D models/lnd/clm/test/system/config_files/17p_cndvsc_dm
+D models/lnd/clm/test/system/config_files/21p_cndvcrpsc_dm
+D models/lnd/clm/test/system/config_files/17p_cndvsc_do
+D models/lnd/clm/test/system/config_files/21p_cndvcrpsc_do
+D models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dh
+D models/lnd/clm/test/system/config_files/21p_cndvcrpsc_ds
+D models/lnd/clm/test/system/config_files/_vansc_ds
+D models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dm
+D models/lnd/clm/test/system/config_files/17p_cnadspinupsc_do
+D models/lnd/clm/test/system/config_files/_nil3sc_dh
+D models/lnd/clm/test/system/config_files/_nil3sc_dm
+D models/lnd/clm/test/system/config_files/_scsnf_dh
+D models/lnd/clm/test/system/config_files/_scsnf_dm
+D models/lnd/clm/test/system/config_files/_scsnf_do
+D models/lnd/clm/test/system/config_files/21p_cndvcrpsc_h
+D models/lnd/clm/test/system/config_files/17p_cndvsc_h
+D models/lnd/clm/test/system/TBL.sh
+D models/lnd/clm/test/system/tests_pretag_edinburgh
+D models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+D models/lnd/clm/test/system/TBR.sh
+D models/lnd/clm/test/system/TER.sh
+D models/lnd/clm/test/system/mknamelist
+D models/lnd/clm/test/system/tests_posttag_hybrid_regression
+D models/lnd/clm/test/system/tests_posttag_purempi_regression
+D models/lnd/clm/test/system/TRP.sh
+D models/lnd/clm/test/system/tests_pretag_jaguarpf
+D models/lnd/clm/test/system/TSMrst_tools.sh
+D models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi
+D models/lnd/clm/test/system/nl_files/nl_per
+D models/lnd/clm/test/system/nl_files/nl_voc
+D models/lnd/clm/test/system/nl_files/clm_std
+D models/lnd/clm/test/system/nl_files/multi_inst
+D models/lnd/clm/test/system/nl_files/multi_inst/multi_inst
+D models/lnd/clm/test/system/nl_files/multi_inst/multi_inst_1
+D models/lnd/clm/test/system/nl_files/multi_inst/multi_inst_2
+D models/lnd/clm/test/system/nl_files/multi_inst/multi_inst_3
+D models/lnd/clm/test/system/nl_files/clm_nortm
+D models/lnd/clm/test/system/nl_files/clm_transient_rcp2.6
+D models/lnd/clm/test/system/nl_files/clm_ndepdyn
+D models/lnd/clm/test/system/nl_files/clm_transient_rcp4.5
+D models/lnd/clm/test/system/nl_files/clm_pftdyn
+D models/lnd/clm/test/system/nl_files/clm_transient_rcp8.5
+D models/lnd/clm/test/system/nl_files/clm_per0
+D models/lnd/clm/test/system/nl_files/nl_ptsmode_ocn
+D models/lnd/clm/test/system/nl_files/nl_urb_br
+D models/lnd/clm/test/system/nl_files/clm_spin
+D models/lnd/clm/test/system/nl_files/clm_transient_glcMEC_rcp6
+D models/lnd/clm/test/system/nl_files/clm_urb1pt
+D models/lnd/clm/test/system/nl_files/nl_urb
+D models/lnd/clm/test/system/nl_files/nl_crcrop
+D models/lnd/clm/test/system/nl_files/clm_per
+D models/lnd/clm/test/system/nl_files/clm_drydep
+D models/lnd/clm/test/system/nl_files/nl_std
+D models/lnd/clm/test/system/nl_files/clm_glcmec
+D models/lnd/clm/test/system/nl_files/clm_transient_rcp6
+D models/lnd/clm/test/system/nl_files/nl_crop
+D models/lnd/clm/test/system/nl_files/clm_usrdat
+D models/lnd/clm/test/system/nl_files/nl_cn_conly
+D models/lnd/clm/test/system/nl_files/clm_stdIgnYr
+D models/lnd/clm/test/system/nl_files/clm_transient_20thC
+D models/lnd/clm/test/system/nl_files/nl_ptsmode
+D models/lnd/clm/test/system/nl_files/clm_transient_glcMEC_rcp2.6
+D models/lnd/clm/test/system/nl_files/clm_irrig
+D models/lnd/clm/test/system/nl_files/clm_transient_glcMEC_rcp4.5
+D models/lnd/clm/test/system/nl_files/nl_lfiles
+D models/lnd/clm/test/system/nl_files/clm_transient_glcMEC_rcp8.5
+D models/lnd/clm/test/system/TSMpergro.sh
+D models/lnd/clm/test/system/TSMcnspinup.sh
+D models/lnd/clm/test/system/TBLrst_tools.sh
+D models/lnd/clm/test/system/CLM_runcmnd.sh
+D models/lnd/clm/test/system/TSM.sh
+D models/lnd/clm/test/system/tests_posttag_lynx
+D models/lnd/clm/tools/mkprocdata_map/camhomme
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/mkprocdata_map.F90
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/gridmapMod.F90
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/Depends
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/domainMod.F90
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/shr_file_mod.F90
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/nanMod.F90
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/Srcfiles
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/Filepath
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/Makefile
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/fileutils.F90
+D models/lnd/clm/tools/mkprocdata_map/camhomme/src/shr_kind_mod.F90
+D models/lnd/clm/tools/mkprocdata_map/camhomme/mkprocdata_map_in
+D models/lnd/clm/tools/mkprocdata_map/clm/src
+D models/lnd/clm/tools/mkprocdata_map/clm/src/mkprocdata_map.F90
+D models/lnd/clm/tools/mkprocdata_map/clm/src/gridmapMod.F90
+D models/lnd/clm/tools/mkprocdata_map/clm/src/constMod.F90
+D models/lnd/clm/tools/mkprocdata_map/clm/src/fmain.F90
+D models/lnd/clm/tools/mkprocdata_map/clm/src/shr_file_mod.F90
+D models/lnd/clm/tools/mkprocdata_map/clm/src/nanMod.F90
+D models/lnd/clm/tools/mkprocdata_map/clm/src/Mkdepends
+D models/lnd/clm/tools/mkprocdata_map/clm/src/Srcfiles
+D models/lnd/clm/tools/mkprocdata_map/clm/src/Filepath
+D models/lnd/clm/tools/mkprocdata_map/clm/src/Makefile
+D models/lnd/clm/tools/mkprocdata_map/clm/src/fileutils.F90
+D models/lnd/clm/tools/mkprocdata_map/clm/src/shr_kind_mod.F90
+D models/lnd/clm/tools/mkprocdata_map/clm/mkprocdata_map_in
+D models/lnd/clm/tools/mkprocdata_map/clm/mkprocdata_map_all
+D models/lnd/clm/tools/mkprocdata_map/clm/mkprocdata_map_wrap
+D models/lnd/clm/tools/mkprocdata_map/clm/README
+D models/lnd/clm/tools/ncl_scripts/RMSbluefire.dat
+D models/lnd/clm/tools/ncl_scripts/RMSlahey.dat
+D models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl
+D models/lnd/clm/tools/ncl_scripts/RMSjaguar.dat
+D models/lnd/clm/tools/ncl_scripts/RMSintel.dat
+D models/lnd/clm/tools/ncl_scripts/RMSintrepid.dat
+D models/lnd/clm/tools/ncl_scripts/pergroPlot.ncl
+D models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl
+D models/lnd/clm/tools/ncl_scripts/runDepositionRegrid.pl
+D models/lnd/clm/tools/ncl_scripts/ndeplintInterp.ncl
+D models/lnd/clm/tools/ncl_scripts/pftdyntest2raw.ncl
+D models/lnd/clm/bld/config_files/config_sys_defaults.xml
+D models/lnd/clm/bld/namelist_files/checkdatmfiles.ncl
+D models/lnd/clm/bld/namelist_files/datm-build-namelist
+D models/lnd/clm/bld/namelist_files/checklatsfiles.ncl
+D models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml
+D models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml
+
+List all files added and what they do:
+
+ - Added for RTM testing
+A models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOn/user_nl_rtm
+A models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOn
+A models/lnd/clm/test/system/user_nl_dirs/rtm/rtmR01/user_nl_rtm
+A models/lnd/clm/test/system/user_nl_dirs/rtm/rtmR01
+A models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOff/user_nl_rtm
+A models/lnd/clm/test/system/user_nl_dirs/rtm/rtmOff
+A models/lnd/clm/test/system/user_nl_dirs/rtm
+ - Added from release tags
+A models/lnd/clm/test/system/yellowstone.interactive
+A models/lnd/clm/test/system/tests_pretag_yellowstone_nompi
+A models/lnd/clm/test/system/nl_files/mkprocdata_ne30_to_f19_I2000
+A models/lnd/clm/test/system/yellowstone.batch
+A models/lnd/clm/tools/mkprocdata_map/clm4054_ne30g16_I2000.clm2.h0.2000-01_c121107.nc
+A models/lnd/clm/tools/mkprocdata_map/map_ne30np4_nomask_to_fv1.9x2.5_nomask_aave_da_c121107.nc
+A models/lnd/clm/tools/mkprocdata_map/mkprocdata_map_functions.bash
+A models/lnd/clm/tools/mkprocdata_map/src/mkprocdata_map.F90
+A models/lnd/clm/tools/mkprocdata_map/src/gridmapMod.F90
+A models/lnd/clm/tools/mkprocdata_map/src/constMod.F90
+A models/lnd/clm/tools/mkprocdata_map/src/Makefile.common
+A models/lnd/clm/tools/mkprocdata_map/src/fmain.F90
+A models/lnd/clm/tools/mkprocdata_map/src/shr_file_mod.F90
+A models/lnd/clm/tools/mkprocdata_map/src/nanMod.F90
+A models/lnd/clm/tools/mkprocdata_map/src/Mkdepends
+A models/lnd/clm/tools/mkprocdata_map/src/Srcfiles
+A models/lnd/clm/tools/mkprocdata_map/src/Filepath
+A models/lnd/clm/tools/mkprocdata_map/src/Makefile
+A models/lnd/clm/tools/mkprocdata_map/src/fileutils.F90
+A models/lnd/clm/tools/mkprocdata_map/src/shr_kind_mod.F90
+A models/lnd/clm/tools/mkprocdata_map/src
+A models/lnd/clm/tools/mkprocdata_map/mkprocdata_map_in
+A models/lnd/clm/tools/mkprocdata_map/mkprocdata_map_all
+A models/lnd/clm/tools/mkprocdata_map/mkprocdata_map_wrap
+A models/lnd/clm/tools/mkprocdata_map/clm4054_f19g16_I2000.clm2.h0.2000-01_c121107.nc
+A models/lnd/clm/tools/mkprocdata_map/README
+A models/lnd/clm/bld/config_query
+A models/lnd/clm/doc/UsersGuide/modelnl/xmldef2html_compsets
+A models/lnd/clm/doc/UsersGuide/modelnl/showinfo.js
+A models/lnd/clm/doc/UsersGuide/modelnl/index.cpp
+A models/lnd/clm/doc/UsersGuide/modelnl/Makefile
+A models/lnd/clm/doc/UsersGuide/modelnl
+
+List all existing files that have been modified, and describe the changes:
+
+ - put back qflx_snomelt for consistency with older models. clm4_0_55 mods to the snow
+ balance check otherwise only effect the diagnostic fields errh2osno, snow_source and snow_sinks
+M models/lnd/clm/src/main/histFldsMod.F90
+ - modified for RTM testing
+M config_files/config_CLMtestCompsets.xml
+M bluefire.batch
+ - modified during release tag modification
+M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+M models/lnd/clm/test/system/mirage.interactive
+M models/lnd/clm/test/system/test_system
+M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+M models/lnd/clm/test/system/config_files/config_CLMtestCompsets.xml
+M models/lnd/clm/test/system/config_files/README
+M models/lnd/clm/test/system/README.testnames
+M models/lnd/clm/test/system/tests_posttag_yong
+M models/lnd/clm/test/system/TCBtools.sh
+M models/lnd/clm/test/system/test_driver.sh
+M models/lnd/clm/test/system/lynx.interactive
+M models/lnd/clm/test/system/shortlist.interactive
+M models/lnd/clm/test/system/TSMscript_tools.sh
+M models/lnd/clm/test/system/tests_posttag_mirage
+M models/lnd/clm/test/system/gen_test_table.sh
+M models/lnd/clm/test/system/input_tests_master
+M models/lnd/clm/test/system/README
+M models/lnd/clm/test/system/tests_posttag_nompi_regression
+M models/lnd/clm/test/system/bluefire.interactive
+M models/lnd/clm/tools/mkmapdata/mkmapdata.sh
+M models/lnd/clm/tools/ncl_scripts
+M models/lnd/clm/tools/ncl_scripts/README
+M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl
+M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl
+M models/lnd/clm/tools/interpinic
+M models/lnd/clm/tools/mksurfdata_map/src
+M models/lnd/clm/tools/mksurfdata_map/src/mkncdio.F90
+M models/lnd/clm/tools/mksurfdata_map/src/mkutilsMod.F90
+M models/lnd/clm/tools/mksurfdata_map/src/mkdomainMod.F90
+M models/lnd/clm/tools/mksurfdata_map/src/mksurfdat.F90
+M models/lnd/clm/tools/mksurfdata_map/src/mkgridmapMod.F90
+M models/lnd/clm/tools/mksurfdata_map/mksurfdata_map.namelist
+M models/lnd/clm/tools/mksurfdata_map/mksurfdata.pl
+M models/lnd/clm/tools/mksurfdata_map/pftdyn_hist_simyr1850-2005.txt
+M models/lnd/clm/bld
+M models/lnd/clm/bld/configure
+M models/lnd/clm/bld/queryDefaultNamelist.pl
+M models/lnd/clm/bld/user_nl_clm
+M models/lnd/clm/bld/config_files/config_definition.xsl
+M models/lnd/clm/bld/config_files/config_definition.xml
+M models/lnd/clm/bld/listDefaultNamelist.pl
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/clm.cpl7.template
+M models/lnd/clm/bld/clm.buildnml.csh
+M models/lnd/clm/bld/README
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl
+M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000_glacierMEC_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850_glacierMEC_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/glacierMEC_pd.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml
+M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+M models/lnd/clm/doc/UsersGuide/appendix.xml
+M models/lnd/clm/doc/UsersGuide/ptclm.xml
+M models/lnd/clm/doc/Quickstart.userdatasets
+M models/lnd/clm/doc/IMPORTANT_NOTES
+M models/lnd/clm/doc/Quickstart.GUIDE
+M models/lnd/clm/doc/ChangeLog
+M models/lnd/clm/doc/CodeReference/Filepath
+M models/lnd/clm/doc/KnownLimitations
+M models/lnd/clm/doc/ChangeSum
+M models/lnd/clm/doc/KnownBugs
+M models/lnd/clm/doc/README
+M models/lnd/clm/src/biogeochem/DryDepVelocity.F90
+M ChangeLog
+M ChangeSum
+M README
+
+Machines testing ran on: (in progress)
+
+ build-namelist unit tester: yes
+
+ - OK. All FAILs (~78 of them) should pass during the next round.
+
+ CESM test lists:
+
+ bluefire/CESM: yes
+
+       -Fail due to throughput comparison problems:
+
+ FAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.GC.164220.tputcomp.clm4_0_56
+ FAIL ERS_Lm3.1x1_vancouverCAN.I1PT.bluefire_ibm.GC.164220.tputcomp.clm4_0_56
+
+ -Fail due to new and correct rdirc file. diffs in r2x_Forr_roff & r2x_Forr_ioff
+ These should pass next time around:
+
+ FAIL ERS_D.f45_g37.I.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL NCK.T31_g37.I.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL PET_PT.f45_g37.I1850.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERS_E.T31_g37.I1850.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERI.T31_g37.IG1850.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERH_D.T31_g37.I1850CN.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL SMS.T31_g37.IG4804.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL SMS.1x1_mexicocityMEX.I1PT.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERS_Lm3.1x1_vancouverCAN.I1PT.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERS_Lm3.f19_g16.IGRCP60CN.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL PET_PT.f10_f10.I20TRCN.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL SMS.f10_f10.IRCP45CN.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+ FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.GC.164220.compare_hist.clm4_0_56
+
+ bluefire/CESM/allIcompsets: yes
+
+ Error in SBN script handling in generate of namelist files so all compare tests are BFAILs.
+
+ lynx/CESM: no
+
+ bluefire/PTCLM: no
+
+ test_system testing:
+
+ bluefire batch: yes
+
+    4 xFAIL. The rest fail now, but should pass the next time around.
+
+ - Fail due to new and correct rdirc file. diffs in r2x_Forr_roff & r2x_Forr_ioff
+ These should pass next time around:
+
+ FAIL ERS_Ld211.f10_f10.ICNADSPIN.bluefire_ibm.GC.164759.compare_hist.clm4_0_56
+ FAIL ERS_Ln48_D_P64x16.ne30_g16.ICN.bluefire_ibm.GC.164759.compare_hist.clm4_0_56
+ FAIL PET_D_P1x64.ne30_g16.ICN.bluefire_ibm.GC.164759.compare_hist.clm4_0_56
+ FAIL ERS_Ld211.f10_f10.I_2000_VOC_CN.bluefire_ibm.GC.164759.compare_hist.clm4_0_56
+
+ - Fail due to throughput comparison
+
+ FAIL ERS_Ld211.f10_f10.ICNADSPIN.bluefire_ibm.GC.164759.tputcomp.clm4_0_56
+ FAIL ERS_Ld211_P192x2.f19_g16.I_2000_CNDV_CROP.bluefire_ibm.GC.164759.tputcomp.clm4_0_56
+
+ - Will pass next time, these tests just introduced
+
+ BFAIL ERS.f19_g16.I_2000_CN_rtmR01.bluefire_ibm.GC.164759.compare_hist.clm4_0_56
+ BFAIL ERS.f19_g16.I_2000_CN_rtmOff.bluefire_ibm.GC.164759.compare_hist.clm4_0_56
+ BFAIL ERS.f19_g16.I_2000_CN_rtmOn.bluefire_ibm.GC.164759.compare_hist.clm4_0_56
+
+ bluefire interactive: yes
+
+ - xFAIL or new tests that will pass next time (missing baselines):
+
+ BFAIL ERS_D_Mmpi-serial.CLM_USRDAT.IalaskaCN.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_D_Mmpi-serial.CLM_USRDAT.I_2000_1PTFRC_US-UMB.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_P1x64_Mmpi-serial.f45_g37.I_1850_SPINUP_3HrWx_CN.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_Ld211_Mmpi-serial.1x1_camdenNJ.I_2000_VOC.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_Ly3_Mmpi-serial.1x1_brazil.I_2000.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_D_P1x25_Mmpi-serial.5x5_amazon.I_2000.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_D_Mmpi-serial.1x1_asphaltjungleNJ.I_2000_VOC.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_Ld211_Mmpi-serial.1x1_brazil.I_2000_CN.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_Ln48_D_P1x64_Mmpi-serial.f19_g16.I_2000_GLCMECPD.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_Ly3_Mmpi-serial.1x1_brazil.I_2000_CNDV.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+ BFAIL ERS_Ld211_Mmpi-serial.1x1_brazil.I_2000_VOC_SNCRFRC_CN.bluefire_ibm.GC.164744.compare_hist.clm4_0_56
+
+ lynx/pgi batch: yes
+
+   - xFAIL or will pass next time (new rdirc file affecting r2x_Forr_roff & r2x_Forr_ioff)
+
+ FAIL ERS_Ln48_D.f45_g37.I_2000_VOC.lynx_pgi.GC.170117.compare_hist.clm4_0_56
+ FAIL ERS_Ln48_D.f10_f10.I_2000_CN.lynx_pgi.GC.170117.compare_hist.clm4_0_56
+
+ lynx/pgi interactive: yes
+
+ - OK except for new test that will pass next time (missing baselines):
+
+ BFAIL SMS_RLA_Mmpi-serial.f45_f45.I.lynx_pgi.GC.170039.compare_hist.clm4_0_56
+ BFAIL SMS_Mmpi-serial.CLM_USRDAT.I_2000_1PTFRC_US-UMB.lynx_pgi.GC.170039.compare_hist.clm4_0_56
+ BFAIL ERS_Ld211_Mmpi-serial.1x1_brazil.I_2000.lynx_pgi.GC.170039.compare_hist.clm4_0_56
+ BFAIL ERS_Ln48_D_P1x12_Mmpi-serial.f10_f10.ICNCROP.lynx_pgi.GC.170039.compare_hist.clm4_0_56
+ BFAIL ERS_Ld211_Mmpi-serial.1x1_camdenNJ.I_2000_VOC.lynx_pgi.GC.170039.compare_hist.clm4_0_56
+ BFAIL SMS_D_Mmpi-serial.1x1_vancouverCAN.I1PT.lynx_pgi.GC.170039.compare_hist.clm4_0_56
+ BFAIL ERS_Mmpi-serial.1x1_mexicocityMEX.I1PT.lynx_pgi.GC.170039.compare_hist.clm4_0_56
+
+  lynx/intel mirage testlist: yes
+
+ - OK except for new tests will pass next time (missing baselines):
+
+ BFAIL ERS_Mmpi-serial.1x1_brazil.I_2000.lynx_intel.GC.095009.compare_hist.clm4_0_56
+ BFAIL ERI_D_Mmpi-serial.1x1_camdenNJ.I_2000_VOC.lynx_intel.GC.095009.compare_hist.clm4_0_56
+ BFAIL ERS_D_Mmpi-serial.1x1_asphaltjungleNJ.I_2000_VOC.lynx_intel.GC.095009.compare_hist.clm4_0_56
+ BFAIL ERS_Ln48_D_P1x12_Mmpi-serial.f10_f10.I_2000_CN.lynx_intel.GC.095009.compare_hist.clm4_0_56
+
+ test_driver.sh tools testing:
+
+ bluefire interactive: yes
+
+ test 001 fails due to a bug in mkprocdata_map_wrap
+ test 002 fails due to 001
+ test 008 will pass next time
+
+ lynx interactive: no
+
+CLM tag used for the baseline comparison tests if applicable:
+
+Changes answers relative to baseline:
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: modification to default value for effvel in RtmMod.F90
+ changes the values of r2x_Forr_roff & r2x_Forr_ioff. This causes cprnc to fail.
+ This should pass in the next round and matches the value found in CLM4.5.
+
+ - real(r8),parameter :: effvel(nt_rtm) = 0.7_r8 ! downstream velocity (m/s)
+ + real(r8),parameter :: effvel(nt_rtm) = 1.0_r8 ! downstream velocity (m/s)
+
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ with flooding on and new rdirc file, climate may be different.
+ with flooding off we have b4b
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+===============================================================
+Tag name: clm4_0_56
+Originator(s): sacks (Sacks Bill 303-497-1762 CGD)
+Date: Tue Nov 27 14:12:42 MST 2012
+One-line Summary: fix s2x tsrf, add s2x diagnostics
+
+Purpose of changes:
+
+The s2x tsrf field was not being time-averaged; this is fixed now.
+
+Also, add history fields giving per-column diagnostics of the fields sent
+from CLM to GLC.
+
+Requirements for tag:
+ fix bug 1590
+ test on bluefire (CESM, int, bat), lynx/pgi (int,bat), mirage-test for lynx_intel
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID): 1590 (surface temperature sent from CLM to GLC not averaged properly)
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: erik
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>> Do time-averaging of tsrf field; remove calls to create_clm_s2x
+M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+
+>>>>>>> Call to create_clm_s2x now done here instead of lnd_comp_mct /
+>>>>>>> lnd_comp_esmf, so that clm_s2x can be used for hist file writes
+>>>>>>> (this is needed so that the fields are updated before the history
+>>>>>>> updates happen in the driver)
+M models/lnd/clm/src/main/clm_initializeMod.F90
+M models/lnd/clm/src/main/clm_driver.F90
+
+>>>>>>> Clean up interface to create_clm_s2x
+M models/lnd/clm/src/main/clm_glclnd.F90
+
+>>>>>>> Add capability to output fields sent from CLM to GLC
+M models/lnd/clm/src/main/histFileMod.F90
+M models/lnd/clm/src/main/histFldsMod.F90
+
+>>>>>>> Remove non-existent PMT test
+M models/lnd/clm/test/system/lynx.batch
+
+>>>>>>> Add ERS_Ln48_P96x2.f19_g16.I_2000_VOC_SNCRFRC_CN_GLCMECPD to xFail
+>>>>>>> list; add comment
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+
+Machines testing ran on:
+
+ build-namelist unit tester: no
+
+ CESM test lists:
+
+ bluefire/CESM: yes
+ All PASS except:
+ FAIL ERI.T31_g37.IG1850.bluefire_ibm.GC.101712.compare_hist.clm4_0_55
+ FAIL SMS.T31_g37.IG4804.bluefire_ibm.GC.101712.compare_hist.clm4_0_55
+ FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.GC.101712.compare_hist.clm4_0_55
+ FAIL ERS_Lm3.f19_g16.IGRCP60CN.bluefire_ibm.GC.101712.compare_hist.clm4_0_55
+
+ These are expected failures: diffs in topo and tsrf fields
+ sent to coupler, and topo diffs are small (RMS ~ 1e-13)
+
+ bluefire/CESM/allIcompsets: no
+ lynx/CESM: no
+
+ bluefire/PTCLM: no
+
+ test_system testing:
+
+ bluefire batch: yes
+ All PASS or xFAIL except:
+ FAIL ERS_Ld211_P192x2.f19_g16.I_2000_CNDV_CROP.bluefire_ibm.GC.101753.compare_hist.clm4_0_55
+
+ I believe this is an old problem, not due to the changes here: see bug 1598
+
+ bluefire interactive: yes
+ All PASS or xFAIL
+
+ lynx/pgi batch: yes
+ All PASS or xFAIL
+
+ lynx/pgi interactive: yes
+ All PASS
+
+  lynx/intel mirage testlist: yes
+ All PASS
+
+ test_driver.sh tools testing:
+
+ bluefire interactive: no
+ lynx interactive: no
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_55
+
+Difference in expected fails from testing:
+
+ Note: the additional expected fail is NOT a new failure, it is just newly
+ documented
+
+ --- models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml (.../trunk_tags/clm4_0_55) (revision 42229)
+ +++ models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml (.../branches/fix_glc_tsrf) (revision 42229)
+ @@ -90,7 +90,7 @@
+
+
+ Initial simulation fails
+ - History files are different on restart
+ + History files are different on restart (known problem restarting mid-day with _GLC: bug 1557)
+ Initial simulation fails
+ build error?
+
+ @@ -113,6 +113,9 @@
+
+
+
+ +
+ + History files are different on restart (known problem restarting mid-day with _GLC: bug 1557)
+ +
+
+
+
+
+Changes answers relative to baseline: YES: changes tsrf and topo fields
+sent to GLC (everything else bfb)
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: glc_mec
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ new climate for tsrf; roundoff-level for topo field sent to GLC. Note that these
+ fields are limited to GLC, and don't feed back to the atmosphere at all.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? For topo: examined differences in cprnc output
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate (put details of the simulations in the experiment database)
+ - casename: None done
+
+ URL for LMWG diagnostics output used to validate new climate: N/A
+
+===============================================================
+===============================================================
+Tag name: clm4_0_55
+Originator(s): muszala (Muszala Stefan 303-497-1320 CGD)
+Date: Thu Nov 15 10:17:23 MST 2012
+One-line Summary: bring in flooding capability
+
+Purpose of changes:
+
+  Test driver mods from Tony that allow flooding from rof to lnd. Also
+ brought in code from the rtmflood branch to handle the new flooding values.
+  Fthresh calculated by reading SLOPE and MAX_VOLR from the rdirc file. Merged
+  in qflx_snow_melt from Swenson's permafrost sims branch to fix snow
+ balance problems in BalanceCheckMod.F90.
+
+Requirements for tag:
+
+ Test flooding code in CLM by varying fthresh. Test coupler mods by
+ plotting coupler fields. Look at differences in overall energy balance
+ with and without flooding.
+
+Test level of tag: doc, critical, standard, std-test, reg-test
+
+ Critical. Bluefire CESM/CLM tests and namelist tests only
+
+Bugs fixed (include bugzilla ID):
+
+ N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+ N/A
+
+Describe any changes made to the namelist:
+
+ N/A
+
+List any changes to the defaults for the boundary datasets:
+
+ N/A
+
+Describe any substantial timing or memory changes:
+
+ N/A
+
+Code reviewed by:
+
+ Tony Craig, Sean Swenson
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ Updated all of the following (in relation to clm4_0_54)
+ scripts4_121105
+ Machines_121106
+ drvseq4_2_11
+ rtm1_0_10
+ cism1_121012
+ share3_121025
+ pio1_5_7
+ mapping_121106
+
+List all files eliminated:
+
+ N/A
+
+List all files added and what they do:
+
+ N/A
+
+List all existing files that have been modified, and describe the changes:
+
+ -the following all for bringing in qflx_snow_melt for new
+ -balance check calculation with flooding
+ M models/lnd/clm/src/main/clmtypeInitMod.F90
+ M models/lnd/clm/src/main/histFldsMod.F90
+ M models/lnd/clm/src/main/mkarbinitMod.F90
+ M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+ M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90
+ M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90
+ M models/lnd/clm/src/biogeophys/Hydrology1Mod.F90
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90
+ M models/lnd/clm/src/biogeophys/HydrologyLakeMod.F90
+
+ - mods to bring in flooding from rtmflood
+ M main/clm_driver.F90
+  M main/clmtype.F90
+ M main/clmtypeInitMod.F90
+ M main/histFldsMod.F90
+ M main/mkarbinitMod.F90
+ M main/clm_varcon.F90
+
+ - fixes for some test problems
+ M build-namelist_test.pl
+ M clm/bld/configure
+
+Machines testing ran on:
+
+ build-namelist unit tester: yes
+
+ 6 xFails - all OK.
+
+ CESM test lists:
+
+ bluefire/CESM: yes
+
+ Fail due to new coupler fields:
+
+ new field r2x_Forr_roff
+
+ NCK.T31_g37.I.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ ERS_E.T31_g37.I1850.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ ERI.T31_g37.IG1850.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ ERH_D.T31_g37.I1850CN.bluefire_ibm.GC.165350.compare_hist.clm4_0_54
+ SMS.T31_g37.IG4804.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.GC.165350.compare_hist.clm4_0_54
+
+ new fields r2x_Forr_roff & r2x_Forr_ioff
+
+ ERS_D.f45_g37.I.bluefire_ibm.GC.165350.compare_hist.clm4_0_54
+ PET_PT.f45_g37.I1850.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ ERB.ne30_g16.I_1948-2004.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ ERS_Lm3.f19_g16.IGRCP60CN.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ PET_PT.f10_f10.I20TRCN.bluefire_ibm.GC.165350.compare_hist.clm4_0_54
+ SMS.f10_f10.IRCP45CN.bluefire_ibm.C.092829.compare_hist.clm4_0_54
+ ERS_D.f19_g16.IRCP85CN.bluefire_ibm.GC.165350.compare_hist.clm4_0_54
+
+ Fail due to throughput differences:
+
+ FAIL ERS_D.f45_g37.I.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERS_E.T31_g37.I1850.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERI.T31_g37.IG1850.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERH_D.T31_g37.I1850CN.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL SMS.T31_g37.IG4804.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL SMS.1x1_numaIA.ICN.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERS_Lm3.1x1_vancouverCAN.I1PT.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERS_Lm3.f19_g16.IGRCP60CN.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL SMS.f10_f10.IRCP45CN.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+ FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.GC.165350.tputcomp.clm4_0_54
+
+ bluefire/CESM/allIcompsets: no
+ lynx/CESM: no
+
+ bluefire/PTCLM: no
+
+ test_system testing:
+
+ bluefire batch: no
+ bluefire interactive: no
+ lynx/pgi batch: no
+ lynx/pgi interactive: no
+   lynx/intel mirage testlist: no
+
+ test_driver.sh tools testing:
+
+ bluefire interactive: no
+ lynx interactive: no
+
+CLM tag used for the baseline comparison tests if applicable:
+
+ us20/wus12 tests were removed so removed from expected fail
+ ne16/ne60/1x1_tropicAtl 20thC transient tests fails -- need finidat files
+ New bug: ERS_Ln48_D_P1x64.f19_g16.I_2000_GLCMECPD (bugzilla 1557)
+ New testname: ERS_Ln48_D_P1x64.f45_g37.I_2000_VOC (was ERS48s_...)
+ New fail: ERS_Ld211.1x1_camdenNJ.I_2000_VOC, ERS_Ld211_D_P112x2.f10_f10.ICNCROP
+ Some ERS_L tests now pass that failed previously
+ I1PT tests pass now
+ Most SBN tests pass now
+ Intel single point tests pass now
+@@ -5,14 +5,11 @@
+
+
+
+- us20 not fully implmented
+- us20 not fully implmented
+- wus12 not fully implmented
+- wus12 not fully implmented
+
+
+- us20 not fully implemented
+- wus12 not fully implemented
++ ne16 missing finidat file for 1850
++ ne60 missing finidat file for 1850
++ 1x1_tropicAtl missing finidat file for 1850
+
+
+
+@@ -89,19 +86,17 @@
+ CESM script issue
+ Restart length different
+ Restart length different
+- Initial simulation fails
++ Initial simulation fails
++ History files are different on restart
+ Initial simulation fails
+ build error?
+
+
+
+
+- datm namelist problem for single-point forcing
+- datm namelist problem for single-point forcing
++ ????
+
+
+- CESM script issue
+- CESM script issue
+
+
+
+@@ -110,10 +105,8 @@
+
+
+ T62 not working
+- ignore_ic_date is incompatable with crop!
+- CESM script problem didn't see both files
+- CESM script problem didn't see both files
+- build error?
++ Dies early with a floating point trap
++ build error?
+
+
+
+@@ -123,22 +116,9 @@
+
+
+
+- datm namelist issue
+- datm namelist issue
+- datm namelist issue
+ 277/277 < PASS>
+ Successully ran all testing for build-namelist
+
+ Cleanup files created
+ rm: lnd_in.default: A file or directory in the path name does not exist.
+ rm: temp_file.txt: A file or directory in the path name does not exist.
+ # Looks like you failed 4 tests of 277.
+
+%%cesm/clm tests
+
+ mostly OK
+
+ generate : ./cs.status.164019.bluefire
+
+ nohup create_test_suite -input_list bluefire.clm.auxtest -compare clm4_0_50 -baselineroot /glade/proj2/cgd/tss/clm_cesm_baselines -generate clm4_0_51 -testroot /glade/scratch/muszala/tests > & ! bf_out_`date +"%m%d%y"`.lg &
+ ID: 203212
+
+ ## Reason: throughput measure off
+ FAIL SMS_RLA.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_50
+ FAIL ERS_D.f45_g37.I.bluefire_ibm.tputcomp.clm4_0_50
+ FAIL ERS_E.T31_g37.I1850.bluefire_ibm.tputcomp.clm4_0_50
+ FAIL SMS.T31_g37.IG4804.bluefire_ibm.tputcomp.clm4_0_50
+  ## baseline directory already existed, error copying over nc files
+ FAIL SMS_RLA.f45_f45.I.bluefire_ibm.generate.clm4_0_51
+ FAIL SMS_RLB.f45_f45.I.bluefire_ibm.generate.clm4_0_51
+ FAIL SMS_ROA.f45_f45.I.bluefire_ibm.generate.clm4_0_51
+ FAIL ERS_D.f45_g37.I.bluefire_ibm.generate.clm4_0_51
+ FAIL NCK.T31_g37.I.bluefire_ibm.generate.clm4_0_51
+ FAIL PST.f45_g37.I1850CN.bluefire_ibm.generate.clm4_0_51
+ FAIL PET_PT.f45_g37.I1850.bluefire_ibm.generate.clm4_0_51
+ FAIL ERS_E.T31_g37.I1850.bluefire_ibm.generate.clm4_0_51
+ FAIL ERI.T31_g37.IG1850.bluefire_ibm.generate.clm4_0_51
+ FAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.generate.clm4_0_51
+ FAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.generate.clm4_0_51
+ FAIL ERH_D.T31_g37.I1850CN.bluefire_ibm.generate.clm4_0_51
+ FAIL SMS.T31_g37.IG4804.bluefire_ibm.generate.clm4_0_51
+ FAIL SMS.1x1_numaIA.ICN.bluefire_ibm.generate.clm4_0_51
+ FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.generate.clm4_0_51
+ ## fails due to cprnc time check. new runs are 11 ts. in 50 these were 10, I expect these to pass next time around
+ FAIL ERS_D.f45_g37.I.bluefire_ibm.compare_hist.clm4_0_50
+ FAIL ERS_E.T31_g37.I1850.bluefire_ibm.compare_hist.clm4_0_50
+ FAIL ERI.T31_g37.IG1850.bluefire_ibm.compare_hist.clm4_0_50
+ FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.compare_hist.clm4_0_50
+ FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.compare_hist.clm4_0_50
+ ## No Lm3 directories created during clm4_0_50 generate...new case, should pass next time around
+ SFAIL ERS_Lm3.1x1_vancouverCAN.I1PT.bluefire_ibm.GC.203212
+ ERROR: datm.buildnml.csh failed
+ BFAIL ERS_Lm3.f19_g16.IGRCP60CN.bluefire_ibm.compare_hist.clm4_0_50
+ No dir to compare to in tag 50
+ ## problems in generate due to scripts for single point
+ SFAIL SMS.1x1_mexicocityMEX.I1PT.bluefire_ibm.GC.203212
+ ERROR: datm.buildnml.csh failed also failed during generate
+ BFAIL SMS.1x1_numaIA.ICN.bluefire_ibm.compare_hist.clm4_0_50
+ No dir to compare to in tag 50 - failed during generate
+
+ ## these were failing but passed when rerun - keep an eye on these
+ BFAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.compare_hist.clm4_0_50
+ BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.compare_hist.clm4_0_50
+ BFAIL ERH_D.T31_g37.I1850CN.bluefire_ibm.compare_hist.clm4_0_50
+
+ went to generated ref case
+ ./setup -clean
+ ./setup
+ then build and rerun
+
+ went to generate case
+ ./setup -clean
+ ./setup
+  clean-build, then build, then rerun
+
+ did the same in the CG case for ref and normal case
+
+ After hand running
+ ./cs.status.203212.bluefire | grep ERB.f09_g16.I1850SPINUPCN
+ PASS ERB.f09_g16.I1850SPINUPCN.bluefire_ibm
+ PASS ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.memleak
+ PASS ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.generate.clm4_0_51
+ FAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.compare_hist.clm4_0_50
+ see cprnc ts error above
+ PASS ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.memcomp.clm4_0_50
+ PASS ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.tputcomp.clm4_0_50
+
+ This test was rerun with a new testlist
+ ./cs.status.203212.bluefire | grep ERB.ne30_g16.I_1948-2004
+ PASS ERB.ne30_g16.I_1948-2004.bluefire_ibm
+ PASS ERB.ne30_g16.I_1948-2004.bluefire_ibm.memleak
+ PASS ERB.ne30_g16.I_1948-2004.bluefire_ibm.generate.clm4_0_51
+ FAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.compare_hist.clm4_0_50
+ see cprnc ts error above
+ PASS ERB.ne30_g16.I_1948-2004.bluefire_ibm.memcomp.clm4_0_50
+ FAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.tputcomp.clm4_0_50
+ throughput tol. error
+
+ ./cs.status.141307.bluefire
+ PASS ERH_D.T31_g37.I1850CN.bluefire_ibm
+ PASS ERH_D.T31_g37.I1850CN.bluefire_ibm.memleak
+ PASS ERH_D.T31_g37.I1850CN.bluefire_ibm.generate.clm4_0_51
+ FAIL ERH_D.T31_g37.I1850CN.bluefire_ibm.compare_hist.clm4_0_50
+ see cprnc ts error above
+ PASS ERH_D.T31_g37.I1850CN.bluefire_ibm.memcomp.clm4_0_50
+ PASS ERH_D.T31_g37.I1850CN.bluefire_ibm.tputcomp.clm4_0_50
+
+%%cesm/clm rof tests
+ nohup create_test_suite -input_list bluefire.clmRof.auxtest -compare clm4_0_50 -baselineroot /glade/proj2/cgd/tss/clm_cesm_baselines -generate clm4_0_51 -testroot /glade/scratch/muszala/tests > & ! bf_out_`date +"%m%d%y"`.lg &
+
+ These don't exist anymore, but will be replaced once Tony works out default grid resolutions for r01 and r05
+
+ ID: 091144
+
+ BFAIL SMR.f19_g16.I_2000_CN.bluefire_ibm.compare_hist.clm4_0_50
+ nothing to compare to in clm4_0_50
+ BFAIL SMR.f09_g16.I_2000_CN.bluefire_ibm.compare_hist.clm4_0_50
+ nothing to compare to in clm4_0_50
+ FAIL SMR.f05_g16.I_2000_CN.bluefire_ibm
+     larger script errors in rof - kills the following two tests outright
+ BFAIL SMR.f05_g16.I_2000_CN.bluefire_ibm.generate.clm4_0_51
+ BFAIL SMR.f05_g16.I_2000_CN.bluefire_ibm.compare_hist.clm4_0_50
+ FAIL SMR.ne120_g16.I_2000_CN.bluefire_ibm.generate.clm4_0_51
+ can't copy in, clm4_0_51 baseline already exists
+ BFAIL SMR.ne120_g16.I_2000_CN.bluefire_ibm.compare_hist.clm4_0_50
+ nothing to compare to in clm4_0_50
+ RUN SMR.ne240_g16.I_2000_CN.bluefire_ibm.GC.091144
+
+
+%%%%%% testing reporting end
+
+CLM tag used for the baseline comparison tests if applicable:
+
+ clm4_0_50
+
+Changes answers relative to baseline: No bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_50
+Originator(s): muszala (Muszala Stefan 303-497-1320 CGD)
+Date: Fri Sep 21 15:13:52 MDT 2012
+One-line Summary: testing of clm and new rof component
+
+Purpose of changes:
+
+Run tests on clm for new ROF component. CLM mods by tcraig to support ROF.
+
+Requirements for tag:
+
+Test level of tag: doc, critical, standard, std-test, reg-test
+
+std-test
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist:
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by:
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+List all files eliminated:
+
+ Not needed since ROF is now a CLM external
+ D models/lnd/clm/src/main/clm_mct_mod.F90
+ D models/lnd/clm/src/riverroute
+ D models/lnd/clm/src/riverroute/RtmMod.F90
+ D models/lnd/clm/src/riverroute/RunoffMod.F90
+
+List all files added and what they do:
+
+ Fix for some of Erik's new tests
+ A models/lnd/clm/test/system/user_nl_dirs/monthly
+ A models/lnd/clm/test/system/user_nl_dirs/monthly/user_nl_clm
+ A models/lnd/clm/test/system/user_nl_dirs/monthly/user_nl_cpl
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi
+ M models/lnd/clm/tools/ncl_scripts
+ M models/lnd/clm/tools/interpinic
+ M models/lnd/clm/tools/mksurfdata_map/src
+ M models/lnd/clm/tools/mksurfdata_map/mksurfdata.pl
+ M models/lnd/clm/bld
+ M models/lnd/clm/bld/configure
+ M models/lnd/clm/bld/user_nl_clm
+ M models/lnd/clm/bld/listDefaultNamelist.pl
+ M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+ M models/lnd/clm/bld/build-namelist
+ M models/lnd/clm/bld/clm.buildnml.csh
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl
+ M models/lnd/clm/src/cpl_share/clm_cpl_indices.F90
+ M models/lnd/clm/src/biogeochem/CNDVMod.F90
+ M models/lnd/clm/src/biogeochem/DryDepVelocity.F90
+ M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+ M models/lnd/clm/src/main/spmdGathScatMod.F90
+ M models/lnd/clm/src/main/clm_varpar.F90
+ M models/lnd/clm/src/main/decompInitMod.F90
+ M models/lnd/clm/src/main/clm_initializeMod.F90
+ M models/lnd/clm/src/main/clmtypeInitMod.F90
+ M models/lnd/clm/src/main/histFileMod.F90
+ M models/lnd/clm/src/main/clm_atmlnd.F90
+ M models/lnd/clm/src/main/findHistFields.pl
+ M models/lnd/clm/src/main/restFileMod.F90
+ M models/lnd/clm/src/main/controlMod.F90
+ M models/lnd/clm/src/main/clm_varctl.F90
+ M models/lnd/clm/src/main/clm_driver.F90
+ M models/lnd/clm/src/main/ncdio_pio.F90
+ M models/lnd/clm/src/main/domainMod.F90
+ M models/lnd/clm/src/main/decompMod.F90
+ M models/lnd/clm/src/main/clmtype.F90
+ M models/lnd/clm/src/main/histFldsMod.F90
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_mct.F90
+ M SVN_EXTERNAL_DIRECTORIES
+ M ChangeLog
+ M ChangeSum
+ M scripts/ccsm_utils/Case.template/config_definition.xml
+
+
+Machines testing ran on:
+
+%%%%%%%% Test reporting START %%%%%%%%
+
+ Test system is currently in flux so I will simply list all tests that have
+ failed and the 8 tests that we need to keep an eye on. I've included bluefire
+ and lynx and pointers to where tests live.
+
+* tests that will pass in next tag due to new features
+** denotes an expected fail.
+*** any tputcomp tests that fail I'm inclined to ignore also. These change from test to test.
+? or cd a test that will need fixing
+
+BLUEFIRE:
+
+OK ############ run build-namelist tests
+>>cd models/lnd/clm/bld/unit_testers
+>>./build-namelist_test.pl -compare /glade/scratch/muszala/svn/clm4_0_49/models/lnd/clm/bld/unit_testers -generate -test -csmdata /glade/proj3/cseg/inputdata >&! out_unit_`date +"%m%d%y"`.lg
+
+ OK...failed tests will pass in next tag
+
+OK ############# run new I case tests
+[be1105en /glade/scratch/muszala/svn/clm_trunk/scripts ]$
+create_test_suite -mach bluefire_ibm -input_list allIcompsetsRes.clm.auxtest -nobatch on -nobuild on -compare clm4_0_49 -baselineroot /glade/proj2/cgd/tss/clm_cesm_baselines -generate clm4_0_50 -testroot /glade/scratch/muszala/tests > & ! bf_out_allI_`date +"%m%d%y"`.lg
+
+ 165507 - /glade/scratch/muszala/tests
+ >>./cs.status.165507.bluefire | grep FAIL
+...
+** SFAIL SBN.1x1_mexicocityMEX.I1PT.bluefire_ibm.GC.165507
+** SFAIL SBN.1x1_vancouverCAN.I1PT.bluefire_ibm.GC.165507
+** SFAIL SBN.1x1_urbanc_alpha.I1PT.bluefire_ibm.GC.165507
+** TFAIL SBN.1x1_asphalt_jungle.ICNTEST.bluefire_ibm.GC.165507
+** TFAIL SBN.T42_g16.I1850.bluefire_ibm.GC.165507
+** TFAIL SBN.T31_g16.I1850.bluefire_ibm.GC.165507
+** SFAIL SBN.f05_g16.I20TR.bluefire_ibm.GC.165507
+** SFAIL SBN.1x1_tropicAtl.I20TR.bluefire_ibm.GC.165507
+** SFAIL SBN.ne30_g16.I20TR.bluefire_ibm.GC.165507
+** SFAIL SBN.ne120_g16.I20TRCN.bluefire_ibm.GC.165507
+** SFAIL SBN.f05_g16.IRCP26CN.bluefire_ibm.GC.165507
+** SFAIL SBN.f05_g16.IRCP45CN.bluefire_ibm.GC.165507
+** SFAIL SBN.f05_g16.IRCP60CN.bluefire_ibm.GC.165507
+** SFAIL SBN.f05_g16.IRCP85CN.bluefire_ibm.GC.165507
+
+
+OK ############ run clm interactive tools tests
+[be1105en /glade/scratch/muszala/svn/clm_trunk/models/lnd/clm/test/system ]$
+>>nohup env CLM_SOFF=FALSE ./test_driver.sh -i >&! bluefire_i_`date +"%m%d%y"`.lg &
+
+ OK: looking at /glade/scratch/muszala/svn/clm_trunk/models/lnd/clm/test/system/td.951030.status.xFail - rerun
+ clmTests/test-driver.533240 - /glade/scratch/muszala/svn/clm_trunk/models/lnd/clm/test/system/td.533240.status.xFail
+
+############# run old cesm/clm tests out of scripts
+
+
+[be1105en /glade/scratch/muszala/svn/clm_trunk/scripts ]$
+>>create_test_suite -input_list bluefire.clm.auxtest -compare clm4_0_49 \
+-baselineroot /glade/proj2/cgd/tss/clm_cesm_baselines \
+-generate clm4_0_50 \
+-testroot /glade/scratch/muszala/tests >&! bf_out_`date +"%m%d%y"`.lg &
+
+ 143258
+>>cs.status.143258.bluefire | grep -v PASS
+...
+
+*** FAIL SMS.T31_g37.IG4804.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL SMS.1x1_numaIA.ICN.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL SMS.f10_f10.IRCP45CN.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL ERI.T31_g37.IG1850.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL ERS_E.T31_g37.I1850.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL ERS_D.f45_g37.I.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL SMS_RLA.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL SMS_RLB.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_49
+*** FAIL ERS_D.f45_g37.I.bluefire_ibm.compare_hist.clm4_0_49
+
+* FAIL NCK.T31_g37.I.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL PST.f45_g37.I1850CN.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL PET_PT.f45_g37.I1850.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL ERS_E.T31_g37.I1850.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL ERI.T31_g37.IG1850.bluefire_ibm.compare_hist.clm4_0_49
+? FAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm
+
+ Failing in Generate:
+ "/glade/scratch/muszala/svn/clm4_0_49/models/drv/shr/seq_infodata_mod.F90", line 620: 1525-006 The STATUS= specifier in the OPEN
+ statement for
+ unit 98 cannot be set to OLD because the file rpointer.drv does not exist. The program will stop.
+
+ BFAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.generate.clm4_0_50
+ BFAIL ERB.ne30_g16.I_1948-2004.bluefire_ibm.compare_hist.clm4_0_49
+? FAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm
+
+ Failing in Generate:
+ 0:"/glade/scratch/muszala/svn/clm4_0_49/models/drv/shr/seq_infodata_mod.F90", line 620: 1525-006 The STATUS= specifier in the OPEN
+        statement for unit 98 cannot be set to OLD because the file rpointer.drv does not exist. The program will stop.
+
+ BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.generate.clm4_0_50
+ BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.compare_hist.clm4_0_49
+? FAIL ERH_D.T31_g37.I1850CN.bluefire_ibm
+
+ Failing in Generate:
+ "/glade/scratch/muszala/svn/clm4_0_49/models/drv/shr/seq_infodata_mod.F90", line 620: 1525-006 The STATUS= specifier in the OPEN
+ statement for
+ unit 98 cannot be set to OLD because the file rpointer.drv does not exist. The program will stop.
+
+ BFAIL ERH_D.T31_g37.I1850CN.bluefire_ibm.generate.clm4_0_50
+ BFAIL ERH_D.T31_g37.I1850CN.bluefire_ibm.compare_hist.clm4_0_49
+
+* FAIL SMS.T31_g37.IG4804.bluefire_ibm.compare_hist.clm4_0_49
+** SFAIL SMS.1x1_mexicocityMEX.I1PT.bluefire_ibm.GC.143258
+ should be xFAIL : ERROR(build-namelist::new): Required input variable yearfirst was not found
+** SFAIL ERP.1x1_vancouverCAN.I1PT.bluefire_ibm.GC.143258
+ should be xFAIL : ERROR(build-namelist::new): Required input variable yearfirst was not found
+* FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL ERP.f19_g16.IGRCP60CN.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL PST.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL PET_PT.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL SMS.f10_f10.IRCP45CN.bluefire_ibm.compare_hist.clm4_0_49
+* FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.compare_hist.clm4_0_49
+
+
+
+
+############# run new test_system tests
+
+## interactive
+>> test_system -i -c clm4_0_49 -g clm4_0_50 >&! bluefire_tsi_`date +"%m%d%y"`.lg &
+
+ 161038
+>>cs.status.161038.bluefire | grep -v PASS
+...
+SFAIL ERS_D.CLM_USRDAT.IalaskaCN.bluefire_ibm.GC.161038
+* FAIL ERS_P1x64.f45_g37.I_1850_SPINUP_3HrWx_CN.bluefire_ibm.compare_hist.clm4_0_49
+ new fields in cpl history files
+* FAIL ERS_P1x64.f45_g37.I_1850_SPINUP_3HrWx_CN.bluefire_ibm.memcomp.clm4_0_49
+ max memory values differ
+*** FAIL ERS_P1x64.f45_g37.I_1850_SPINUP_3HrWx_CN.bluefire_ibm.tputcomp.clm4_0_49
+** FAIL ERS_D_P1x64.f19_g16.I_1850-2000_VOC_SNCRFRC_CN.bluefire_ibm
+** BFAIL ERS_D_P1x64.f19_g16.I_1850-2000_VOC_SNCRFRC_CN.bluefire_ibm.generate.clm4_0_50
+** BFAIL ERS_D_P1x64.f19_g16.I_1850-2000_VOC_SNCRFRC_CN.bluefire_ibm.compare_hist.clm4_0_49
+cd RUN ERS_D_P1x25.5x5_amazon.I_2000.bluefire_ibm.GC.161038
+** RUN ERS_D.1x1_asphaltjungleNJ.I_2000_VOC.bluefire_ibm.GC.161038
+** RUN ERS48s_D_P1x64.f45_g37.I_2000_VOC.bluefire_ibm.GC.161038
+* FAIL ERS48s_D_P1x64.f19_g16.I_2000_GLCMECPD.bluefire_ibm.compare_hist.clm4_0_49
+ new fields in cpl history files
+*** FAIL ERS48s_D_P1x64.f19_g16.I_2000_GLCMECPD.bluefire_ibm.tputcomp.clm4_0_49
+** FAIL PET_D_P1x64.f45_g37.I_2000_VOC.bluefire_ibm
+
+
+
+## batch
+>>test_system -c clm4_0_49 -g clm4_0_50 > & ! bluefire_ts_`date +"%m%d%y"`.lg &
+
+ 143420 -- cs.status.143420.bluefire
+cs.status.143420.bluefire | grep -v PASS
+...
+* FAIL ERS211d.f10_f10.ICNADSPIN.bluefire_ibm.compare_hist.clm4_0_49
+ new fields in cpl history files
+*** FAIL ERS211d.f10_f10.ICNADSPIN.bluefire_ibm.tputcomp.clm4_0_49
+** CFAIL ERS48s_D.f09_g16.ICNEXSPIN.bluefire_ibm.GC.143420
+ this is xFAIL for interactive, should also be listed here
+* FAIL ERS48s_D_P64x16.ne30_g16.ICN.bluefire_ibm.compare_hist.clm4_0_49
+ new fields in cpl history files
+*** FAIL ERS48s_D_P64x16.ne30_g16.ICN.bluefire_ibm.tputcomp.clm4_0_49
+* FAIL PET_D_P1x64.ne30_g16.ICN.bluefire_ibm.compare_hist.clm4_0_49
+ new fields in cpl history files
+? RUN ERS211d_D_P112x2.f10_f10.ICNCROP.bluefire_ibm.GC.143420
+ run failed
+** FAIL ERS211d.f10_f10.I_2000_VOC_CN.bluefire_ibm.compare_hist.clm4_0_49
+*** FAIL ERS211d.f10_f10.I_2000_VOC_CN.bluefire_ibm.tputcomp.clm4_0_49
+** RUN ERS211d_P192x2.f19_g16.I_2000_CNDV_CROP.bluefire_ibm.GC.143420
+** SFAIL ERS_D_P96x32.T62_g37.I_2000.bluefire_ibm.GC.143420
+
+
+
+#############
+
+LYNX:
+
+############# run new test_system tests
+
+## interactive
+>> test_system -i -c clm4_0_49_lynx_pgi -g clm4_0_50_lynx_pgi >&! lynx_i_`date +"%m%d%y"`.lg &
+
+ 144558
+
+cs.status.144558.lynx | grep -v PASS
+...
+*** FAIL ERS211d.1x1_brazil.I_2000.lynx_pgi.tputcomp.clm4_0_49_lynx_pgi
+* FAIL ERS48s_D_P1x12.f10_f10.ICNCROP.lynx_pgi.compare_hist.clm4_0_49_lynx_pgi
+ new fields in cpl history files
+** SFAIL SMS_D.1x1_vancouverCAN.I1PT.lynx_pgi.GC.144558
+** SFAIL ERS.1x1_mexicocityMEX.I1PT.lynx_pgi.GC.144558
+
+
+## batch
+>> test_system -c clm4_0_49_lynx_pgi -g clm4_0_50_lynx_pgi >&! lynx_`date +"%m%d%y"`.lg &
+
+ 160925
+./cs.status.160925.lynx | grep -v PASS
+...
+? BFAIL PMT_D.f45_g37.I_2000.lynx_pgi.compare_hist.clm4_0_49_lynx_pgi
+
+ problem in generate case not copying over file
+ PASS
+ Initial Test log is /glade/scratch/muszala/PMT_D.f45_g37.I_2000.lynx_pgi.G.114232/run/cpl.log.120920-152048
+ /var/spool/torque/mom_priv/jobs/102008.nid00003.SC: Storing new baseline in /glade/proj2/cgd/tss/clm_cesm_baselines/clm4_0_49_ly
+ nx_pgi/PMT_D.f45_g37.I_2000.lynx_pgi
+ ERROR in /var/spool/torque/mom_priv/jobs/102008.nid00003.SC: could not copy /glade/scratch/muszala/archive/PMT_D.f45_g37.I_2000.
+ lynx_pgi.G.114232/cpl/hist/ to /glade/proj2/cgd/tss/clm_cesm_baselines/clm4_0_49_lynx_pgi/PMT_D.f45_g37.I_2000.lynx_pgi/cpl.hi.nc
+
+? RUN ERS48s_D.f45_g37.I_2000_VOC.lynx_pgi.GC.160925
+ run didn't finish?
+? RUN ERS48s_D.f10_f10.I_2000_CN.lynx_pgi.GC.160925
+ PBS: job killed: walltime 9021 exceeded limit 9000
+? RUN ERS48s_P96x2.f19_g16.I_2000_VOC_SNCRFRC_CN_GLCMECPD.lynx_pgi.GC.160925
+ PBS: job killed: walltime 9041 exceeded limit 9000
+
+## interactive, with mirage test list and intel compiler
+>>test_system -i -p intel -l mirage.interactive -o "-mach lynx" -c clm4_0_49_lynx_intel -g clm4_0_50_lynx_intel > & ! lynx_mi_intel_`date +"%m%d%y"`.lg &
+
+ 143620 cs.status.143620.lynx
+>>cs.status.143620.lynx | grep -v PASS
+...
+** FAIL ERS.1x1_brazil.I_2000.lynx_intel
+** BFAIL ERS.1x1_brazil.I_2000.lynx_intel.generate.clm4_0_50_lynx_intel
+** BFAIL ERS.1x1_brazil.I_2000.lynx_intel.compare_hist.clm4_0_49_lynx_intel
+? FAIL ERI_D.1x1_camdenNJ.I_2000_VOC.lynx_intel
+ forrtl: error (73): floating divide by zero - rtmmod_mp_rtmini_ 303 RtmMod.F90
+** RUN ERS_D.1x1_asphaltjungleNJ.I_2000_VOC.lynx_intel.GC.143620
+ forrtl: error (73): floating divide by zero - rtmmod_mp_rtmini_ 303 RtmMod.F90
+* FAIL ERS48s_D_P1x12.f10_f10.I_2000_CN.lynx_intel.compare_hist.clm4_0_49_lynx_intel
+ new fields in cpl history files
+*** FAIL ERS48s_D_P1x12.f10_f10.I_2000_CN.lynx_intel.tputcomp.clm4_0_49_lynx_intel
+
+%%%%%%%% Test reporting END %%%%%%%%
+
+
+ build-namelist unit tester:
+
+ CESM test lists:
+
+ bluefire/CESM
+ lynx/CESM
+
+ bluefire/PTCLM
+
+ test_system testing:
+
+ bluefire batch:
+ bluefire interactive:
+ lynx/pgi batch:
+ lynx/pgi interactive:
+ mirage,storm/ifort interactive:
+
+CLM tag used for the baseline comparison tests if applicable:
+
+ clm4_0_49
+
+Changes answers relative to baseline: Yes, runoff is different (similar climate)
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: all with RTM
+ - what platforms/compilers: all
+ - nature of change (similar climate) I compsets only have runoff change
+
+ MSS location of control simulations used to validate new climate:
+
+ https://wiki.ucar.edu/display/ccsm/CCSM4+-+Track5+experiments
+
+ /CCSM/csm/b.e11.B1850CN.f19_g16.004
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+===============================================================
+Tag name: clm4_0_49
+Originator(s): erik (Kluzek Erik 303-497-1326 CGD)
+Date: Sun Sep 16 01:05:04 MDT 2012
+One-line Summary: Move clm testing to use CESM test framework
+
+Purpose of changes:
+
+Move testing for CLM from CLM stand-alone test_driver.sh to one based on
+the CESM testing framework. Create CLM specific tests-lists, user_nl_dir,
+and compset files to handle most CLM testing.
+
+Requirements for tag:
+
+ Move major testing from test_driver.sh to one based on CESM framework. Try
+ to get most of it to work.
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ Update to cesm1_1_beta18b
+
+ scripts updated to: scripts4_120915
+ scripts updated to: Machines_120915
+ mapping updated to: mapping_120816
+ stubs updated to: stubs1_3_05
+ drv updated to: drvseq1_4_26
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+A + models/lnd/clm/test/system/test_system ---- New main testing script for CLM.
+ wrapper script to CESM scripts/create_test_suite with behavior
+ similar to test_driver.sh
+
+ -b directory [or --baseline] baseline directory
+ -c version [or --compare] version to compare to
+ (generate must already have been run to create these)
+ -d debug usage -- display tests that will run -- but
+ do NOT actually execute them
+ -g version [or --generate] name of this version to generate version as
+ -h [or --help] displays help
+ -i interactive usage
+ -l list [or --list] input test list to use instead of default
+ (path relative to this directory)
+ -o options [or --options] options to pass to create_test_suite
+ -p compiler [or --compiler] compiler to use instead of default
+ -s [or --shortlist] use the short test list
+
+ Typical use:
+
+ cd models/lnd/clm/test/system
+ test_system -i -c clm4_0_48 -g clm4_0_49
+ test_system -c clm4_0_48 -g clm4_0_49
+
+
+>>>>>>>>>>>>> Test lists
+A + models/lnd/clm/test/system/mirage.interactive
+A + models/lnd/clm/test/system/lynx.interactive
+A + models/lnd/clm/test/system/shortlist.interactive
+A + models/lnd/clm/test/system/bluefire.batch
+A + models/lnd/clm/test/system/bluefire.interactive
+A + models/lnd/clm/test/system/lynx.batch
+A + models/lnd/clm/test/system/shortlist.batch
+
+>>>>>>>>>>>>> compset file and user_nl_* files for testing
+A + models/lnd/clm/test/system/config_files/config_CLMtestCompsets.xml
+A + models/lnd/clm/test/system/user_nl_dirs
+A + models/lnd/clm/test/system/user_nl_dirs/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/crop
+A + models/lnd/clm/test/system/user_nl_dirs/crop/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/cn_conly
+A + models/lnd/clm/test/system/user_nl_dirs/cn_conly/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/voc
+A + models/lnd/clm/test/system/user_nl_dirs/voc/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/voc/user_nl_cpl
+A + models/lnd/clm/test/system/user_nl_dirs/glcMEC
+A + models/lnd/clm/test/system/user_nl_dirs/glcMEC/user_nl_clm
+A + models/lnd/clm/test/system/user_nl_dirs/glcMEC/user_nl_cpl
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+M models/lnd/clm/bld/listDefaultNamelist.pl
+M models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml - New failed
+ tests from new test framework
+M models/lnd/clm/bld/clm.buildnml.csh - Copy drv_flds_in if clm creates it
+ and if it doesn't already exist.
+M UpDateChangeLog.pl -- Add some support for xFail. Not fully working.
+
+Machines testing ran on:
+
+ build-namelist unit tester: yes
+
+ test_system testing:
+
+ bluefire batch: yes
+ bluefire interactive: yes
+ bluefire/CESM: yes
+ lynx/pgi batch: yes
+ lynx/pgi interactive: yes
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_48
+
+Difference in expected fails from testing:
+
+Index: expectedClmTestFails.xml
+===================================================================
+--- expectedClmTestFails.xml (.../trunk_tags/clm4_0_48/models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml) (revision 40288)
++++ expectedClmTestFails.xml (.../trunk/models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml) (revision 40288)
+@@ -34,6 +34,10 @@
+
+
+ cprnc showing diffs are not b4b
++ problem configuring
++ problem configuring
++ clm stand-alone can no longer work
++ clm stand-alone can no longer work
+
+
+
+@@ -71,14 +75,70 @@
+
+
+
++
++ CESM script issue
++ CESM script issue
++
+
+
+
+
+
++
++
++
++ CESM script issue
++ Restart length different
++ Restart length different
++ Initial simulation fails
++ Initial simulation fails
++
++
++
++
++ datm namelist problem for single-point forcing
++ datm namelist problem for single-point forcing
++
++
++
++
++
++
++
++ T62 not working
++ ignore_ic_date is incompatable with crop!
++ CESM script problem didn't see both files
++ CESM script problem didn't see both files
++
++
++
++
++
++
+
+
+
++ datm namelist issue
++ datm namelist issue
++ datm namelist issue
++ datm namelist issue
++ missing wus12 datasets
++ missing datasets
++ missing datasets
++ missing datasets
++ missing datasets
++ missing datasets
++ missing datasets
++ missing datasets
++ missing datasets
++ missing datasets
++ missing datasets
++
++
++
++
++
++
+
+
+
+
+Changes answers relative to baseline: No bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_48
+Originator(s): muszala (Muszala Stefan 303-497-1320 CGD)
+Date: Tue Sep 11 09:14:40 MDT 2012
+One-line Summary: bug fixes, xFail to tests and normalize test output for CLM
+
+Purpose of changes: Bug Fixes. Add xFail capability to CLM batch,
+interactive and namelist tests. Make test output the same for CLM
+tests.
+
+Requirements for tag: Test on bluefire (CESM, int, bat), lynx/pgi (int,bat)
+Fix bugs: 1436,1500,1521,1537
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+
+ 1436,1500,1521,1537
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+ 1545 - on lynx clm-batch doesn't call our new xFAIL module.
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist:
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by: xFail module Bill, Erik. Rest of code Erik.
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+List all files eliminated:
+
+List all files added and what they do:
+
+A models/lnd/clm/bld/unit_testers/xFail
+A models/lnd/clm/bld/unit_testers/xFail/expectedFail.pm
+A models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml
+A models/lnd/clm/bld/unit_testers/xFail/wrapClmTests.pl
+
+ - xFAIL module that implements expected fail reporting. wrapClmTests.pl is used
+as a wrapper and called by test_driver.sh. The XML file holds test cases.
+Documentation is in POD in expectedFail.pm
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/test/system/TBL.sh
+ - fix so that mct and pio are built out of the baseline directory
+M models/lnd/clm/bld/configure
+ - fix path to mct/.../mpi-serial
+M models/lnd/clm/src/main/getdatetime.F90
+ - fix to broadcast correct time stamp
+M models/lnd/clm/test/system/test_driver.sh
+M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+ - both of these files modified to support xFAIL functionality
+M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml
+ - remove wrf mapping entry that isn't in inputdata. fix entries per bug
+ 1521
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+ - remove wrf mapping entry that isn't in inputdata. fix entry per bug 1521
+
+Summary of testing:
+
+Note that tests that used to fail are now being reported as xFAIL. The file
+to look at is models/lnd/clm/bld/unit_testers/xFail/expectedClmTestFails.xml.
+
+There are no tests that used to FAIL that now PASS.
+
+ build-namelist unit testing: all pass
+ bluefire: all pass
+ bluefire interactive testing: all pass
+ bluefire/CESM testing: a few throughput failures
+
+ FAIL SMS_RLA.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_47
+ COMMENT tput_decr = 2.945 tput_percent_decr = 21.3
+
+ FAIL SMS.1x1_numaIA.ICN.bluefire_ibm.tputcomp.clm4_0_47
+ COMMENT tput_decr = 65.6 tput_percent_decr = 41.9
+
+ FAIL ERS_E.T31_g37.I1850.bluefire_ibm.tputcomp.clm4_0_47
+ COMMENT tput_decr = 1.8039999 tput_percent_decr = 2.07
+
+ FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.tputcomp.clm4_0_47
+ COMMENT tput_decr = 2.325 tput_percent_decr = 9.80
+
+ FAIL SMS.f10_f10.IRCP45CN.bluefire_ibm.tputcomp.clm4_0_47
+ COMMENT tput_decr = 7.0280000 tput_percent_decr = 3.69
+
+ bluefire/PTCLM testing: N/A
+ lynx/pgi testing: all pass
+ lynx/pgi interactive testing: all pass
+ lynx/CESM testing:
+ mirage,storm/ifort interactive testing: all pass
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_47
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_47
+Originator(s): muszala (Muszala Stefan 303-497-1320 CGD)
+Date: Thu Aug 23 11:09:27 MDT 2012
+One-line Summary: bug fixes
+
+Purpose of changes:
+
+ Fix some bugs and tag early since CAM needs fix of bug 1538 asap.
+
+Requirements for tag:
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+
+ 1534,1533,1507,1444,1538
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist:
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by:
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/configure
+-- Passes FLAGS down to cesm_lib build and for pio (only for CLM testing)
+
+M models/lnd/clm/tools/mkmapdata/regridbatch.sh
+M models/lnd/clm/tools/mkmapdata/mkmapdata.sh
+-- Fixes as per bug 1507. Logic clean up for interactive and using mpi
+
+M models/lnd/clm/bld/build-namelist
+-- Fix as per bug 1538
+
+M models/lnd/clm/src/main/controlMod.F90
+-- Fix as per bug 1444. remove call to "mpi_bcast (glc_topomax," and logic
+ controlling it.
+
+M models/lnd/clm/src/main/ncdio_pio.F90
+-- Fix as per bug 1533 and 1534
+
+M SVN_EXTERNAL_DIRECTORIES
+-- Mistake from last tag. Replaced two repos with correct trunk-tag urls.
+
+
+Summary of testing:
+
+ build-namelist unit testing:
+ All PASS except:
+ fails involve us20 and wus12
+ not ok 141 - lnd_in file the same as expected for CN - 94x192 - fixed. Should pass in next tag.
+ not ok 214 - lnd_in file exists - us20 - no tests in place
+ not ok 219 - lnd_in file exists - wus12 - no tests in place
+ not ok 221 - compare file lnd_in DNE for CN and -res+wus12 - wus12 - no tests in place
+ not ok 222 - compare file temp_file.txt DNE for CN and -res+wus12 - wus12 - no tests in place
+ bluefire:
+
+ 016-019 will be removed and put in CESM/CLM tests
+ 016 smW51 TSM.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -5 cold .................FAIL! rc= 10
+ 017 erW51 TER.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -3+-2 cold ..............FAIL! rc= 5
+ 018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 5
+ 019 blW51 TBL.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -5 cold .................FAIL! rc= 4
+
+ 036-039 Failed in the past, see prior versions
+ 036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+ 037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+ 038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+ 039 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 4
+
+ 049-052 Failed in the past, see prior versions
+ 049 smCI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+ 050 erCI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+ 051 brCI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+ 052 blCI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+
+ bluefire interactive testing:
+    All PASS except:
+ 004 blC74 TBL.sh _sc_s clm_std^nl_urb 20020101:1800 1x1_brazil navy -10 arb_ic ..................FAIL! rc= 5
+ 008 blC97 TBL.sh _sc_do clm_spin^nl_urb 20030101:1800 4x5 gx3v7@1850 -6 arb_ic ..................FAIL! rc= 5
+ 012 blHS3 TBL.sh 17p_cnsc_do clm_usrdat 20030101:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic .....FAIL! rc= 5
+ 016 blCA4 TBL.sh _sc_ds clm_drydep^nl_urb 20021001:3600 1x1_camdenNJ navy -90 arb_ic ............FAIL! rc= 5
+ 020 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:3600 1x1_mexicocityMEX navy 158 arb_ic ....FAIL! rc= 5
+ 024 blCA8 TBL.sh _sc_ds clm_drydep^nl_urb 20021230:3600 1x1_asphaltjungleNJ navy -90 arb_ic .....FAIL! rc= 5
+ 026 blCK4 TBL.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -10 cold ...............FAIL! rc= 5
+ 028 blCK8 TBL.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -10 cold ...........FAIL! rc= 5
+ 030 blC78 TBL.sh _sc_s clm_std^nl_urb 20021231:1800 1x1_brazil navy -10 arb_ic ..................FAIL! rc= 5
+ 034 blF93 TBL.sh 17p_sc_do clm_drydep^nl_voc 20021230:1800 4x5 gx3v7 48 cold ....................FAIL! rc= 5
+ 038 blC83 TBL.sh _sc_do clm_std^nl_urb 20020115:3600 5x5_amazon navy -10 arb_ic .................FAIL! rc= 5
+ 042 blK74 TBL.sh 17p_cndvsc_s clm_std 19971231:1800 1x1_brazil navy -670 arb_ic .................FAIL! rc= 5
+ 046 blC63 TBL.sh _sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ........................FAIL! rc= 5
+ 050 blHQ4 TBL.sh 17p_cnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -150 cold ............FAIL! rc= 5
+ 054 blH43 TBL.sh 17p_cnsc_do clm_transient_20thC 19790101:1800 1.9x2.5 gx1v6@1850-2000 -10 startup FAIL! rc= 5
+ 067 bl8Z3 TBLrst_tools.sh 21p_cncrpsc_do interpinic clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6FAIL! rc= 5
+ Reason: changed configure, but configure in previous tag not updated.
+ These will pass when a new tag is compared to clm4_0_47
+ bluefire/CESM testing:
+ All PASS except:
+ FAIL SMS_RLB.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_46
+ FAIL SMS_ROA.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_46
+ FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.tputcomp.clm4_0_46
+ Reason: the throughput tolerance is likely still not large enough.
+ bluefire/PTCLM testing: N/A
+ lynx/pgi testing:
+ lynx/pgi interactive testing:
+ 004 blC74 TBL.sh _sc_s clm_std^nl_urb 20020101:1800 1x1_brazil navy -10 arb_ic ..................FAIL! rc= 5
+ 008 blTZ3 TBL.sh 21p_cncrpsc_do clm_stdIgnYr^nl_crop 20020401:3600 10x15 USGS -10 cold ..........FAIL! rc= 5
+ 010 blCL4 TBL.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 5
+ 014 blCA4 TBL.sh _sc_ds clm_drydep^nl_urb 20021001:3600 1x1_camdenNJ navy -90 arb_ic ............FAIL! rc= 5
+ Reason: changed configure, but configure in previous tag not updated.
+ These will pass when a new tag is compared to clm4_0_47
+ lynx/CESM testing:
+ N/A
+ mirage,storm/ifort interactive testing:
+ All PASS except:
+ 70 004 blC74 TBL.sh _sc_s clm_std^nl_urb 20020101:1800 1x1_brazil navy -10 arb_ic ..................FAIL! rc= 5
+ 71 007 blD94 TBL.sh _persc_ds clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 5
+ 72 011 blCA4 TBL.sh _sc_ds clm_drydep^nl_urb 20021001:3600 1x1_camdenNJ navy -90 arb_ic ............FAIL! rc= 5
+ 73 015 blCA8 TBL.sh _sc_ds clm_drydep^nl_urb 20021230:3600 1x1_asphaltjungleNJ navy -90 arb_ic .....FAIL! rc= 5
+ 74 019 blL54 TBL.sh _sc_ds clm_std^nl_urb 20020115:1800 10x15 USGS 48 arb_ic .......................FAIL! rc= 5
+ 75 023 blR53 TBL.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:1800 10x15 USGS@1850 48 cold ............FAIL! rc= 5
+ Reason: changed configure, but configure in previous tag not updated.
+ These will pass when a new tag is compared to clm4_0_47
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_46
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_46
+Originator(s): muszala (Muszala Stefan 303-497-1320 CGD)
+Date: Wed Aug 8 11:53:44 MDT 2012
+One-line Summary: R01 support and update externals
+
+Purpose of changes:
+
+ Add support for r01 rtm. Add mapping files for ne120 and ne240. Update all svn
+ externals to what is in cesm_alpha16e and modify and update our test system as
+ necessary.
+
+Requirements for tag: test on bluefire (CESM, int, bat, build-namelist), lynx/pgi (int,bat), mirage.
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):N/A
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:N/A
+
+Describe any changes made to the namelist:N/A
+
+List any changes to the defaults for the boundary datasets:N/A
+
+Describe any substantial timing or memory changes:N/A
+
+Code reviewed by: self, Erik
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ Created the following tags:
+ https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_120808
+ https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_1_23
+ https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_120807
+
+List all files eliminated:N/A
+
+List all files added and what they do:N/A
+
+List all existing files that have been modified, and describe the changes:
+
+== modifications to update externals ==
+ M models/lnd/clm/tools/SVN_EXTERNAL_DIRECTORIES
+ M SVN_EXTERNAL_DIRECTORIES
+
+== modifications to get cesm/clm, interactive tests to pass ==
+
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+ -- added more output for easier test reading
+ M models/lnd/clm/test/system/TCB.sh
+ -- fix some indentation
+ M models/lnd/clm/test/system/TCBtools.sh
+ -- add support for gen_domain configure on bluefire
+ M models/lnd/clm/test/system/test_driver.sh
+ -- move tests to clmTest directory on /glade/scratch
+ M models/lnd/clm/test/system/CLM_runcmnd.sh
+ -- just indent diffs
+ M models/lnd/clm/tools/mksurfdata_map/src/mkfileMod.F90
+ -- handle all files as large files (from ne240 runs)
+ M models/lnd/clm/bld/configure
+ -- add dependency for pio piovdf.o: pio_kinds.o
+ -- remove -lgptl from cesm Makefile
+
+== added r01 maps to namelist_defaults_clm.xml ==
+
+ M clm.buildnml.csh
+ M namelist_files/namelist_defaults_overall.xml
+   -- modified namelist_defaults_overall.xml to take wus12 and us20 with rtm off
+ -- also added ne240 and default gx1v6 ocean mask
+   -- mods to bld/clm.buildnml.csh so that rtm is off for wus12
+ -- added path and script name to xml generated by createMapEntry.pl
+ -- checked wus12_wus12 run. Configures and runs. Error message to look for is:
+
+ "Do not run the River Transport Model (RTM)" which is correct since wus12_wus12 is
+ a regional grid
+
+== modify scripts and drv to get new r01 to gx1v6 mapping files ==
+ M scripts/ccsm_utils/Case.template/config_grid.xml
+ M scripts/ccsm_utils/Case.template/config_definition.xml
+ M models/drv/bld/namelist_files/namelist_defaults_drv.xml
+ -- namelist_defaults_drv.xml - added rof_grid for r01 and gx1v6
+
+== modify xml so that 1/10 degree runs work ==
+ M models/lnd/clm/tools/mkmapdata/mkmapdata.sh
+ M models/lnd/clm/tools/mkmapgrids/mkmapgrids.namelist
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml
+ M models/lnd/clm/src/riverroute/RtmMod.F90
+
+Summary of testing:
+
+ build-namelist unit testing:
+ All PASS except:
+ fails involve us20 and wus12
+ not ok 141 - lnd_in file the same as expected for CN - 94x192 - fixed. Should pass in next tag.
+ not ok 214 - lnd_in file exists - us20 - no tests in place
+ not ok 219 - lnd_in file exists - wus12 - no tests in place
+ not ok 221 - compare file lnd_in DNE for CN and -res+wus12 - wus12 - no tests in place
+ not ok 222 - compare file temp_file.txt DNE for CN and -res+wus12 - wus12 - no tests in place
+ bluefire:
+ All PASS except:
+ 018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+ modified to -3+-3 -- still FAIL
+ 036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+ 037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+ 038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+ 039 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 4
+ 036-039 needs major work
+
+ and why they fail...
+
+ 018 - cprnc differences in comparison
+ 036 - endrun initiated from CNBalanceCheckMod.F90
+ 037 - __cnbalancecheckmod_NMOD_cbalancecheck
+ 038, 039 - fail since 037 didn't run
+
+ bluefire interactive testing:
+ All PASS except:
+ bl514 - will fail because tag 45 has a broken gen_domain build
+ bl954 - no ne240 in tag 45
+ bl9C4 - 8 bit difference in file size due to using large file write in mkfileMod.F90
+ These should pass in next tag
+ bluefire/CESM testing:
+ All PASS except:
+ FAIL SMS_RLA.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL SMS_RLB.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL SMS_ROA.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL ERS_D.f45_g37.I.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL SMS.1x1_numaIA.ICN.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL ERS_E.T31_g37.I1850.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL ERI.T31_g37.IG1850.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL SMS.f10_f10.IRCP45CN.bluefire_ibm.tputcomp.clm4_0_45
+ FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.tputcomp.clm4_0_45
+
+ There is tolerance check built into the tests that may be too tight. Jay will look at this in the future.
+ Throughputs are reported and a sampling looks reasonable:
+
+ CHECK SMS_RLA.f45_f45.I.bluefire_ibm.perf npes=1 tput=16.026 memh=259.677 memr=-0.001 tag=clm4_0_45 baseline
+ CHECK SMS_RLA.f45_f45.I.bluefire_ibm.perf npes=1 tput=9.392 memh=259.856 memr=-0.001 tag=
+
+ CHECK SMS.f10_f10.IRCP45CN.bluefire_ibm.perf npes=16 tput=200.866 memh=265.074 memr=-0.001 tag=clm4_0_45 baseline
+ CHECK SMS.f10_f10.IRCP45CN.bluefire_ibm.perf npes=16 tput=187.881 memh=267.630 memr=-0.001 tag=
+
+ CHECK ERS_D.f19_g16.IRCP85CN.bluefire_ibm.perf npes=64 tput=29.741 memh=292.035 memr=-0.001 tag=clm4_0_45 baseline
+ CHECK ERS_D.f19_g16.IRCP85CN.bluefire_ibm.perf npes=64 tput=28.368 memh=294.879 memr=-0.001 tag=
+
+ bluefire/PTCLM testing:
+ lynx/pgi testing:
+ lynx/pgi interactive testing:All PASS
+ lynx/CESM testing:All PASS
+ mirage,storm/ifort interactive testing:All PASS
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_45
+
+Changes answers relative to baseline:No
+
+===============================================================
+===============================================================
+Tag name: clm4_0_45
+Originator(s): sacks (Sacks Bill 303-497-1762 CGD)
+Date: Fri Jul 20 11:41:14 MDT 2012
+One-line Summary: fix virtual columns; new urban mksurfdata_map
+
+Purpose of changes:
+
+GLC-related: Fix places where glc virtual columns were not being treated
+correctly (major bug!). Change albice default to 0.6,0.4 for glc_mec
+cases, based on suggestion from Bill Lipscomb. Fix dust calculation for
+glc_mec.
+
+Other CLM changes: Add an instance of istcrop. Fix landunit-level output
+for dov2xy=false.
+
+Tools changes: Update mksurfdata_map to handle new urban raw data format
+(use dominant density class, together with lookup tables; currently used
+for mksurfdata_map with hires). Minor fixes to mksurfdata_map. Add unit
+tests to mksurfdata_map. Change tools build to support addition of unit
+tests. Minor fixes to mkscripgrid.ncl, mkunitymap.ncl and mknoocnmap.pl.
+
+Namelist-related: Refer to correct scrip grid files for f09, f19; and
+a few fixed mapping files for those resolutions, including clm->rtm mapping
+files for those resolutions (changes answers for RTM). (The old scrip grid
+files had a displaced pole, which is not what we want for CLM. Note that I
+did NOT replace the f05 scrip grid file, because the only alternative I can
+find has bad values in the corner arrays -- see bug 1518.)
+
+SPM--Mostly changes to get more tests to pass. Added 1x1_* mapping files to
+inputdata. Created script that auto-generates XML for new mapping files for
+easier inclusion into existing XML files. Modify build-namelist_test.pl to
+pass CSMDATA to build-namelist. Tests were failing if a user didn't have
+CSMDATA env set. Touched CFGtools__ds to get a CLM interactive test to pass.
+Added openMP and debug openMP tests for bluefire interactive tests for 10x15.
+--SPM
+
+Requirements for tag: test on bluefire (CESM, int, bat), lynx/pgi (int,bat), mirage.
+Fix bug 1492
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+ 1492 (missing istcrop)
+  1515 (need mapping files for
+  single-point)-SPM
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+ Makefiles reorganized for tools (mksurfdata_map, interpinic, mkmapgrids)
+
+Describe any changes made to the namelist:
+
+ albice changed to 0.6,0.4 for glc_mec cases
+
+List any changes to the defaults for the boundary datasets:
+
+ Use corrected mapping files for CLM->RTM for f09,f19, and for some
+ mapping files used to create surface datasets. Use correct scrip grids
+ for f09,f19. Add new urban raw data file for hires mksurfdata_map, and
+ associated scrip grid file & mapping files.
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+List all files eliminated:
+
+>>>>>>> Split into mkurbanparCommonMod, mkurbanparAvgMod and mkurbanparDomMod
+D models/lnd/clm/tools/mksurfdata_map/src/mkurbanparMod.F90
+
+>>>>>>> Modify build system to make it easier to add unit testers
+D models/lnd/clm/tools/mksurfdata_map/src/Macros.custom
+D models/lnd/clm/tools/interpinic/src/Macros.custom
+D models/lnd/clm/tools/mkmapgrids/src/Macros.custom
+
+
+List all files added and what they do:
+
+>>>>>>> SPM-- Dump XML of mappings for a specified resolution
+A namelist_files/createMapEntry.pl
+>>>>>>> --SPM
+
+
+>>>>>>> Pull out routines from mkurbanparMod that are common to different
+>>>>>>> ways of creating urban parameter data
+A models/lnd/clm/tools/mksurfdata_map/src/mkurbanparCommonMod.F90
+
+>>>>>>> Modules to handle old (area-average) and new (dominant-type) urban
+>>>>>>> input files
+A models/lnd/clm/tools/mksurfdata_map/src/mkurbanparAvgMod.F90 - mostly from mkurbanparMod
+A models/lnd/clm/tools/mksurfdata_map/src/mkurbanparDomMod.F90 - new code, to handle new format
+
+>>>>>>> New modules with general-purpose utilities for mksurfdata_map
+A models/lnd/clm/tools/mksurfdata_map/src/mkutilsMod.F90
+A models/lnd/clm/tools/mksurfdata_map/src/mkindexmapMod.F90
+
+>>>>>>> New unit testers for mksurfdata_map
+A models/lnd/clm/tools/mksurfdata_map/unit_testers
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/test_lookup_2d_netcdf.nc
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/test_mkutilsMod.F90
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/test_mkindexmapMod.F90
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/Srcfiles
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/test_mksurfdata_map.F90
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/test_mod.F90
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/Filepath
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/Makefile
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/README
+A models/lnd/clm/tools/mksurfdata_map/unit_testers/test_mkurbanparDomMod.F90
+
+>>>>>>> Modify build system to make it easier to add unit testers
+A models/lnd/clm/tools/mksurfdata_map/src/Makefile.common
+A models/lnd/clm/tools/mkmapgrids/src/Makefile.common
+A models/lnd/clm/tools/interpinic/src/Makefile.common
+
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>> SPM--
+>>>>>>> Pass csmdata down to build-namelist and add logic so CSMDATA is set
+>>>>>>> even if user does not
+M unit_testers/build-namelist_test.pl
+>>>>>>> Add support for 1x1_* single point mapping files
+M namelist_files/namelist_defaults_clm.xml
+M namelist_files/namelist_defaults_clm_tools.xml
+>>>>>>> Add support for a few openMP 10x15 tests, modify test list
+>>>>>>> and remove some old single point tests
+M test/system/tests_pretag_bluefire_nompi
+M test/system/input_tests_master
+M test/system/tests_posttag_nompi_regression
+
+>>>>>>> --SPM
+
+>>>>>>> Fix glc virtual column bugs: change checks of (wt > 0)
+>>>>>>> to (wt > 0 .or. ityplun(l)==istice_mec)
+M models/lnd/clm/src/main/histFileMod.F90 ---------- also fix landunit-level fields
+ with dov2xy=.false.
+M models/lnd/clm/src/main/subgridAveMod.F90
+M models/lnd/clm/src/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 - also change
+ 'if (itypelun==istice)' to 'if (itypelun==istice .or. itypelun==istice_mec)'
+ in setting parameters
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90
+
+>>>>>>> Add istcrop (fix bug 1492)
+M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90
+
+>>>>>>> Remove unnecessary 'use'
+M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90
+
+>>>>>>> Change albice to 0.6,0.4 for glc_mec cases; add mapping files for
+>>>>>>> 3x3min_LandScan2004; use corrected mapping files for 5x5min_ISRIC-WISE_to_0.9x1.25,
+>>>>>>> 3x3min_MODIS_to_0.9x1.25 and 5x5min_nomask_to_1.9x2.5, as well as for CLM->RTM
+>>>>>>> for f09 and f19
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+
+>>>>>>> Point to correct files for f09,f19 scrip grids; add new urban raw data file
+>>>>>>> for hires mksurfdata_map
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml
+
+>>>>>>> Add support for new 3x3min_LandScan2004 grid
+M models/lnd/clm/tools/mkmapdata/mkmapdata.sh
+M models/lnd/clm/bld/namelist_files/checkmapfiles.ncl
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+
+>>>>>>> Changes to mksurfdata_map to support new input urban format
+M models/lnd/clm/tools/mksurfdata_map/src/Srcfiles ------ add new source files
+M models/lnd/clm/tools/mksurfdata_map/src/mksurfdat.F90 - use new urban interfaces;
+ also, substantially increase tolerance for roundoff error fix in
+ normalizencheck_landuse (the latter change is unrelated to the new urban
+ format; this change makes it so more points have 100% special rather than
+ nearly-100% special, which was required to avoid CLM termination due to
+ rounding errors in some cases)
+M models/lnd/clm/tools/mksurfdata_map/src/mkfileMod.F90 - add URBAN_DENSITY_CLASS
+ and URBAN_REGION_ID fields
+M models/lnd/clm/tools/mksurfdata_map/src/mkncdio.F90 --- public declarations of
+ routines that are now needed
+
+>>>>>>> Other, incidental changes to mksurfdata_map
+M models/lnd/clm/tools/mksurfdata_map/src/mkpftMod.F90 ---- fix zero_out
+M models/lnd/clm/tools/mksurfdata_map/src/mkglcmecMod.F90 - correct rounding errors
+ in topoglcmec_o; change a warning to a fatal error
+M models/lnd/clm/tools/mksurfdata_map/src/mkdomainMod.F90 - add tolerance in
+ checking for lat/lon equality
+
+>>>>>>> Add src_grid_dims & dst_grid_dims
+M models/lnd/clm/tools/mkmapdata/mkunitymap.ncl
+
+>>>>>>> Fix direction of ocn->atm mapping file
+M models/lnd/clm/tools/mkmapdata/mknoocnmap.pl
+M models/lnd/clm/tools/README ------------------ also fix typos
+
+>>>>>>> Fix ordering of corners
+M models/lnd/clm/tools/mkmapgrids/mkscripgrid.ncl
+
+>>>>>>> Modify build system to make it easier to add unit testers
+M models/lnd/clm/tools/mksurfdata_map/src/Makefile
+M models/lnd/clm/tools/interpinic/src/Makefile
+M models/lnd/clm/tools/mkmapgrids/src/Makefile
+M models/lnd/clm/test/system/TCBtools.sh ----------- copy correct file
+
+>>>>>>> Document copy of test_mod
+M models/lnd/clm/tools/README.filecopies
+
+
+Summary of testing:
+
+--SPM. New tests run after update to clm4_0_44 and after tests modifications.
+
+==== bluefire build-namelist tests: ====
+
+ * expected fail due to new mapping file
+ < fmapinp_rtm = '/glade/proj3/CESM/cseg/inputdata//lnd/clm2/mappingdata/maps/1.9x2.5/map_1.9x2.5_nomask_to_0.5x0.5_nomask_aave_da_c120522.nc'
+ ---
+ > fmapinp_rtm = '/glade/proj3/CESM/cseg/inputdata//lnd/clm2/mappingdata/maps/1.9x2.5/map_1.9x2.5_nomask_to_0.5x0.5_nomask_aave_da_c120221.nc'
+
+ * not ok 6 - lnd_in file the same as expected for standard
+ not ok 23 - lnd_in file the same as expected for standard
+
+This is expected. Bill states this mod in ChangeLog
+
+ < albice = 0.60,0.40
+ ---
+ > albice = 0.50,0.50
+
+ * not ok 29 - lnd_in file the same as expected for standard
+ * not ok 36 - lnd_in file the same as expected for standard
+ * not ok 43 - lnd_in file the same as expected for standard
+ * not ok 49 - lnd_in file the same as expected for standard
+ * not ok 54 - lnd_in file the same as expected for standard
+ * not ok 59 - lnd_in file the same as expected for standard
+ * not ok 64 - lnd_in file the same as expected for standard
+ * not ok 69 - lnd_in file the same as expected for standard
+ * not ok 74 - lnd_in file the same as expected for standard
+ * not ok 80 - lnd_in file the same as expected for standard
+ * not ok 85 - lnd_in file the same as expected for standard
+ * not ok 91 - lnd_in file the same as expected for standard
+ * not ok 156 - lnd_in file the same as expected for CN
+ * not ok 161 - lnd_in file the same as expected for CN
+
+ 221 and 222 are for new WRF tests, ignoring for now since tests not complete
+ not ok 221 - compare file lnd_in DNE for CN and -res+wus12
+ # in NMLTest/CompFiles.pm at line 103.
+ WARNING(NMLTest::CompFiles::comparefiles):: File /glade/scratch/muszala/svn/clm4_0_44/models/lnd/clm/bld/unit_testers/temp_file.txt.CN.-res+wus12
+ does NOT exist!
+
+ not ok 222 - compare file temp_file.txt DNE for CN and -res+wus12
+
+WARNING(NMLTest::CompFiles::comparefiles):: File /glade/scratch/muszala/svn/clm4_0_44/models/lnd/clm/bld/unit_testers/temp_file.txt.CN.-res+wus12
+ 2349 does NOT exist!
+
+ # Failed test 'compare file temp_file.txt DNE for CN and -res+wus12
+ # '
+ # in NMLTest/CompFiles.pm at line 103.
+
+ * not ok 276 - lnd_in file the same as expected for crop
+
+==== bluefire interactive ====
+ now pass due to new mapping files (ignore numbering, use test descriptor)
+001 sm514 TSMCFGtools.sh gen_domain CFGtools__ds T31.runoptions .................................PASS
+002 sm974 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds PASS
+003 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dPASS
+004 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds PASS
+
+ new tests and test descriptors for 10x15 openMP tests
+001 sm953 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__o .......PASS
+002 bl953 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__o .......SKIPPED*
+003 sm954 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__ds ......PASS
+004 bl954 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__ds ......SKIPPED*
+005 sm957 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__do ......PASS
+006 bl957 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__do ......SKIPPED*
+007 sm959 TSMscript_tools.sh mkmapdata mkmapdata.sh mkmapdata_if10 ..............................PASS
+008 bl959 TBLscript_tools.sh mkmapdata mkmapdata.sh mkmapdata_if10 ..............................SKIPPED*
+
+--SPM
+
+NOTE: UNLESS OTHERWISE NOTED, THE BELOW TESTS WERE RUN FROM TAG
+virtual_column_fix_03_clm4_0_43. This means that they were run before
+reverting the scrip grid file & rtm mapping file for f05. However, that
+shouldn't change any test results, since as far as I can tell, nothing in
+the CLM test suite tests f05 resolution. THESE TESTS SHOULD BE RERUN ON THE
+FINAL VERSION OF THE TAG BEFORE MERGING IT TO THE TRUNK.
+
+ bluefire build-namelist unit testing (run with -test): ALL PASS EXCEPT:
+not ok 42 - rtm tstep inconsistent
+ bluefire mksurfdata_map unit testing: ALL PASS
+ bluefire: All PASS except:
+015 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:1800 1.9x2.5 gx1v6@1850-2100 -10 cold FAIL! rc= 7
+018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+039 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 4
+043 blCn1 TBL.sh _sc_dh clm_transient_glcMEC_rcp8.5 20331231:1800 1.9x2.5 gx1v6@1850-2100 48 arb_ic FAIL! rc= 7
+049 smCI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+050 erCI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+051 brCI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+052 blCI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+ bluefire interactive testing: All PASS except:
+028 blCK8 TBL.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 4
+046 blC63 TBL.sh _sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ........................FAIL! rc= 7
+054 blH43 TBL.sh 17p_cnsc_do clm_transient_20thC 19790101:1800 1.9x2.5 gx1v6@1850-2000 -10 startup FAIL! rc= 7
+061 sm774 TSMtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 5
+062 bl774 TBLtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 4
+069 bl8Z3 TBLrst_tools.sh 21p_cncrpsc_do interpinic clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6FAIL! rc= 5
+076 sm974 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 6
+077 bl974 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 4
+078 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+079 bl9T4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 4
+080 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+081 bl9C4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 4
+ bluefire/CESM testing: All PASS except:
+BFAIL PST.f45_g37.I1850CN.bluefire_ibm.compare_hist.clm4_0_43
+BFAIL PET_PT.f45_g37.I1850.bluefire_ibm.compare_hist.clm4_0_43
+FAIL ERI.T31_g37.IG1850.bluefire_ibm.compare_hist.clm4_0_43
+FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.compare_hist.clm4_0_43
+FAIL ERP.f19_g16.IGRCP60CN.bluefire_ibm.compare_hist.clm4_0_43
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire_ibm.compare_hist.clm4_0_43
+BFAIL PST.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_43
+BFAIL PET_PT.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_43
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.compare_hist.clm4_0_43
+ bluefire/PTCLM testing: NOT DONE!
+ lynx/pgi testing: All PASS
+ lynx/pgi interactive testing: All PASS except:
+023 sm978 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850^tools__ds .....FAIL! rc= 6
+024 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+025 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+ lynx/CESM testing: All PASS except:
+CFAIL ERI.f10_f10.IRCP60CN.lynx_pathscale.C.123047
+BFAIL PST.f19_g16.I.lynx_pgi.compare_hist.clm4_0_43
+FAIL ERS.f19_g16.IRCP26CN.lynx_gnu.compare_hist.clm4_0_43
+FAIL ERS.f19_g16.IG1850.lynx_pgi.compare_hist.clm4_0_43
+ mirage,storm/ifort interactive testing: All PASS
+
+ Additional testing: Additional CESM B compset tests to test new RTM
+ mapping files. Ran these from cesm1_1_alpha13e; for most tests, switched
+ clm to virtual_column_fix_03_clm4_0_43; for the lynx f05 test, switched
+ clm to virtual_column_fix_04_clm4_0_43. Note that the baseline
+ comparisons are expected to fail, except for the f05 test.
+PASS ERI.f19_g16.BRCP45WCN.bluefire_ibm
+FAIL ERI.f19_g16.BRCP45WCN.bluefire_ibm.compare_hist.cesm1_1_alpha13e
+PASS ERS.f19_g16.B2000CNCHM.bluefire_ibm
+FAIL ERS.f19_g16.B2000CNCHM.bluefire_ibm.compare_hist.cesm1_1_alpha13e
+PASS SMS_D.f19_g16.B20TRC5.bluefire_ibm
+FAIL SMS_D.f19_g16.B20TRC5.bluefire_ibm.compare_hist.cesm1_1_alpha13e
+PASS ERS.f09_g16.B1850BPRP.bluefire_ibm
+FAIL ERS.f09_g16.B1850BPRP.bluefire_ibm.compare_hist.cesm1_1_alpha13e
+PASS SMS_D.f19_g16.B20TRC5.lynx_pgi
+FAIL SMS_D.f19_g16.B20TRC5.lynx_pgi.compare_hist.cesm1_1_alpha13e
+PASS SMS.f05_g16.B.lynx_pgi
+PASS SMS.f05_g16.B.lynx_pgi.compare_hist.cesm1_1_alpha13e
+
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_43;
+for my additional CESM tests, compared against cesm1_1_alpha13e
+
+Changes answers relative to baseline: YES, for GLC configurations and all
+f09 & f19 configurations with RTM
+
+ Baseline failures that are not because of GLC or RTM mapping file changes
+ are:
+
+>>> also failed in clm4_0_43; fails with "build-namelist ERROR:: bad input to drv_runlength option"
+028 blCK8 TBL.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 4
+>>> I think the problem here is that the baseline test is trying to build interpinic from the current directory,
+>>> rather than from BL_ROOT. This is a problem because of changes in the tools' build.
+069 bl8Z3 TBLrst_tools.sh 21p_cncrpsc_do interpinic clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6FAIL! rc= 5
+
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: - all GLC configurations (albice change & virtual column bug fix)
+ - all f09 & f19 configurations with RTM (due to change in RTM mapping file)
+ - what platforms/compilers: All
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ For GLC: climate-changing
+
+ For f09/f19 due to RTM mapping file change: Larger than roundoff, but
+ expected to have same climate. The new scrip grid files have
+ roundoff-level differences globally, plus differ substantially at the
+ poles because the old (incorrect) files had poles displaced from
+ -90/90.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? N/A
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - compset (and additional configure options):
+ - build-namelist options (or complete namelist):
+ - MSS location of output:
+
+ EVALUATION OF NEW CLIMATE NOT PERFORMED
+
+===============================================================
+===============================================================
+Tag name: clm4_0_44
+Originator(s): erik (Erik Kluzek)
+Date: Mon Jul 9 11:14:11 MDT 2012
+One-line Summary: Add wrf resolutions, update externals to cesm1_1_beta15, all components use build-namelist now
+
+Purpose of changes:
+
+set nsegspc=1 for all ne grids. Update to latest externals and new datm. Latest externals
+have ALL components using a build-namelist, and user_nl.$COMP files are created for you.
+Env files changed most fields in env_conf moved to env_run and secondly env_build.
+env_mach_pes moved to env_configure. env_conf removed. Add ne4, ne16, ne60 datasets. Add
+in ne16, ne30, ne120 20th Century datasets. Change of templates to have
+clm.buildnml.csh and clm.buildexe.csh copied to Buildconf. Have -chk_res option to
+build-namelist to check for resolution/mask, -note option to include (or not) note on the
+bottom of the namelist. Expand build-namelist unit test.
+
+Requirements for tag:
+ Requirements: test on bluefire (CESM, int, bat)
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+ 1513 (mksurfdata.pl doesn't work with -crop flag)
+  1514 (inconsistency in char variable fexcl)
+
+Known bugs (include bugzilla ID):
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 935 (RTM warning NOT an error)
+ 1025 (PTS_MODE can NOT use a global finidat file)
+ 1017 (PTS_MODE can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1454 (lack of 1D history files in CLM testing)
+ 1455 (bad time-stamp in CLM testing)
+ 1457 (bug in soil color in mksurfdata_map)
+ 1474 (Missing eulerian domain files)
+ 1479 (fails on jaguarpf)
+ 1485 (Performance issue with esmf_wrf_timemgr)
+ 1488 (Problem reading restarts@ne30_g16 for some layouts)
+ 1517 (Performance of datm in clm4_0_44 is even worse)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: Yes!
+ env*.xml files changed as follows:
+
+ env_conf.xml ------> removed!
+ Most variables moved to env_run.xml. Some variables moved to env_build.xml
+ env_mach_pes.xml --> Renamed to env_configure.xml
+
+ Buildconf directory ---> think of it as readonly!
+Describe any changes made to the namelist: Yes!
+
+  user_nl_* files for ALL components created for you. Put your changes to namelist
+  files here.
+
+ New options to clm build-namelist:
+ (all but -chk_res and -note are already exercised when running CESM)
+ -chk_res ------- Check resolution and land mask first.
+ -clm_startfile - Input file to use to startup for branch or startup cases.
+ -co2_type ------ CO2 type
+ -inst_string --- Instance string to use for clm_startfile for multi-instance cases.
+ -l_ncpl -------- Number of coupling time-steps to take per day.
+ -lnd_frac ------ Land fraction file to use (domain file)
+ -note ---------- Write out note about build-namelist usage to end of file.
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes: Yes!
+ Most of the throughput tests fail, and single-point performance looks horrible.
+ Although this may be a sporadic problem due to file systems. See bug 1517.
+
+Code reviewed by: self, mvertens, tcraig
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ Update to cesm1_1_beta15 external versions (other than timing)
+ scripts to scripts4_120604
+ Machines to Machines_120529
+ drv to drvseq4_1_15
+ datm to datm8_120528
+ csm_share to share3_120509
+ mct to MCT2_8_0_120503
+ pio to pio_1_4_5
+ stubs to cism1_120529
+ esmf_wrf_timemgr to esmf_wrf_timemgr_120427
+ mapping to mapping_120525
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>>>> Add tools testers for directories that use CESM configure
+ A models/lnd/clm/test/system/TCBCFGtools.sh
+ A models/lnd/clm/test/system/TSMCFGtools.sh
+ A models/lnd/clm/test/system/TBLCFGtools.sh
+
+ A models/lnd/clm/bld/unit_testers/NMLTest/CompFiles.pm - New test module
+
+ A models/lnd/clm/test/system/nl_files/mksrfdt_T31_crpglc_2000 - mksurfdata crop test
+
+>>>>>>>>>>>> Split out buildexe/buildnml from template so that editing templates
+>>>>>>>>>>>> isn't a nightmare
+ A models/lnd/clm/bld/clm.buildexe.csh
+ A models/lnd/clm/bld/clm.buildnml.csh
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>> Change tests a bit add a global crop test, get working on mirage
+ M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/CLM_runcmnd.sh --- Allow hostname==vpn* for yong
+ M models/lnd/clm/test/system/test_driver.sh ---- Add TOOLSLIBS TOOLS_CONF_STRING
+ if hostname=vpn* use setup for yong
+ M models/lnd/clm/test/system/input_tests_master Fix test blCK8, add global
+ crop test case for mksurfdata, add cfg-tool tests
+ M models/lnd/clm/test/system/TSM.sh ------------ Add cpl.log file
+ M models/lnd/clm/test/system/TCBtools.sh ------- Set SLIBS needed on generic machines
+
+ M models/lnd/clm/tools/mkmapdata/mkmapdata.sh ------- Use different version of ESMF
+ for regional, don't do RTM maps for regional
+ M models/lnd/clm/tools/interpinic/src/Makefile ------ Use NETCDF4 link
+ M models/lnd/clm/tools/mkmapgrids/src/Makefile ------ Use NETCDF4 link
+ M models/lnd/clm/tools/mksurfdata_map/src/Makefile -- Use NETCDF4 link
+ M models/lnd/clm/tools/mksurfdata_map/mksurfdata.pl - Send crop setting for
+ determining LAI file
+
+>>>>>>>>>>>> Add CESM options to build-namelist, add handling of SLIBS for generic
+>>>>>>>>>>>> machines. Add new grids: wrf, ne4, ne16, ne60. Set nsegspv=1 for hi-res/ne
+>>>>>>>>>>>> grids. Use drv/datm namelist definition/defaults files. Extend
+>>>>>>>>>>>> build-namelist unit tester test ALL resolutions/use-cases.
+ M models/lnd/clm/bld/configure ---- Add ability to handle slibs
+ M models/lnd/clm/bld/user_nl_clm -- Format change
+ M models/lnd/clm/bld/config_files/config_definition.xml - slibs, wrf grids
+ M models/lnd/clm/bld/build-namelist ---------- Add a bunch of options needed for CESM
+ (all but -chk_res and -note are already exercised when running CESM)
+ -chk_res ------- Check resolution and land mask first.
+ -clm_startfile - Input file to use to startup for branch or startup cases.
+ -co2_type ------ CO2 type
+ -inst_string --- Instance string to use for clm_startfile for multi-instance cases.
+ -l_ncpl -------- Number of coupling time-steps to take per day.
+ -lnd_frac ------ Land fraction file to use (domain file)
+ -note ---------- Write out note about build-namelist usage to end of file.
+ namelist definition/defaults files also come from drv/bld and datm/bld, get working
+ with latest externals
+ M models/lnd/clm/bld/clm.cpl7.template ------- Use new
+ clm.buildnml.csh/clm.buildexe.csh scripts which save us from the "\" nightmare
+ M models/lnd/clm/bld/queryDefaultNamelist.pl - namelist_defaults/definition files
+ are now split out in datm/drv directories
+ M models/lnd/clm/bld/queryDefaultXML.pm ------ definition files are an array now
+ M models/lnd/clm/bld/unit_testers/build-namelist_test.pl - Test a ton more
+ things. Add -compare, -test, -generate options. Test all use_cases and all
+ resolutions
+ M models/lnd/clm/bld/namelist_files/checkdatmfiles.ncl ------ Add some more resolutions
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml - Get rid of drv/datm
+ namelist items
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl - Get rid of drv/datm
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - Add chk_res, note
+ and default masks for WRF grids
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist ----- Use datm namelist
+ defaults/definition files.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Set do_rtm for
+ regional grids to .false. Add wrf grids: us20, wus12. Add ne4, ne16, ne60
+ files. Add 20th transient PFT for: ne16, ne30, ne60, ne120. Set nsegspc to 1
+ for hi-res and ne grids.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - Domain files
+ for 512x1024, ne4, ne16, ne60, ne240, and us20, wus12.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml - Set crop
+ for LAI and vegtyp files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml - Remove settings
+ already in drv/bld file
+
+>>>>>>>>>>>> Remove write(6 for write to iulog, remove unneeded writes
+>>>>>>>>>>>> use shr_pio over seq_pio. Allow -180-180 form.
+ M models/lnd/clm/src/cpl_share/clm_cpl_indices.F90 - remove write
+ M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 - write to iulog
+ M models/lnd/clm/src/biogeochem/CNrestMod.F90 ------ use endrun not write(6
+ M models/lnd/clm/src/main/ndepStreamMod.F90 -------- use shr_pio_getiotype
+ M models/lnd/clm/src/main/pftdynMod.F90 ------------ use endrun not write(6
+ M models/lnd/clm/src/main/histFileMod.F90 ---------- use shr_pio_getiotype
+ dimension hist_excl* as max_namlen+2
+ M models/lnd/clm/src/main/ncdio_pio.F90 ------------ use
+ shr_pio_getiotype/shr_pio_getiosys
+ M models/lnd/clm/src/main/surfrdMod.F90 ------------ remove write(6 statements
+ put write in "if ( masterproc )", if longitudes off by more than 300
+ see if -180-180 form works
+
+Summary of testing:
+
+ build-namelist unit testing: All PASS except...
+ us20
+ bluefire: All PASS except...
+018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+039 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 4
+049 smCI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+050 erCI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+051 brCI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+052 blCI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+058 bl514 TBLCFGtools.sh gen_domain CFGtools__ds T31.runoptions .................................FAIL! rc= 4
+060 bl754 TBLtools.sh mksurfdata_map tools__s namelist ..........................................FAIL! rc= 5
+061 sm774 TSMtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 5
+062 bl774 TBLtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 4
+071 bl924 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_T31_crpglc_2000^tools__ds .....FAIL! rc= 5
+073 bl953 TBLscript_tools.sh mkmapdata mkmapdata.sh mkmapdata_if10 ..............................FAIL! rc= 7
+075 bl954 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__ds ......FAIL! rc= 5
+078 sm974 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 6
+079 bl974 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 4
+080 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+081 bl9T4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 4
+082 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+083 bl9C4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 4
+ bluefire/CESM testing: All PASS except...
+FAIL ERI.T31_g37.IG1850.bluefire_ibm.tputcomp.clm4_0_43
+FAIL SMS_RLA.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_43
+FAIL SMS_RLB.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_43
+FAIL SMS_ROA.f45_f45.I.bluefire_ibm.tputcomp.clm4_0_43
+FAIL ERS_D.f45_g37.I.bluefire_ibm.tputcomp.clm4_0_43
+FAIL SMS.1x1_numaIA.ICN.bluefire_ibm.tputcomp.clm4_0_43
+FAIL ERS_E.T31_g37.I1850.bluefire_ibm.tputcomp.clm4_0_43
+FAIL ERS_D.f19_g16.IGRCP26CN.bluefire_ibm.tputcomp.clm4_0_43
+FAIL SMS.f10_f10.IRCP45CN.bluefire_ibm.tputcomp.clm4_0_43
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire_ibm.tputcomp.clm4_0_43
+BFAIL PST.f45_g37.I1850CN.bluefire_ibm.compare_hist.clm4_0_43
+BFAIL PET_PT.f45_g37.I1850.bluefire_ibm.compare_hist.clm4_0_43
+BFAIL ERI.T31_g37.IG1850.bluefire_ibm.generate.clm4_0_44
+BFAIL PST.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_43
+BFAIL PET_PT.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_43
+ lynx/pgi testing: All PASS
+ lynx/pgi interactive testing: All PASS except...
+024 sm978 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850^tools__ds .....FAIL! rc= 6
+025 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+026 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+ mirage,storm/ifort interactive testing: All PASS
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_43
+
+Changes answers relative to baseline: No, bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_43
+Originator(s): sacks (Bill Sacks); erik (Erik Kluzek)
+Date: Fri Apr 6 11:36:21 MDT 2012
+One-line Summary: Add diagnostic fields, modify some existing history fields
+
+Purpose of changes:
+
+Add new diagnostic fields to track snow and ice fluxes. Modify some soil-related fields to
+only be averaged over vegetated landunits (from Dave Lawrence). Fix some diagnostic fields
+that were incorrect, especially over lakes and urban areas. Change QICE to spval rather
+than 0 over non-ice_mec landunits. Rename QMELT to QSNOMELT. Delete redundant QICEYR. Add
+snow balance check from Keith Oleson. Add flexible handling of l2g_scale_type in
+subgridAveMod, replacing 'urbanh' c2l_scale_type and adding new functionality. Modify
+create_clm_s2x to only reference qflx_glcice in the run loop, not in initialization,
+because it is now NaN in initialization. Update scripts and esmf_wrf_timemgr. Changes in
+clm.cpl7.template from Tony. Add in unit_testers for build-namelist. Update to nsegspc
+branch. New qtr-degree RTM file, updates to mkmapdata.sh so requires -r if -f set,
+build-namelist changes to ensure rtm and glc options consistent, and updates of
+documentation to the latest cesm1_0_4 release tag. Sets nsegspc in the namelist and for
+ne30_g16 sets it to 5. Enhancements to baseline tests.
+
+Requirements for tag: test on bluefire (CESM, int, bat), lynx/pgi (int,bat), mirage.
+Fix perf bug 1485, Fix ne30 issue 1488, Fix history dimension issue 1489
+
+Test level of tag: std-test
+
+Bugs fixed (include bugzilla ID):
+ 1485 (Performance issue with esmf_wrf_timemgr)
+ 1488 (partial -- now works with nsegspc=5)
+ 1489 (history dimension issue)
+
+Known bugs (include bugzilla ID):
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 935 (RTM warning NOT an error)
+ 1025 (PTS_MODE can NOT use a global finidat file)
+ 1017 (PTS_MODE can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1454 (lack of 1D history files in CLM testing)
+ 1455 (bad time-stamp in CLM testing)
+ 1457 (bug in soil color in mksurfdata_map)
+ 1474 (Missing eulerian domain files)
+ 1479 (fails on jaguarpf)
+ 1485 (Performance issue with esmf_wrf_timemgr)
+ 1488 (Problem reading restarts@ne30_g16 for some layouts)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+
+ Modified clm.cpl7.template to no longer copy lnd_in to the case directory
+
+Describe any changes made to the namelist:
+
+ For glacierMEC, use QICE rather than QICEYR for annual history files
+ Set nsegspc to 5 for ne30np4 and the default of 20 otherwise
+
+List any changes to the defaults for the boundary datasets:
+
+ Fix qtr-degree RTM mapping file name
+
+Describe any substantial timing or memory changes:
+
+ Fixes bug 1485 (performance issue with esmf_wrf_timemgr)
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, Machines,
+ esmf_wrf_timemgr, cprnc
+
+ scripts to scripts4_120329d
+ Machines to Machines_120406
+ esmf_wrf_timemgr to esmf_wrf_timemgr_120327
+ cprnc to cprnc_120405
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>> Enhancements to baseline tests, and post-processor for test results
+A models/lnd/clm/test/system/get_cprnc_diffs.sh - Script used by TBL.sh and
+ TBLrst_tools.sh
+A models/lnd/clm/test/system/show_var_diffs.sh -- Post-processor for baseline test
+ results
+
+>>>>>>> Add build-namelist unit_tester
+A models/lnd/clm/bld/unit_testers
+A models/lnd/clm/bld/unit_testers/build-namelist_test.pl
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>> Use CSMDATA rather than HOME
+M models/lnd/clm/test/system/nl_files/getregional
+
+>>>>>>> Require -res to be set if -f option used
+M models/lnd/clm/tools/mkmapdata/mkmapdata.sh
+
+>>>>>>> Fix qtr-degree RTM map, set nsegspc, work on usability
+M models/lnd/clm/bld/user_nl_clm ---- Add notes about setting some things
+ with build-namelist options
+M models/lnd/clm/bld/build-namelist - Set nsegspc, make sure glc_grid, glc_smb
+ do_rtm, and maxpatch_glcmec aren't set inconsistently between user_nl_clm
+ and build-namelist options
+M models/lnd/clm/bld/README --------- Add notes about new unit_testers
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- qtr-degree RTM
+ mapping file, and set nsegspc
+
+>>>>>>> Bring in documentation updates from cesm1_0_4_n05_clm4_0_32, notes on setting
+>>>>>>> finidat, adding history fields list
+M models/lnd/clm/doc/UsersGuide/special_cases.xml
+M models/lnd/clm/doc/UsersGuide/preface.xml
+M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+M models/lnd/clm/doc/UsersGuide/custom.xml
+M models/lnd/clm/doc/UsersGuide/Makefile
+
+>>>>>>> Remove duplicate line
+M models/lnd/clm/test/system/tests_pretag_bluefire
+
+>>>>>>> Use get_cprnc_diffs.sh; truly print diffs from last file with a failed comparison
+>>>>>>> rather than just printing diffs if last comparison failed
+M models/lnd/clm/test/system/TBL.sh
+M models/lnd/clm/test/system/TBLrst_tools.sh
+
+>>>>>>> Use QICE rather than QICEYR for annual history files
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000_glacierMEC_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850_glacierMEC_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_glacierMEC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/glacierMEC_pd.xml
+
+>>>>>>> no longer copy lnd_in to the case directory
+M models/lnd/clm/bld/clm.cpl7.template
+
+>>>>>>> Add new variables for tracking snow and ice fluxes
+M models/lnd/clm/src/main/clmtype.F90
+M models/lnd/clm/src/main/clmtypeInitMod.F90 ----------- Also changed init of btran
+ and fpsn to spval
+M models/lnd/clm/src/main/histFldsMod.F90 -------------- Add some fields, add
+ c2l_scale_type or l2g_scale_type for others; rename QMELT to QSNOMELT; delete
+ QICEYR
+M models/lnd/clm/src/main/histFileMod.F90 -------------- Time-constant fields just
+ averaged over certain land units; add handling of set_noglcmec for pft-level
+ variables. Also, use lon & lat rather than lonatm & latatm
+M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 ------ Compute qflx_glcice_frz
+M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 - Compute qflx_glcice_melt
+ and qflx_snofrz_col
+M models/lnd/clm/src/biogeophys/HydrologyLakeMod.F90 --- To get proper grid cell
+ averages, turn some locals into globals, and add calculation of additional
+ fields
+
+>>>>>>> Change QICE to spval rather than 0 over non-ice_mec landunits
+M models/lnd/clm/src/biogeophys/clm_driverInitMod.F90
+
+>>>>>>> Add snow balance check, fix water balance check for glc_dyntopo
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90 - Add calculation of
+ qflx_sl_top_soil, needed for snow balance check
+M models/lnd/clm/src/main/pft2colMod.F90 ------------- Column-level averages of some
+ variables needed for snow balance check; also fixed average of qflx_evap_tot
+ for lakes
+
+>>>>>>> Only reference qflx_glcice in the run loop, not in initialization
+M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90 - pass init argument
+M models/lnd/clm/src/main/clm_glclnd.F90 ------ in init, qice remains 0
+
+>>>>>>> Flexible handling of l2g_scale_type
+M models/lnd/clm/src/main/clm_varcon.F90 ---- max_lunit parameter
+M models/lnd/clm/src/main/subgridAveMod.F90 - new subroutines for concise handling
+ of l2g_scale_type; add checks for l2g_scale_type==spval; remove urbanh
+ c2l_scale_type
+
+
+
+Summary of testing:
+
+ bluefire: All PASS except:
+004 blC91 TBL.sh _sc_dh clm_std^nl_urb 20030101:3600 4x5 gx3v7 -6 arb_ic ........................FAIL! rc= 7
+008 blTZ1 TBL.sh 21p_cncrpsc_dh clm_stdIgnYr^nl_crop 20020401:3600 10x15 USGS -10 cold ..........FAIL! rc= 7
+011 blD91 TBL.sh _persc_dh clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 7
+015 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:1800 1.9x2.5 gx1v6@1850-2100 -10 cold FAIL! rc= 7
+018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+019 blW51 TBL.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -5 cold .................FAIL! rc= 7
+023 blHO2 TBL.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 7
+027 blHo1 TBL.sh 17p_cnsc_dh clm_drydep 20000101:1800 10x15 USGS@2000 -10 cold ..................FAIL! rc= 7
+031 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 7
+035 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 48 cold ...............FAIL! rc= 7
+036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+039 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 4
+043 blCn1 TBL.sh _sc_dh clm_transient_glcMEC_rcp8.5 20331231:1800 1.9x2.5 gx1v6@1850-2100 48 arb_ic FAIL! rc= 7
+044 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 2
+045 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 2
+046 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 2
+047 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 2
+051 blH#2 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 ne30np4 gx1v6@2000 48 startup .........FAIL! rc= 7
+053 smCI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+054 erCI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+055 brCI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+056 blCI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+ bluefire interactive testing: All PASS except:
+008 blC97 TBL.sh _sc_do clm_spin^nl_urb 20030101:1800 4x5 gx3v7@1850 -6 arb_ic ..................FAIL! rc= 7
+012 blHS3 TBL.sh 17p_cnsc_do clm_usrdat 20030101:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic .....FAIL! rc= 7
+028 blCK8 TBL.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 4
+034 blF93 TBL.sh 17p_sc_do clm_drydep^nl_voc 20021230:1800 4x5 gx3v7 48 cold ....................FAIL! rc= 7
+038 blC83 TBL.sh _sc_do clm_std^nl_urb 20020115:3600 5x5_amazon navy -10 arb_ic .................FAIL! rc= 7
+046 blC63 TBL.sh _sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ........................FAIL! rc= 7
+054 blH43 TBL.sh 17p_cnsc_do clm_transient_20thC 19790101:1800 1.9x2.5 gx1v6@1850-2000 -10 startup FAIL! rc= 7
+061 sm774 TSMtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 5
+062 bl774 TBLtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 4
+069 bl8Z3 TBLrst_tools.sh 21p_cncrpsc_do interpinic clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6FAIL! rc= 7
+075 bl9S4 TBLscript_tools.sh ncl_scripts getregional_datasets.pl getregional ....................FAIL! rc= 5
+076 sm974 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 6
+077 bl974 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 4
+078 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+079 bl9T4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 4
+080 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+081 bl9C4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 4
+ bluefire/CESM testing: All PASS except:
+BFAIL PST.f45_g37.I1850CN.bluefire_ibm.compare_hist.clm4_0_42
+BFAIL PET_PT.f45_g37.I1850.bluefire_ibm.compare_hist.clm4_0_42
+BFAIL PST.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_42
+BFAIL PET_PT.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_42
+ bluefire/PTCLM testing: Not done
+ lynx/pgi testing: All PASS except:
+004 blC92 TBL.sh _sc_dm clm_std^nl_urb 20030101:3600 4x5 gx3v7 -6 arb_ic ........................FAIL! rc= 7
+007 blD92 TBL.sh _persc_dm clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 7
+011 blF92 TBL.sh 17p_sc_dm clm_drydep^nl_voc 20021230:1800 4x5 gx3v7 48 cold ....................FAIL! rc= 7
+015 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 48 cold ...............FAIL! rc= 7
+019 blL52 TBL.sh _sc_dm clm_std^nl_urb 20020115:1800 10x15 USGS 48 arb_ic .......................FAIL! rc= 7
+ lynx/pgi interactive testing: All PASS except:
+008 blTZ3 TBL.sh 21p_cncrpsc_do clm_stdIgnYr^nl_crop 20020401:3600 10x15 USGS -10 cold ..........FAIL! rc= 7
+023 sm978 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850^tools__ds .....FAIL! rc= 6
+024 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+025 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+ lynx/CESM testing: All PASS except:
+FAIL ERS_D.T31_g37.I1850.lynx_pgi.compare_hist.clm4_0_42
+FAIL ERS_D.T31_g37.I1850.lynx_pgi.compare_hist.clm4_0_42
+CFAIL ERI.f10_f10.IRCP60CN.lynx_pathscale.C.124327
+BFAIL PST.f19_g16.I.lynx_pgi.compare_hist.clm4_0_42
+ mirage,storm/ifort interactive testing: All PASS except:
+007 blD94 TBL.sh _persc_ds clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 7
+019 blL54 TBL.sh _sc_ds clm_std^nl_urb 20020115:1800 10x15 USGS 48 arb_ic .......................FAIL! rc= 7
+023 blR53 TBL.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:1800 10x15 USGS@1850 48 cold ............FAIL! rc= 7
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_42
+
+Changes answers relative to baseline: Just changes some diagnostic fields
+
+ Changes the following default history fields: TSOI, HCSOI, ZWT, WA, WT, H2OSOI,
+ SOILLIQ, SOILICE, SOILWATER_10CM, QICE, QSNWCPICE_NODYNLNDUSE, QSNWCPLIQ
+
+ Renames QMELT to QSNOMELT
+
+ Also changes some fields not output by default
+
+ Also changes cpl avghist files due to changes in qflx_glcice in initialization, but
+ this doesn't affect the simulation
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: all
+ - what platforms/compilers: all
+ - nature of change: diagnostic fields only
+
+===============================================================
+===============================================================
+Tag name: clm4_0_42
+Originator(s): erik (Erik Kluzek)
+Date: Tue Mar 27 21:14:59 MDT 2012
+One-line Summary: Bring in Francis Vitt's MEGAN changes.
+
+Purpose of changes:
+
+Bring Francis Vitt's MEGAN branch to the trunk. Replace the five VOC
+compounds with the MEGAN model that allows up to 150 compounds to be
+generated and passed to the driver. The mechanism allows the fields to
+be chosen by a driver namelist which CLM responds to.
+
+Requirements for tag: test on bluefire (CESM, int, bat), lynx/pgi (int,bat), mirage
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+ 1459 (PTSMODE fails)
+ 1480 (Darwin_intel build)
+ 1482 (Problems running 1x1 resolutions for CLM)
+ 1484 (re-configure removes the user_nl_clm)
+ 1486 (bad irrigation maps)
+
+Known bugs (include bugzilla ID):
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 935 (RTM warning NOT an error)
+ 1025 (PTS_MODE can NOT use a global finidat file)
+ 1017 (PTS_MODE can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1454 (lack of 1D history files in CLM testing)
+ 1455 (bad time-stamp in CLM testing)
+ 1457 (bug in soil color in mksurfdata_map)
+ 1474 (Missing eulerian domain files)
+ 1479 (fails on jaguarpf)
+ 1485 (Performance issue with esmf_wrf_timemgr)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist:
+
+ Add new -megan option to CLM build-namelist to add a megan namelist to
+ the drv_flds_in file.
+ Rename -drv_drydep option to -drydep.
+
+ New namelist items for MEGAN: megan_factors_file, megan_specifier, and
+ megan_mapped_emisfctrs go into the megan_emis_nl namelist in drv_flds_in
+
+ History fields removed: BIOGENCO, ISOPRENE (replaced by MEG_isoprene),
+ MONOTERP, ORVOC, ORVOC, OVOC
+
+ Units of VOCFLXT changed from uGC/M2/H to moles/m2/sec
+
+ New history fields:
+
++ GAMMAC = gamma C for VOC calc (0-1)
++ MEG_2met_2s = MEGAN flux (kg/m2/sec)
++ MEG_2met_nonatriene = MEGAN flux (kg/m2/sec)
++ MEG_2met_s = MEGAN flux (kg/m2/sec)
++ MEG_2met_styrene = MEGAN flux (kg/m2/sec)
++ MEG_3met_3DCTT = MEGAN flux (kg/m2/sec)
++ MEG_Ehsalate = MEGAN flux (kg/m2/sec)
++ MEG_MBO_2m3e2ol = MEGAN flux (kg/m2/sec)
++ MEG_MBO_3m2e1ol = MEGAN flux (kg/m2/sec)
++ MEG_MBO_3m3e1ol = MEGAN flux (kg/m2/sec)
++ MEG_Napthalene = MEGAN flux (kg/m2/sec)
++ MEG_PPPP_2s = MEGAN flux (kg/m2/sec)
++ MEG_acetaldehyde = MEGAN flux (kg/m2/sec)
++ MEG_acetic_acid = MEGAN flux (kg/m2/sec)
++ MEG_acetone = MEGAN flux (kg/m2/sec)
++ MEG_acoradiene = MEGAN flux (kg/m2/sec)
++ MEG_ammonia = MEGAN flux (kg/m2/sec)
++ MEG_anisole = MEGAN flux (kg/m2/sec)
++ MEG_aromadendrene = MEGAN flux (kg/m2/sec)
++ MEG_benzaldehyde = MEGAN flux (kg/m2/sec)
++ MEG_benzyl-acetate = MEGAN flux (kg/m2/sec)
++ MEG_benzyl-alcohol = MEGAN flux (kg/m2/sec)
++ MEG_bergamotene_a = MEGAN flux (kg/m2/sec)
++ MEG_bergamotene_b = MEGAN flux (kg/m2/sec)
++ MEG_bisabolene_a = MEGAN flux (kg/m2/sec)
++ MEG_bisabolene_b = MEGAN flux (kg/m2/sec)
++ MEG_bornene = MEGAN flux (kg/m2/sec)
++ MEG_borneol = MEGAN flux (kg/m2/sec)
++ MEG_bornyl_ACT = MEGAN flux (kg/m2/sec)
++ MEG_bourbonene_b = MEGAN flux (kg/m2/sec)
++ MEG_butanone_2 = MEGAN flux (kg/m2/sec)
++ MEG_butene = MEGAN flux (kg/m2/sec)
++ MEG_cadinene_d = MEGAN flux (kg/m2/sec)
++ MEG_cadinene_g = MEGAN flux (kg/m2/sec)
++ MEG_camphene = MEGAN flux (kg/m2/sec)
++ MEG_camphor = MEGAN flux (kg/m2/sec)
++ MEG_carbon_2s = MEGAN flux (kg/m2/sec)
++ MEG_carbon_monoxide = MEGAN flux (kg/m2/sec)
++ MEG_carbonyl_s = MEGAN flux (kg/m2/sec)
++ MEG_carene_3 = MEGAN flux (kg/m2/sec)
++ MEG_caryophyllene_b = MEGAN flux (kg/m2/sec)
++ MEG_cedrene_a = MEGAN flux (kg/m2/sec)
++ MEG_cedrol = MEGAN flux (kg/m2/sec)
++ MEG_cineole_1_8 = MEGAN flux (kg/m2/sec)
++ MEG_copaene_a = MEGAN flux (kg/m2/sec)
++ MEG_cubebene_a = MEGAN flux (kg/m2/sec)
++ MEG_cubebene_b = MEGAN flux (kg/m2/sec)
++ MEG_cymene_o = MEGAN flux (kg/m2/sec)
++ MEG_cymene_p = MEGAN flux (kg/m2/sec)
++ MEG_decanal = MEGAN flux (kg/m2/sec)
++ MEG_diallyl_2s = MEGAN flux (kg/m2/sec)
++ MEG_dodecene_1 = MEGAN flux (kg/m2/sec)
++ MEG_elemene_b = MEGAN flux (kg/m2/sec)
++ MEG_estragole = MEGAN flux (kg/m2/sec)
++ MEG_ethane = MEGAN flux (kg/m2/sec)
++ MEG_ethanol = MEGAN flux (kg/m2/sec)
++ MEG_ethene = MEGAN flux (kg/m2/sec)
++ MEG_farnescene_a = MEGAN flux (kg/m2/sec)
++ MEG_farnescene_b = MEGAN flux (kg/m2/sec)
++ MEG_fenchene_a = MEGAN flux (kg/m2/sec)
++ MEG_fenchone = MEGAN flux (kg/m2/sec)
++ MEG_formaldehyde = MEGAN flux (kg/m2/sec)
++ MEG_formic_acid = MEGAN flux (kg/m2/sec)
++ MEG_geranyl_acetone = MEGAN flux (kg/m2/sec)
++ MEG_germacrene_B = MEGAN flux (kg/m2/sec)
++ MEG_germacrene_D = MEGAN flux (kg/m2/sec)
++ MEG_gurjunene_b = MEGAN flux (kg/m2/sec)
++ MEG_heptanal = MEGAN flux (kg/m2/sec)
++ MEG_heptane = MEGAN flux (kg/m2/sec)
++ MEG_heptanone = MEGAN flux (kg/m2/sec)
++ MEG_hexanal = MEGAN flux (kg/m2/sec)
++ MEG_hexane = MEGAN flux (kg/m2/sec)
++ MEG_hexanol_1 = MEGAN flux (kg/m2/sec)
++ MEG_hexenal_c3 = MEGAN flux (kg/m2/sec)
++ MEG_hexenal_t2 = MEGAN flux (kg/m2/sec)
++ MEG_hexenol_c3 = MEGAN flux (kg/m2/sec)
++ MEG_hexenyl_ACT_c3 = MEGAN flux (kg/m2/sec)
++ MEG_homosalate = MEGAN flux (kg/m2/sec)
++ MEG_humulene_a = MEGAN flux (kg/m2/sec)
++ MEG_humulene_g = MEGAN flux (kg/m2/sec)
++ MEG_hydrogen_cyanide = MEGAN flux (kg/m2/sec)
++ MEG_hydrogen_s = MEGAN flux (kg/m2/sec)
++ MEG_indole = MEGAN flux (kg/m2/sec)
++ MEG_ionone_b = MEGAN flux (kg/m2/sec)
++ MEG_ipsenol = MEGAN flux (kg/m2/sec)
++ MEG_isolongifolene = MEGAN flux (kg/m2/sec)
++ MEG_isoprene = MEGAN flux (kg/m2/sec)
++ MEG_jasmone = MEGAN flux (kg/m2/sec)
++ MEG_limonene = MEGAN flux (kg/m2/sec)
++ MEG_linalool = MEGAN flux (kg/m2/sec)
++ MEG_linalool_OXD_c = MEGAN flux (kg/m2/sec)
++ MEG_linalool_OXD_t = MEGAN flux (kg/m2/sec)
++ MEG_longifolene = MEGAN flux (kg/m2/sec)
++ MEG_longipinene = MEGAN flux (kg/m2/sec)
++ MEG_met_benzoate = MEGAN flux (kg/m2/sec)
++ MEG_met_bromide = MEGAN flux (kg/m2/sec)
++ MEG_met_chloride = MEGAN flux (kg/m2/sec)
++ MEG_met_heptenone = MEGAN flux (kg/m2/sec)
++ MEG_met_iodide = MEGAN flux (kg/m2/sec)
++ MEG_met_jasmonate = MEGAN flux (kg/m2/sec)
++ MEG_met_mercaptan = MEGAN flux (kg/m2/sec)
++ MEG_met_propenyl_2s = MEGAN flux (kg/m2/sec)
++ MEG_met_salicylate = MEGAN flux (kg/m2/sec)
++ MEG_meta-cymenene = MEGAN flux (kg/m2/sec)
++ MEG_methane = MEGAN flux (kg/m2/sec)
++ MEG_methanol = MEGAN flux (kg/m2/sec)
++ MEG_muurolene_a = MEGAN flux (kg/m2/sec)
++ MEG_muurolene_g = MEGAN flux (kg/m2/sec)
++ MEG_myrcene = MEGAN flux (kg/m2/sec)
++ MEG_myrtenal = MEGAN flux (kg/m2/sec)
++ MEG_nerolidol_c = MEGAN flux (kg/m2/sec)
++ MEG_nerolidol_t = MEGAN flux (kg/m2/sec)
++ MEG_neryl_acetone = MEGAN flux (kg/m2/sec)
++ MEG_nitric_OXD = MEGAN flux (kg/m2/sec)
++ MEG_nitrous_OXD = MEGAN flux (kg/m2/sec)
++ MEG_nonanal = MEGAN flux (kg/m2/sec)
++ MEG_nonenal = MEGAN flux (kg/m2/sec)
++ MEG_ocimene_al = MEGAN flux (kg/m2/sec)
++ MEG_ocimene_c_b = MEGAN flux (kg/m2/sec)
++ MEG_ocimene_t_b = MEGAN flux (kg/m2/sec)
++ MEG_octanal = MEGAN flux (kg/m2/sec)
++ MEG_octanol = MEGAN flux (kg/m2/sec)
++ MEG_octenol_1e3ol = MEGAN flux (kg/m2/sec)
++ MEG_oxopentanal = MEGAN flux (kg/m2/sec)
++ MEG_pentanal = MEGAN flux (kg/m2/sec)
++ MEG_pentane = MEGAN flux (kg/m2/sec)
++ MEG_phellandrene_a = MEGAN flux (kg/m2/sec)
++ MEG_phellandrene_b = MEGAN flux (kg/m2/sec)
++ MEG_phenyl_CCO = MEGAN flux (kg/m2/sec)
++ MEG_pinene_a = MEGAN flux (kg/m2/sec)
++ MEG_pinene_b = MEGAN flux (kg/m2/sec)
++ MEG_piperitone = MEGAN flux (kg/m2/sec)
++ MEG_propane = MEGAN flux (kg/m2/sec)
++ MEG_propene = MEGAN flux (kg/m2/sec)
++ MEG_pyruvic_acid = MEGAN flux (kg/m2/sec)
++ MEG_sabinene = MEGAN flux (kg/m2/sec)
++ MEG_selinene_b = MEGAN flux (kg/m2/sec)
++ MEG_selinene_d = MEGAN flux (kg/m2/sec)
++ MEG_terpinene_a = MEGAN flux (kg/m2/sec)
++ MEG_terpinene_g = MEGAN flux (kg/m2/sec)
++ MEG_terpineol_4 = MEGAN flux (kg/m2/sec)
++ MEG_terpineol_a = MEGAN flux (kg/m2/sec)
++ MEG_terpinolene = MEGAN flux (kg/m2/sec)
++ MEG_terpinyl_ACT_a = MEGAN flux (kg/m2/sec)
++ MEG_tetradecene_1 = MEGAN flux (kg/m2/sec)
++ MEG_thujene_a = MEGAN flux (kg/m2/sec)
++ MEG_thujone_a = MEGAN flux (kg/m2/sec)
++ MEG_thujone_b = MEGAN flux (kg/m2/sec)
++ MEG_toluene = MEGAN flux (kg/m2/sec)
++ MEG_tricyclene = MEGAN flux (kg/m2/sec)
++ MEG_verbenene = MEGAN flux (kg/m2/sec)
+
+List any changes to the defaults for the boundary datasets:
+ Correct fpftdyn historical f05 dataset, and add rcp datasets
+ Fix irrig map for f19 and f10
+
+Describe any substantial timing or memory changes: None
+ Although bug 1485 is NOT fixed! (4X performance hit due to updated esmf_wrf_timemgr!)
+
+Code reviewed by: self,fvitt
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, Machines,
+ drv, pio, cprnc, and cism
+
+ scripts to scripts4_120323
+ Machines to Machines_120323a
+ drv to drvseq4_1_04
+ pio to pio_1_4_2
+ cprnc to cprnc_120322
+ cism to cism1_120322
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+A models/lnd/clm/bld/namelist_files/history_fields.xsl - Style sheet to view history_fields XML file
+A + models/lnd/clm/src/biogeochem/MEGANFactorsMod.F90 ---- MEGAN factors file
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>> Remove PTS-MODE restart tests
+M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+M models/lnd/clm/test/system/tests_posttag_yong
+MM models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi
+M models/lnd/clm/test/system/tests_posttag_nompi_regression
+M models/lnd/clm/test/system/test_driver.sh ----- Correct machine name for lynx
+M models/lnd/clm/test/system/nl_files/nl_voc ---- Add megan namelist
+M models/lnd/clm/test/system/nl_files/clm_drydep Add -megan option, rename
+ -drv_drydep to -drydep
+M models/lnd/clm/test/system/input_tests_master - Tests with VOC must use
+ clm_drydep, make CA8 tests use drydep
+
+M models/lnd/clm/tools/SVN_EXTERNAL_DIRECTORIES - update cprnc
+
+M models/lnd/clm/bld/configure --------- Change top level model from cesm to driver
+M models/lnd/clm/bld/build-namelist ---- Add -megan option/namelist, rename
+ -drv_drydep to -drydep, add checking for megan namelist items
+M models/lnd/clm/bld/clm.cpl7.template - Fix multi-instance issues, and
+ don't overwrite user_nl_clm file if it already exists
+
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml ----- Update driver
+ namelist items, add megan namelist, more fields to drydep_list, list
+ megan compounds
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml --- Add commented
+ out finidat file for f05, update f05 fpftdyn and add fpftdyn for f05 rcp's
+ update irrig 10x15 mapping file
+M models/lnd/clm/bld/namelist_files/namelist_defaults_drydep.xml - Add defaults
+ for megan namelist
+
+M models/lnd/clm/src/cpl_share/clm_cpl_indices.F90 - Change VOC fields to megan
+ fields
+M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90 ------ Change VOC fields to megan
+ fields
+M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90 ---- Change VOC fields to megan fields
+M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 - Use Megan_factors_mod,
+ add VOCEmission_init, megan namelist determines the fields that will be
+ output rather than the 5 VOC fields
+MM models/lnd/clm/src/biogeochem/DryDepVelocity.F90 - More fields that can be
+ "mapped": 'SOAM', 'SOAI', 'SOAT', 'SOAB', 'SOAX'
+M models/lnd/clm/src/main/clm_varpar.F90 ----------- Remove nvoc
+M models/lnd/clm/src/main/clm_initializeMod.F90 ---- Add call to VOCEmission_init
+M models/lnd/clm/src/main/clmtypeInitMod.F90 ------- Remove averaged voc fields
+M models/lnd/clm/src/main/clm_atmlnd.F90 ----------- Remove voc add megan fields
+M models/lnd/clm/src/main/findHistFields.pl -------- Add ability to handle new
+ megan fields
+M models/lnd/clm/src/main/clm_driver.F90 ----------- Initialize cisun/cisha
+ to -999. each time-step for VOCEmission
+M models/lnd/clm/src/main/ncdio_pio.F90 ------------ Changes from John Truesdale
+ so that PTS_MODE will work
+M models/lnd/clm/src/main/clmtype.F90 -------------- VOC fields have extra
+ dimension remove averaged field
+M models/lnd/clm/src/main/histFldsMod.F90 ---------- Remove specific VOC fields
+ add MEG_ fields
+
+Summary of testing:
+
+ bluefire: All PASS except...
+018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+023 blHO2 TBL.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 7
+027 blHo1 TBL.sh 17p_cnsc_dh clm_drydep 20000101:1800 10x15 USGS@2000 -10 cold ..................FAIL! rc= 7
+036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+039 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 4
+044 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 2
+045 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 2
+046 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 2
+047 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 2
+ bluefire interactive testing: All PASS except
+028 blCK8 TBL.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 4
+034 blF93 TBL.sh 17p_sc_do clm_drydep^nl_voc 20021230:1800 4x5 gx3v7 48 cold ....................FAIL! rc= 7
+050 blHQ4 TBL.sh 17p_cnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -150 cold ............FAIL! rc= 7
+061 sm774 TSMtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 5
+062 bl774 TBLtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 4
+073 bl954 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__ds ......FAIL! rc= 6
+076 sm974 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 6
+077 bl974 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 4
+078 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+079 bl9T4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 4
+080 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+081 bl9C4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 4
+ bluefire/CESM testing: All PASS except..
+BFAIL PST.f45_g37.I1850CN.bluefire_ibm.compare_hist.clm4_0_41
+BFAIL PET_PT.f45_g37.I1850.bluefire_ibm.compare_hist.clm4_0_41
+BFAIL SMS.1x1_numaIA.ICN.bluefire_ibm.compare_hist.clm4_0_41
+BFAIL ERP.1x1_mexicocityMEX.I.bluefire_ibm.compare_hist.clm4_0_41
+BFAIL PST.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_41
+BFAIL PET_PT.f10_f10.I20TRCN.bluefire_ibm.compare_hist.clm4_0_41
+ bluefire/PTCLM testing: All FAIL
+ lynx interactive testing: ALL PASS up to...
+023 sm978 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850^tools__ds .....FAIL! rc= 6
+ lynx/CESM testing: All PASS except..
+FAIL ERS_D.T31_g37.I1850.lynx_pgi.generate.clm4_0_42
+CFAIL ERI.f10_f10.IRCP60CN.lynx_pathscale.GC.121827
+BFAIL PST.f19_g16.I.lynx_pgi.compare_hist.clm4_0_41
+BFAIL ERS.1x1_vancouverCAN.I.lynx_pgi.compare_hist.clm4_0_41
+ mirage,storm/ifort interactive testing: All PASS!
+ yong/darwin/ifort interactive testing: All PASS up to...
+005 smCL4 TSM.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_41
+
+Changes answers relative to baseline: bit-for-bit (except cases with MEGAN or VOC)
+
+===============================================================
+===============================================================
+Tag name: clm4_0_41
+Originator(s): erik (Erik Kluzek)
+Date: Tue Mar 13 23:43:45 MDT 2012
+One-line Summary: Bring rmfmesh/rtmmap branches to trunk
+
+Purpose of changes:
+
+Get working with latest scripts and have clm template call build-namelist directly. Move
+rmfmesh/rtmmap branch to trunk. Remove CASA completely. Start using RTM mapping files.
+Allow bigger tolerance for mksurfdata_map frac up to 1.e-5 so can work for f4x5. New
+half-degree mapping files. Remove code to calculate RTM mapping. Remove ability to set
+maxpatch_pft to something different than numpft in CLM configure. Remove
+-ad_spinup/-exit_spinup options in configure make generic -spinup option with a few
+allowed values (similar to the clm45sci version of configure). New 1850 fsurdat dataset
+for ne240np4. Update externals to the latest, get test_driver working.
+
+Requirements for tag: test on bluefire (CESM,int,bat), lynx/pgi (CESM), mirage,
+template calls build-namelist. Fix 1477, 1476, 1468, 1467
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+ 1477 (Bad f05 datasets)
+ 1476 (Problem with stand-alone build on bluefire)
+ 1468 (Bad f09, f19 SCRIP Grid files)
+ 1467 (Remove runinit_ibm.csh script)
+ 1449 (Remove fine-mesh)
+ 1448 (Remove CASA)
+ 1432 (Several resolutions fail for new mksurfdata_map)
+
+Known bugs (include bugzilla ID):
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 935 (RTM warning NOT an error)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1454 (lack of 1D history files in CLM testing)
+ 1455 (bad time-stamp in CLM testing)
+ 1457 (bug in soil color in mksurfdata_map)
+ 1459 (PTSMODE fails)
+ 1474 (Missing eulerian domain files)
+ 1479 (fails on jaguarpf)
+ 1480 (Darwin_intel build)
+ 1482 (Problems running 1x1 resolutions for CLM)
+ 1485 (Performance issue with esmf_wrf_timemgr)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+ Remove CASA option, don't allow maxpft to be set.
+ CESM scripts/Machines updated.
+
+Describe any changes made to the namelist:
+
+ BuildConf/clm.buildnml.csh now becomes a script that simply calls the clm
+ build-namelist script to build your namelist. This means you effectively treat it
+ as a READ-only script that you don't put changes into! Instead you use
+ the "user_nl_clm" file to put your custom changes to the namelist into.
+ The use of user_nl_clm is documented in the CLM User's Guide at...
+
+ http://www.cesm.ucar.edu/models/cesm1.0/clm/models/lnd/clm/doc/UsersGuide/x1423.html#config_time_nml
+
+ Use preview_namelists to see full namelists that will be created.
+
+List any changes to the defaults for the boundary datasets: Activate RTM mapping files
+ New 1850 ne240 fsurdat file, replace all f05 mapping files, and f05, f09,
+ and f19 SCRIP grid files
+
+Describe any substantial timing or memory changes: Yes!
+ Much less global memory needed now! Only one temporary global integer array
+ used.
+
+ 4X performance hit due to updated esmf_wrf_timemgr! (see bug 1485)
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, Machines,
+ drv, csm_share, esmf_wrf_timemgr, datm
+
+ scripts to datm8_120219
+ Machines to Machines_120309
+ drv to drvseq4_1_02
+ csm_share to share3_120308
+ esmf_wrf_timemgr to esmf_wrf_timemgr_120218
+ datm to datm8_120219
+
+List all files eliminated:
+
+>>>>>>> Eliminate stand-alone intrepid/kraken testing files, CASA, fine-mesh,
+>>>>>>> and RTM mapping calc. Eliminate interpinic run script, too hard to support.
+D models/lnd/clm/test/system/tests_posttag_intrepid
+D models/lnd/clm/test/system/tests_posttag_intrepid_nompi
+D models/lnd/clm/test/system/tests_posttag_kraken
+D models/lnd/clm/tools/interpinic/runinit_ibm.csh
+D models/lnd/clm/src/biogeochem/CASAPhenologyMod.F90
+D models/lnd/clm/src/biogeochem/CASAMod.F90
+D models/lnd/clm/src/main/CASAiniTimeVarMod.F90
+D models/lnd/clm/src/main/downscaleMod.F90
+D models/lnd/clm/src/riverroute/RtmMapMod.F90
+
+List all files added and what they do: Add config defaults files for supported
+ single point datasets, add empty user_nl_clm file
+
+A + models/lnd/clm/bld/user_nl_clm
+A + models/lnd/clm/bld/config_files/config_defaults_1x1_smallvilleIA.xml
+A + models/lnd/clm/bld/config_files/config_defaults_1x1_mexicocityMEX.xml
+A + models/lnd/clm/bld/config_files/config_defaults_1x1_numaIA.xml
+A + models/lnd/clm/bld/config_files/config_defaults_1x1_vancouverCAN.xml
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>> Changes to config_file for changes to configure
+>>>>>>>>>>>>> eliminate use of maxpft, ad_spinup and exit_spinup use spinup option
+M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dh
+M models/lnd/clm/test/system/config_files/17p_cndvsc_m
+M models/lnd/clm/test/system/config_files/21p_cndvcrpsc_m
+M models/lnd/clm/test/system/config_files/17p_cndvsc_o
+M models/lnd/clm/test/system/config_files/21p_cndvcrpsc_o
+M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dm
+M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_do
+M models/lnd/clm/test/system/config_files/21p_cndvcrpsc_s
+M models/lnd/clm/test/system/config_files/17p_cndvsc_s
+M models/lnd/clm/test/system/config_files/17p_sc_dh
+M models/lnd/clm/test/system/config_files/17p_sc_dm
+M models/lnd/clm/test/system/config_files/17p_sc_do
+M models/lnd/clm/test/system/config_files/17p_sc_ds
+M models/lnd/clm/test/system/config_files/17p_cnsc_h
+M models/lnd/clm/test/system/config_files/21p_cncrpsc_h
+M models/lnd/clm/test/system/config_files/17p_cnsc_dh
+M models/lnd/clm/test/system/config_files/21p_cncrpsc_dh
+M models/lnd/clm/test/system/config_files/21p_cncrpsc_m
+M models/lnd/clm/test/system/config_files/17p_cnsc_m
+M models/lnd/clm/test/system/config_files/17p_cnsc_o
+M models/lnd/clm/test/system/config_files/21p_cncrpsc_o
+M models/lnd/clm/test/system/config_files/17p_cnsc_dm
+M models/lnd/clm/test/system/config_files/21p_cncrpsc_dm
+M models/lnd/clm/test/system/config_files/17p_cnsc_do
+M models/lnd/clm/test/system/config_files/17p_cnc13sc_dh
+M models/lnd/clm/test/system/config_files/21p_cncrpsc_do
+M models/lnd/clm/test/system/config_files/21p_cncrpsc_s
+M models/lnd/clm/test/system/config_files/17p_sc_h
+M models/lnd/clm/test/system/config_files/21p_cncrpsc_ds
+M models/lnd/clm/test/system/config_files/17p_cnsc_ds
+M models/lnd/clm/test/system/config_files/17p_cnc13sc_dm
+M models/lnd/clm/test/system/config_files/17p_cnc13sc_do
+M models/lnd/clm/test/system/config_files/17p_sc_m
+M models/lnd/clm/test/system/config_files/17p_sc_o
+M models/lnd/clm/test/system/config_files/17p_cnnfsc_dh
+M models/lnd/clm/test/system/config_files/17p_cnnfsc_dm
+M models/lnd/clm/test/system/config_files/21p_cndvcrpsc_dh
+M models/lnd/clm/test/system/config_files/17p_cndvsc_dh
+M models/lnd/clm/test/system/config_files/17p_cnnfsc_do
+M models/lnd/clm/test/system/config_files/17p_cndvsc_dm
+M models/lnd/clm/test/system/config_files/21p_cndvcrpsc_dm
+M models/lnd/clm/test/system/config_files/17p_cndvsc_do
+M models/lnd/clm/test/system/config_files/21p_cndvcrpsc_do
+M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dh
+M models/lnd/clm/test/system/config_files/21p_cndvcrpsc_ds
+M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dm
+M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_do
+M models/lnd/clm/test/system/config_files/README
+M models/lnd/clm/test/system/config_files/21p_cndvcrpsc_h
+M models/lnd/clm/test/system/config_files/17p_cndvsc_h
+
+>>>>>>>>>>>>> Get working with latest scripts, eliminate intrepid.
+M models/lnd/clm/test/system/TCB.sh --------- Send -comp to configure
+M models/lnd/clm/test/system/test_driver.sh - Eliminate intrepid, get working
+ with latest CESM scripts/Machines, update env settings to Machines
+M models/lnd/clm/test/system/CLM_runcmnd.sh - Eliminate intrepid
+M models/lnd/clm/test/system/tests_pretag_bluefire - Correct test name
+
+>>>>>>>>>>>>> Eliminate CASA
+M models/lnd/clm/tools/interpinic/src/interpinic.F90
+M models/lnd/clm/tools/mksurfdata_map/src/clm_varctl.F90
+
+>>>>>>>>>>>>> Eliminate CASA, and maxpft. Read site specific config_defaults
+>>>>>>>>>>>>> Change spinup option, get working with latest scripts.
+M models/lnd/clm/bld/configure --------- Use clm45sci API (use -spinup in
+ place of ad_spinup/exit_spinup), read site specific config_defaults
+ file when sitespf_pt option is used. Eliminate CASA, and maxpft option.
+ Get configure working with latest CESM scripts. Add mct/pio subdirectory
+ for SMP=on/off so will build on bluefire. Add -comp option required
+ for new CESM scripts (for stand-alone test).
+M models/lnd/clm/bld/build-namelist ---- Remove faerdep, use spinup from
+ configure rather than ad/exit_spinup, remove substitution of CSMDATA
+ in filenames.
+M models/lnd/clm/bld/clm.cpl7.template - Use sitespf_pt for regional case
+ when CLM_USRDAT NOT used and don't use clm_root in configure.
+M models/lnd/clm/bld/config_files/config_sys_defaults.xml - Add comp settings
+ and change mach settings to NOT include compiler. Remove: dec_osf,
+ es, irix, solaris, super-ux, unicosmp as no longer tested on
+M models/lnd/clm/bld/config_files/config_definition.xml --- Remove CASA option
+ mxpft can only be 17 or 21. Add comp, remove ad_spinup/exit_spinup
+ for spinup option. Change description of sitespf_pt option.
+
+>>>>>>>>>>>>> Eliminate CASA, move ad/exit_spinup to spinup, add 1850 ne240 fsurdat
+>>>>>>>>>>>>> Activate all RTM maps, replace all f05 maps.
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml - Rm fget_archdev
+ only allow R05 for rtm_res
+M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - ad_spinup
+ to spinup
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+ add 1850 ne240 fsurdat, remove null setting of finidat for maxpft=4
+ activate RTM maps, replace all 0.47x0.63 mapping files
+M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml - Use
+ standard shared SCRIP-grid files for: f05, f09, f19 resolutions
+M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml ------- Change
+ ad/exit_spinup to spinup
+M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml - Remove
+ fatmgrid, flndtopo, fatmtopo, and fatmlndfrc files
+M models/lnd/clm/doc/IMPORTANT_NOTES - Remove CASA, fine-mesh, and fget_archdev
+
+>>>>>>>>>>>>> Eliminate CASA, fine-mesh and atm data, change llatlon for ldomain
+>>>>>>>>>>>>> Require RTM map files to be read. Require fatmlndfrc files to be
+>>>>>>>>>>>>> in CESM domain file format. Require maxpft=numpft+1. Fix a pnetcdf issue.
+M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 - Remove CASA use ldomain
+ in place of llatlon
+M models/lnd/clm/src/biogeochem/CNDVMod.F90 ------ ldomain replaces llatlon
+M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90 --- Get rid of fine-mesh
+ downscaling, use ldomain in place of llatlon remove atm, lnd versions
+ of everything (adomain, adecomp, get_proc_bounds_atm, begg_a/l, atm_sx for example).
+M models/lnd/clm/src/main/organicFileMod.F90 ---- llatlon becomes ldomain
+M models/lnd/clm/src/main/decompInitMod.F90 ----- Remove decompInit_atm, acid
+ remove atm grid stuff for: decompInit_lnd and decompInit_glcp
+M models/lnd/clm/src/main/clm_initializeMod.F90 - Remove downscaling and atm/lnd
+ grid stuff as well as CASA.
+M models/lnd/clm/src/main/clm_glclnd.F90 -------- Remove clm_maps2x and clm_mapx2s
+ and atm_s2x and atm_x2s
+M models/lnd/clm/src/main/clmtypeInitMod.F90 ---- Remove CASA stuff
+M models/lnd/clm/src/main/ndepStreamMod.F90 ----- Replace llatlon with ldomain
+M models/lnd/clm/src/main/histFileMod.F90 ------- Remove atm grid stuff such
+ as gratm, namea grids, remove CASA
+M models/lnd/clm/src/main/clm_atmlnd.F90 -------- Remove downscaling/upscaling
+ init_adiag_type, clm_downscale_a2l and data: atm_a2l, atm_l2a, adiag_arain
+M models/lnd/clm/src/main/restFileMod.F90 ------- Remove CASA
+M models/lnd/clm/src/main/controlMod.F90 -------- Remove fatmgrid, CASA, fatmtopo
+ add write about flndtopo (still needed for glc_nec)
+M models/lnd/clm/src/main/initSurfAlbMod.F90 ---- Remove CASA
+M models/lnd/clm/src/main/clm_varctl.F90 -------- Remove downscale and CASA
+M models/lnd/clm/src/main/clm_driver.F90 -------- Remove CASA
+M models/lnd/clm/src/main/initGridCellsMod.F90 -- Remove setting of _a domain
+ info, gindex_a, longdeg_a, latdeg_a, lon_a, lat_a
+M models/lnd/clm/src/main/ncdio_pio.F90 --------- Remove use of gratm, set
+ data=' ' needed for pnetcdf
+M models/lnd/clm/src/main/surfrdMod.F90 --------- Remove surfrd_get_latlon,
+ surfrd_get_frac, surfrd_wtxy_veg_rank, surfrd_mkrank, add
+ surfrd_get_globmask in place of surfrd_get_latlon, get rid of
+ ability to read in CLM frac datasets and only read in CESM domain file
+ format. Abort if allocate_all_vegpfts is NOT true.
+M models/lnd/clm/src/main/domainMod.F90 --------- Remove latlon_type,
+ nara, and ntop add isgrid2d, adomain, alatlon, llatlon, gatm, amask, pftm
+ methods: domain_setptrs, latlon_init, latlon_check, latlon_clean,
+ latlon_setsame
+M models/lnd/clm/src/main/decompMod.F90 --------- Remove get_proc_global_atm,
+ get_proc_bounds_atm, and atmosphere decomposition data
+M models/lnd/clm/src/main/clmtype.F90 ----------- Remove CASA, gratm
+M models/lnd/clm/src/main/histFldsMod.F90 ------- Remove use of atm_a2l,
+ adiag_arain, adiag_asnow, adiag_aflux, adiag_lflux, downscale
+ remove CASA and downscale if's
+M models/lnd/clm/src/main/mkarbinitMod.F90 ------ Remove CASA
+M models/lnd/clm/src/riverroute/RtmMod.F90 ------ Remove some global RTM
+ data. Replace calls to endrun with shr_sys_abort as initial
+ step of the move to having RTM on its own component.
+ Add rtm_celledge.
+M models/lnd/clm/src/biogeophys/UrbanInputMod.F90 ---- llatlon to ldomain
+M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 - formatting change
+M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 ---- Remove CASA
+M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90 ------ Remove downscaling
+
+Summary of testing:
+
+ bluefire: TBL tests fail because of use of RTM mapping files and NetCDF issue and
+018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+044 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 2
+045 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 2
+046 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 2
+ bluefire interactive testing: All PASS except, TBL tests fail because of NetCDF build issue and
+026 erCK4 TER.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 7
+027 brCK4 TBR.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 6
+031 brCK8 TBR.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 6
+ bluefire/CESM testing: All PASS except
+ (and ALL compare tests fail couldn't find base result)
+FAIL SMS.1x1_numaIA.ICN.bluefire_ibm
+FAIL ERP.1x1_mexicocityMEX.I.bluefire_ibm
+ bluefire/PTCLM testing: All FAIL
+ lynx/pgi testing: ALL FAIL
+ lynx/pgi interactive testing: ALL FAIL
+ lynx CESM testing: ALL PASS except... (don't compare as no baselines for clm4_0_40)
+CFAIL ERI.f10_f10.IRCP60CN.lynx_pathscale.162157
+FAIL PST.f19_g16.I.lynx_pgi
+RUN ERS.1x1_vancouverCAN.I.lynx_pgi.162157
+ mirage,storm/ifort interactive testing: All PASS!
+ jaguarpf CESM testing: All FAIL
+RUN ERS_D.f09_g16.I1850.titan_pgi.182111
+FAIL ERI.f10_f10.IRCP60CN.titan_pgi
+FAIL PST.f09_g16.I.titan_pgi
+FAIL PET_PT.f10_f10.I20TRCN.titan_pgi
+FAIL ERP.f19_g16.I4804CN.titan_pgi
+RUN ERS.1x1_mexicocityMEX.I.titan_pgi.182111
+FAIL ERI_D.ne30_g16.I1850CN.titan_pgi
+TFAIL ERH.ne120_g16.I2000CN.titan_pgi.182111
+RUN ERS.f09_g16.IRCP26CN.titan_pgi.182111
+FAIL SMS.f10_f10.IRCP45CN.titan_pgi
+RUN ERS.f19_g16.IRCP60CN.titan_pgi.182111
+FAIL SMS_D.f10_f10.IRCP85CN.titan_pgi
+RUN ERS.f09_g16.IG1850.titan_pgi.182111
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_40
+
+Changes answers relative to baseline: Yes (using RTM mapping files now)
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: RTM mapping for f05
+ - what platforms/compilers:
+ - nature of change larger than roundoff/same climate
+
+===============================================================
+===============================================================
+Tag name: clm4_0_40
+Originator(s): erik (Erik Kluzek)
+Date: Thu Feb 16 14:19:28 MST 2012
+One-line Summary: Back out update to new T31 surface datasets
+
+Purpose of changes:
+
+Back out the new T31 surface datasets so will have initial conditions to use
+for T31. Bring in the new surface datasets with initial conditions in the next tag.
+
+Requirements for tag: Run on bluefire
+
+Test level of tag: critical
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 935 (RTM warning NOT an error)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1454 (lack of 1D history files in CLM testing)
+ 1455 (bad time-stamp in CLM testing)
+ 1457 (bug in soil color in mksurfdata_map)
+ 1459 (PTSMODE fails)
+ 1468 (Bad f09, f19 SCRIP Grid files)
+ 1476 (Problem with stand-alone build on bluefire)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: Bring back old T31 datasets
+ Comment out the new T31 surface datasets and put back the old T31 datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+ A models/lnd/clm/src/main/findHistFields.pl -- Add script to figure out list of
+ history field names, long_names, and units
+ Creates an XML file as well as giving you a neatly formatted sorted list.
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Move T31 files
+ back to previous version and comment out the new files.
+
+Summary of testing:
+
+ bluefire: All PASS except...
+018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+020 smHO2 TSM.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4
+021 erHO2 TER.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -3+-7 cold ................FAIL! rc= 5
+022 brHO2 TBR.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -5+-5 cold ................FAIL! rc= 5
+028 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4
+029 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -3+-7 arb_ic ............FAIL! rc= 5
+030 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -5+-5 arb_ic ............FAIL! rc= 5
+032 smH52 TSM.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 96 cold ...............FAIL! rc= 4
+033 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 10+38 cold ............FAIL! rc= 5
+034 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:1800 10x15 USGS@2000 72+72 cold .........FAIL! rc= 5
+036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+ bluefire interactive testing: All PASS except...
+026 erCK4 TER.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 7
+027 brCK4 TBR.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 6
+031 brCK8 TBR.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 6
+065 sm774 TSMtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 5
+080 sm974 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 6
+082 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+084 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc=
+ bluefire/CESM testing: All PASS except...
+018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+020 smHO2 TSM.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4
+021 erHO2 TER.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -3+-7 cold ................FAIL! rc= 5
+022 brHO2 TBR.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -5+-5 cold ................FAIL! rc= 5
+028 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4
+029 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -3+-7 arb_ic ............FAIL! rc= 5
+030 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -5+-5 arb_ic ............FAIL! rc= 5
+032 smH52 TSM.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 96 cold ...............FAIL! rc= 4
+033 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 10+38 cold ............FAIL! rc= 5
+034 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:1800 10x15 USGS@2000 72+72 cold .........FAIL! rc= 5
+036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10
+037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5
+038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_39
+
+Changes answers relative to baseline: T31
+
+ Answers are identical to clm4_0_38, if fatmgrid=fsurdat in controlMod.F90
+ except for ntop for some grids and TREFMXAV and TREFMNAV for others.
+
+===============================================================
+===============================================================
+Tag name: clm4_0_39
+Originator(s): erik (Erik Kluzek)
+Date: Wed Feb 1 11:40:11 MST 2012
+One-line Summary: Bring newgrid branch to trunk
+
+Purpose of changes:
+
+Move newgrid branch from Mariana to trunk. Add ne4np4, ne16np4, ne240np4 surface
+datasets. Replace all T31 surface datasets. Start removing CASA and fine-mesh testing
+and support. Bring in Tony's updates to ESMF5.2.0.
+
+Requirements for tag:
+
+run on lynx-pgi/bluefire/mirage-intel, fix bugs: 1446, 1444, 1442, 1404, 1430, 1425, 1420
+
+Test level of tag: standard
+
+Bugs fixed (include bugzilla ID):
+ 1458 (Problem using fsurdat for fatmgrid as no vertices)
+ 1444 (attempt to read unallocated variable)
+ 1442 (Make clm-template same as CAM template)
+ 1430 (Remove DIN_LOC_ROOT_CLMQIAN -- add ...CLM_FORC)
+ 1425 (Double quotes causes Namelist.pm to hang)
+ 1420 (Bad history output for TREFMNAV, TREFMXAV)
+ 1404 (Inconsistent domain and fatmlndfrc files)
+
+Known bugs (include bugzilla ID):
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 935 (RTM warning NOT an error)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1454 (lack of 1D history files in CLM testing)
+ 1455 (bad time-stamp in CLM testing)
+ 1457 (bug in soil color in mksurfdata_map)
+ 1459 (PTSMODE fails)
+ 1468 (Bad f09, f19 SCRIP Grid files)
+ 1476 (Problem with stand-alone build on bluefire)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system:
+ Move rtm, voc, and glc_nec from configure-time to run-time namelist options
+
+ Remove the -rtm -glc_nec and -voc options from "configure"
+ (NO longer available to CLM_CONFIG_OPTS)
+
+
+Describe any changes made to the namelist:
+
+ Add "-glc_nec", "-glc_smb", and "-rtm" options to build-namelist
+ (now available to CLM_BLDNML_OPTS)
+
+ Remove fine-mesh option to build-namelist "-lnd_res"
+ (NO longer available to CLM_BLDNML_OPTS)
+
+ Add following to clm_inparm namelist:
+
+ do_rtm => If TRUE, turn on rtm river routing
+ maxpatch_glcmec => Number of multiple elevation classes over glacier points.
+ Normally this is ONLY used when running CESM with the active glacier model.
+
+ Add following to the driver namelist to pass extra fields
+
+ flds_voc
+ flds_co2a
+ flds_co2b
+ flds_co2c
+ flds_co2_dmsa
+ cplflds_custom
+ glc_nec
+
+List any changes to the defaults for the boundary datasets:
+ Replace T31 surface datasets, remove T31 finidat datasets
+ Add T31 fpftdyn datasets for all cases
+ Add ne240np4 datasets, ne4np4, ne16np4 surface datasets
+
+ remove ALL fatmtopo datasets and all but T31, f09, f19 for glc_nec flndtopo
+
+ Remove fatmlndfrc datasets -- use datm domainfiles in their place
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self,mvertens,tcraig (ESMF update)
+
+List any svn externals directories updated (csm_share, mct, etc.): almost all
+ scripts to scripts4_120123
+ Machines to Machines_120123
+ drv to drvseq4_1_01
+ datm to datm8_120123
+ socn/sice/sglc to stubs1_3_01
+ cism to cism1_120123
+ csm_share to share3_120123
+ esmf_wrf_tmgr to esmf_wrf_timemgr_120123
+ gen_domain to gen_domain_120117
+
+List all files eliminated:
+
+>>>>>>>>>>>>>> Get rid of mkgriddata as no longer needed
+>>>>>>>>>>>>>> Use gen_domain or models/lnd/clm/tools/mkmapdata/mknoocnmap.pl
+ D mkgriddata/mkgriddata.namelist
+ D mkgriddata/mkgriddata.regional
+ D mkgriddata/src/mkvarpar.F90
+ D mkgriddata/src/mkvarctl.F90
+ D mkgriddata/src/clm_varpar.F90
+ D mkgriddata/src/clm_varctl.F90
+ D mkgriddata/src/shr_sys_mod.F90
+ D mkgriddata/src/shr_file_mod.F90
+ D mkgriddata/src/ncdio.F90
+ D mkgriddata/src/shr_log_mod.F90
+ D mkgriddata/src/Filepath
+ D mkgriddata/src/Macros.custom
+ D mkgriddata/src/shr_kind_mod.F90
+ D mkgriddata/src/shr_const_mod.F90
+ D mkgriddata/src/mkgriddata.F90
+ D mkgriddata/src/domainMod.F90
+ D mkgriddata/src/areaMod.F90
+ D mkgriddata/src/creategridMod.F90
+ D mkgriddata/src/nanMod.F90
+ D mkgriddata/src/Srcfiles
+ D mkgriddata/src/Mkdepends
+ D mkgriddata/src/Makefile
+ D mkgriddata/src
+ D mkgriddata/mkgriddata.singlept
+ D mkgriddata/mkgriddata.cesm_dom
+ D mkgriddata/README
+ D mkgriddata
+
+>>>>>>>>>>>>>> Remove config files for CASA or that turn off RTM, or
+>>>>>>>>>>>>>> turn on VOC or glc_mec
+ D models/lnd/clm/test/system/config_files/_nrsc_dh
+ D models/lnd/clm/test/system/config_files/4p_casasc_dh
+ D models/lnd/clm/test/system/config_files/4p_casasc_dm
+ D models/lnd/clm/test/system/config_files/4p_casasc_do
+ D models/lnd/clm/test/system/config_files/4p_casasc_ds
+ D models/lnd/clm/test/system/config_files/4p_casasc_h
+ D models/lnd/clm/test/system/config_files/4p_casasc_m
+ D models/lnd/clm/test/system/config_files/4p_casasc_o
+ D models/lnd/clm/test/system/config_files/17p_nrsc_ds
+ D models/lnd/clm/test/system/config_files/_nrsc_dm
+ D models/lnd/clm/test/system/config_files/_nrsc_do
+ D models/lnd/clm/test/system/config_files/4p_nrcasasc_ds
+ D models/lnd/clm/test/system/config_files/17p_vorsc_h
+ D models/lnd/clm/test/system/config_files/_nrsc_ds
+ D models/lnd/clm/test/system/config_files/17p_vorsc_m
+ D models/lnd/clm/test/system/config_files/17p_nrcnsc_do
+ D models/lnd/clm/test/system/config_files/17p_vorsc_o
+ D models/lnd/clm/test/system/config_files/17p_nrcnsc_ds
+ D models/lnd/clm/test/system/config_files/_nrmexsc_ds
+ D models/lnd/clm/test/system/config_files/_mec10sc_dh
+ D models/lnd/clm/test/system/config_files/_nrcnsc_do
+ D models/lnd/clm/test/system/config_files/_mec10sc_dm
+ D models/lnd/clm/test/system/config_files/_nrcnsc_ds
+ D models/lnd/clm/test/system/config_files/_mec10sc_do
+ D models/lnd/clm/test/system/config_files/_mec10sc_ds
+ D models/lnd/clm/test/system/config_files/_nrsc_s
+ D models/lnd/clm/test/system/config_files/_nrvansc_ds
+ D models/lnd/clm/test/system/config_files/_nrnil3sc_dh
+ D models/lnd/clm/test/system/config_files/_nrnil3sc_dm
+ D models/lnd/clm/test/system/config_files/17p_vorsc_dh
+ D models/lnd/clm/test/system/config_files/21p_nrcncrpsc_s
+ D models/lnd/clm/test/system/config_files/21p_nrcncrpsc_ds
+ D models/lnd/clm/test/system/config_files/17p_vorsc_dm
+ D models/lnd/clm/test/system/config_files/17p_vorsc_do
+ D models/lnd/clm/test/system/config_files/17p_vorsc_ds
+ D models/lnd/clm/test/system/config_files/_mec10sc_h
+ D models/lnd/clm/test/system/config_files/_mec10sc_m
+ D models/lnd/clm/test/system/config_files/_mec10sc_o
+
+>>>>>>>>>>>>>> Remove mkdatadomain always use gen_domain
+ D models/lnd/clm/tools/mkdatadomain
+ D models/lnd/clm/tools/mkdatadomain/mkdatadomain.namelist
+ D models/lnd/clm/tools/mkdatadomain/src
+ D models/lnd/clm/tools/mkdatadomain/src/addglobal.F90
+ D models/lnd/clm/tools/mkdatadomain/src/create_domain.F90
+ D models/lnd/clm/tools/mkdatadomain/src/Mkdepends
+ D models/lnd/clm/tools/mkdatadomain/src/Srcfiles
+ D models/lnd/clm/tools/mkdatadomain/src/Filepath
+ D models/lnd/clm/tools/mkdatadomain/src/Macros.custom
+ D models/lnd/clm/tools/mkdatadomain/src/Makefile
+ D models/lnd/clm/tools/mkdatadomain/src/shr_kind_mod.F90
+ D models/lnd/clm/tools/mkdatadomain/src/shr_const_mod.F90
+ D models/lnd/clm/tools/mkdatadomain/README
+>>>>>>>>>>>>>> fine-mesh no longer supported don't worry about topo files anymore
+ D models/lnd/clm/bld/namelist_files/checktopofiles.ncl
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>> Rename without RTM off option
+ A + models/lnd/clm/test/system/config_files/17p_cnsc_ds
+ A + models/lnd/clm/test/system/config_files/_mexsc_ds
+ A + models/lnd/clm/test/system/config_files/_vansc_ds
+
+>>>>>>>>>>>>>> namelist to turn on VOC and RTM off, and gen_domain options
+ A + models/lnd/clm/test/system/nl_files/nl_voc
+ A + models/lnd/clm/test/system/nl_files/clm_nortm
+ A + models/lnd/clm/test/system/nl_files/gen_domain.ne30.runoptions
+ A + models/lnd/clm/test/system/nl_files/gen_domain.T31.runoptions
+
+>>>>>>>>>>>>>> Add scripts to create SCRIP grid/map files for region/single-point domains
+ A + models/lnd/clm/tools/mkmapdata/mkunitymap.ncl
+ A + models/lnd/clm/tools/mkmapdata/mknoocnmap.pl
+ A + models/lnd/clm/tools/mkmapgrids/mkscripgrid.ncl
+
+ mknoocnmap.pl [options] Gets map and grid files for a single land-only point.
+ REQUIRED OPTIONS
+ -centerpoint [or -p] Center latitude,longitude of the grid to create.
+ -name [or -n] Name to use to describe point
+
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>> Remove rtm off from config files and update README file
+ M models/lnd/clm/test/system/config_files/_nil3sc_dh
+ M models/lnd/clm/test/system/config_files/_nil3sc_dm
+ M models/lnd/clm/test/system/config_files/README
+
+>>>>>>>>>>>>>> Change tests
+ M models/lnd/clm/test/system/README.testnames --- Update test names
+ 6, A, J, Q, S, V, X, and Z configurations are now unused
+ resolutions: 3, F, G and H are now unused
+ M models/lnd/clm/test/system/mknamelist --------- Remove fine-mesh option
+ M models/lnd/clm/test/system/test_driver.sh ----- Update paths for
+ edinburgh/jaguar
+ M models/lnd/clm/test/system/input_tests_master - Remove nr,vo,mec in configure
+ files for tests and move to namelist, remove compile-only test names
+ M models/lnd/clm/test/system/TSMtools.sh -------- Allow run files to
+ be in test directory
+
+>>>>>>>>>>>>>> Change testnames
+ 6, A, J, Q, S, V, X, and Z configurations are now unused
+ resolutions: 3, F, G and H are now unused
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_posttag_yong
+ M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_posttag_kraken
+ MM models/lnd/clm/test/system/tests_pretag_jaguarpf
+ MM models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi
+ M models/lnd/clm/test/system/tests_posttag_mirage
+ M models/lnd/clm/test/system/tests_posttag_intrepid
+ M models/lnd/clm/test/system/tests_posttag_intrepid_nompi
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/tests_posttag_lynx
+
+>>>>>>>>>>>>>> Fix run-time options
+ M models/lnd/clm/test/system/nl_files/clm_usrdat ----- Add rtm off
+ M models/lnd/clm/test/system/nl_files/mkmapdata_if10 - Remove -i option
+
+>>>>>>>>>>>>>> Add option to create datasets NOT entered into XML database
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl - Use domainfile
+ rather than fatmgrid file, which changes variable names as well
+ M models/lnd/clm/tools/README.testing --------------- Note that run files
+ can be in tool directory or test directory
+ M models/lnd/clm/tools/README ----------------------- Update information on
+ process
+ MM models/lnd/clm/tools/mksurfdata_map/mksurfdata.pl - Add usrspc option
+ M models/lnd/clm/tools/mkmapdata/regridbatch.sh ----- Use -b instead of -i
+ M models/lnd/clm/tools/mkmapdata/mkmapdata.sh ------- Replace -i option with
+ -b for batch, add option to read input SCRIP grid file (-f) and (-t)
+ option for regional or global type, update usage
+ M models/lnd/clm/tools/mksurfdata_map/README -------- Update usage info
+ M models/lnd/clm/tools/mkmapdata/README ------------- Update usage info
+
+ New options for unsupported resolutions to mksurfdata.pl
+
++ For unsupported, user-specified resolutions:
++ $ProgName -res usrspec -usr_gname -usr_gdate [OPTIONS]
++ -usr_gname "user_gname" User resolution name to find grid file with
++ (only used if -res is set to 'usrspec')
++ -usr_gdate "user_gdate" User map date to find mapping files with
++ (only used if -res is set to 'usrspec')
++ NOTE: all mapping files are assumed to be in mkmapdata
++ - and the user needs to have invoked mkmapdata in
++ that directory first
++
+
+>>>>>>>>>>>>>> Move rtm, glc_nec, voc from configure to build-namelist
+ M models/lnd/clm/bld/configure -------------- Remove -rtm, -glc_nec, -voc options
+ M models/lnd/clm/bld/listDefaultNamelist.pl - Get datm namelist files as well
+ M models/lnd/clm/bld/build-namelist --------- Add: glc_nec, glc_smb, rtm options
+ Remove: lnd_res fine-mesh option
+ M models/lnd/clm/bld/clm.cpl7.template ------ Move rtm, glc_nec settings from
+ configure to build-namelist, set fatmlndfrc from domain file set in scripts
+ add processing for LND_GRID=reg, set glc_smb, loop over namelists for DART,
+ M models/lnd/clm/bld/README
+ M models/lnd/clm/bld/config_files/config_definition.xml - Remove rtm, glc_nec, voc
+
+>>>>>>>>>>>>>> Add new namelist items, remove CASA, fine-mesh, update T31
+>>>>>>>>>>>>>> add ne4np4, ne16np4, ne240np4 datasets
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ------
+ Add: rtm, maxpatch_glcmec, do_rtm, new cpl files, navy lmask
+ remove: fatmtopo, CASA namelist items,
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - rtm/glc_nec
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl ------- rm CASA
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -----
+ Add: do_rtm, new T31 fsurdat/fpftdyn files, new ne4np4, ne16np4,
+ fsurdat and ne240np4 fsurdat/fatmlndfrc, missing map files (f19,T31)
+ Remove: T31 finidat, remove fatmtopo, and most flndtopo
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml ---- Change
+ paths of domainfiles to share/domains
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml Add 10min
+ navy grid file
+
+>>>>>>>>>>>>>> Add glc_nec
+ M models/lnd/clm/bld/namelist_files/use_cases/20thC_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/2000_glacierMEC_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850_glacierMEC_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_glacierMEC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/glacierMEC_pd.xml
+
+
+>>>>>>>>>>>>>> Remove RTM, GLC_NEC, ESMF5.2.0 updates, don't require extents on fsurdat
+ M models/lnd/clm/src/main/clm_varpar.F90 -- Change for glc_nec
+ M models/lnd/clm/src/main/accumulMod.F90 -- Initialize val to zero if period=1
+ M models/lnd/clm/src/main/clm_initializeMod.F90 - Change order of calls, rm RTM
+ M models/lnd/clm/src/main/clm_glclnd.F90 -- Use maxpatch_glcmec NOT glc_nec
+ M models/lnd/clm/src/main/subgridMod.F90 -- Use maxpatch_glcmec NOT glc_nec
+ M models/lnd/clm/src/main/histFileMod.F90 - Remove RTM add do_rtm
+ make sure ninst suffix is in restart history filename
+ M models/lnd/clm/src/main/restFileMod.F90 - Remove RTM add do_rtm
+ M models/lnd/clm/src/main/controlMod.F90 -- Remove RTM add do_rtm,
+ maxpatch_glcmec, glc_grid, use fatmlndfrc for fatmgrid if empty
+ broadcast glc_topomax if create_glacier_mec_landunit
+ M models/lnd/clm/src/main/clm_time_manager.F90 - Changes from Tony to update
+ to ESMF5.2.0
+ M models/lnd/clm/src/main/clm_varctl.F90 ---- Remove RTM, use do_rtm, and remove
+ GLC_NEC use arrays for glc_nec variables
+ M models/lnd/clm/src/main/clm_driver.F90 ---- Remove RTM use do_rtm
+ M models/lnd/clm/src/main/initGridCellsMod.F90 - Write more info on error
+ M models/lnd/clm/src/main/pftvarcon.F90 ----- Remove unused MPI vars
+ M models/lnd/clm/src/main/surfrdMod.F90 ----- Don't require LATS/N,LONE/W on
+ files set to nan if not used, use maxpatch_glcmec NOT glc_nec
+ M models/lnd/clm/src/main/domainMod.F90 ----- Don't write LATS/N,LONE/W if
+ first lonw is nan
+ M models/lnd/clm/src/main/decompMod.F90 ----- Remove RTM use do_rtm
+ M models/lnd/clm/src/main/histFldsMod.F90 --- Remove RTM use do_rtm
+ M models/lnd/clm/src/riverroute/RtmMod.F90 -- Remove RTM use run_rtm NOT do_rtm
+ M models/lnd/clm/src/riverroute/RunoffMod.F90 Remove RTM
+
+>>>>>>>>>>>>>> ESMF5.2.0 updates, remove RTM, GLC_NEC
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90 - ESMF5.2.0 updates, remove RTM
+ use do_rtm, remove GLC_NEC use arrays of glc_nec
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_mct.F90 -- ESMF5.2.0 updates
+ M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90 --- ESMF5.2.0 updates
+ M models/lnd/clm/src/cpl_share/clm_cpl_indices.F90 - Updated for new coupler fields spec
+ Remove RTM, GLC_NEC CPP tokens, make glc_nec variables arrays
+ some updates to ESMF5.2.0
+
+Summary of testing:
+
+ bluefire: All PASS except...
+004 blC91 TBL.sh _sc_dh clm_std^nl_urb 20030101:3600 4x5 gx3v7 -6 arb_ic ........................FAIL! rc= 7
+008 blTZ1 TBL.sh 21p_cncrpsc_dh clm_stdIgnYr^nl_crop 20020401:3600 10x15 USGS -10 cold ..........FAIL! rc= 7
+011 blD91 TBL.sh _persc_dh clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 7
+015 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:1800 1.9x2.5 gx1v6@1850-2100 -10 cold FAIL! rc= 7
+018 brW51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+019 blW51 TBL.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -5 cold .................FAIL! rc= 7
+020 smHO2 TSM.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4 (bluefire compiler error)
+021 erHO2 TER.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -3+-7 cold ................FAIL! rc= 5 (bluefire compiler error)
+022 brHO2 TBR.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -5+-5 cold ................FAIL! rc= 5 (bluefire compiler error)
+023 blHO2 TBL.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4 (bluefire compiler error)
+027 blHo1 TBL.sh 17p_cnsc_dh clm_drydep 20000101:1800 10x15 USGS@2000 -10 cold ..................FAIL! rc= 7 (bluefire compiler error)
+028 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4 (bluefire compiler error)
+029 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -3+-7 arb_ic ............FAIL! rc= 5 (bluefire compiler error)
+030 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -5+-5 arb_ic ............FAIL! rc= 5 (bluefire compiler error)
+031 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4 (bluefire compiler error)
+032 smH52 TSM.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 96 cold ...............FAIL! rc= 4 (bluefire compiler error)
+033 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 10+38 cold ............FAIL! rc= 5 (bluefire compiler error)
+034 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:1800 10x15 USGS@2000 72+72 cold .........FAIL! rc= 5 (bluefire compiler error)
+035 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 48 cold ...............FAIL! rc= 4 (bluefire compiler error)
+036 smU61 TSM.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -6 cold ......FAIL! rc= 10 (bluefire compiler error)
+037 erU61 TER.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 10+38 cold ...FAIL! rc= 5 (bluefire compiler error)
+038 brU61 TBR.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 -3+-3 cold ...FAIL! rc= 5 (bluefire compiler error)
+039 blU61 TBL.sh 21p_cndvcrpsc_dh clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6 48 cold ......FAIL! rc= 4 (bluefire compiler error)
+ bluefire interactive testing:
+008 blC97 TBL.sh _sc_do clm_spin^nl_urb 20030101:1800 4x5 gx3v7@1850 -6 arb_ic ..................FAIL! rc= 7
+012 blHS3 TBL.sh 17p_cnsc_do clm_usrdat 20030101:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic .....FAIL! rc= 7
+016 blCA4 TBL.sh _sc_ds clm_std^nl_urb 20021001:3600 1x1_camdenNJ navy -90 arb_ic ...............FAIL! rc= 7
+020 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:3600 1x1_mexicocityMEX navy 158 arb_ic ....FAIL! rc= 5
+024 blJ74 TBL.sh 4p_casasc_ds clm_std^nl_urb 10001230:3600 1x1_tropicAtl test -100 arb_ic .......FAIL! rc= 7
+028 blCA8 TBL.sh _sc_ds clm_std^nl_urb 20021230:3600 1x1_asphaltjungleNJ navy -90 arb_ic ........FAIL! rc= 7
+029 smCK4 TSM.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -10 cold ...............FAIL! rc= 10
+030 erCK4 TER.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 5
+031 brCK4 TBR.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 5
+032 blCK4 TBL.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -10 cold ...............FAIL! rc= 4
+033 smCK8 TSM.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -10 cold ...........FAIL! rc= 10
+034 erCK8 TER.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 5
+035 brCK8 TBR.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 5
+036 blCK8 TBL.sh _sc_ds clm_nortm^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 4
+042 blF93 TBL.sh 17p_sc_do clm_std^nl_voc 20021230:1800 4x5 gx3v7 48 cold .......................FAIL! rc= 5
+046 blC83 TBL.sh _sc_do clm_std^nl_urb 20020115:3600 5x5_amazon navy -10 arb_ic .................FAIL! rc= 7
+054 blC63 TBL.sh _sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ........................FAIL! rc= 5
+058 blHQ4 TBL.sh 17p_cnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -150 cold ............FAIL! rc= 5
+062 blH43 TBL.sh 17p_cnsc_do clm_transient_20thC 19790101:1800 1.9x2.5 gx1v6@1850-2000 -10 startup FAIL! rc= 7
+066 bl514 TBLtools.sh gen_domain tools__ds T31.runoptions .......................................FAIL! rc= 5
+075 sm774 TSMtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 5
+076 bl774 TBLtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 4
+083 bl8Z3 TBLrst_tools.sh 21p_cncrpsc_do interpinic clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6FAIL! rc= 7
+085 bl953 TBLscript_tools.sh mkmapdata mkmapdata.sh mkmapdata_if10 ..............................FAIL! rc= 7
+090 sm974 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 6
+091 bl974 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 4
+092 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+093 bl9T4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 4
+094 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+095 bl9C4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 4
+ bluefire/CESM testing: All PASS except, compare_hist
+FAIL SMS_RLA.f45_f45.I.bluefire - bug 1459
+FAIL SMS_RLB.f45_f45.I.bluefire - bug 1459
+FAIL SMS_ROA.f45_f45.I.bluefire - bug 1459
+ bluefire/PTCLM testing: All FAIL
+ lynx/pgi: All PASS except...
+004 blC92 TBL.sh _sc_dm clm_std^nl_urb 20030101:3600 4x5 gx3v7 -6 arb_ic ........................FAIL! rc= 7
+007 blD92 TBL.sh _persc_dm clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 7
+011 blF92 TBL.sh 17p_sc_dm clm_std^nl_voc 20021230:1800 4x5 gx3v7 48 cold .......................FAIL! rc= 5
+015 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 48 cold ...............FAIL! rc= 7
+019 blJ92 TBL.sh 4p_casasc_dm clm_std^nl_urb 20021230:1800 4x5 gx3v7 48 cold ....................FAIL! rc= 7
+023 blL52 TBL.sh _sc_dm clm_std^nl_urb 20020115:1800 10x15 USGS 48 arb_ic .......................FAIL! rc= 7
+ lynx/pgi interactive testing: All PASS except..
+008 blTZ3 TBL.sh 21p_cncrpsc_do clm_stdIgnYr^nl_crop 20020401:3600 10x15 USGS -10 cold ..........FAIL! rc= 7
+009 smCL4 TSM.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 10
+010 erCL4 TER.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 10x15 USGS -5+-5 cold ................FAIL! rc= 5
+011 brCL4 TBR.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 10x15 USGS -5+-5 cold ................FAIL! rc= 5
+012 blCL4 TBL.sh _sc_ds clm_nortm^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 4
+016 blCA4 TBL.sh _sc_ds clm_std^nl_urb 20021001:3600 1x1_camdenNJ navy -90 arb_ic ...............FAIL! rc= 7
+020 blOC4 TBL.sh _vansc_ds clm_urb1pt^nl_urb 19920812:3600 1x1_vancouverCAN navy 331 arb_ic .....FAIL! rc= 5
+024 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:3600 1x1_mexicocityMEX navy 158 arb_ic ....FAIL! rc= 5
+025 sm978 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850^tools__ds .....FAIL! rc= 6
+026 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+027 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+ mirage,storm/ifort interactive testing:
+007 blD94 TBL.sh _persc_ds clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 7
+011 blCA4 TBL.sh _sc_ds clm_std^nl_urb 20021001:3600 1x1_camdenNJ navy -90 arb_ic ...............FAIL! rc= 7
+015 blCA8 TBL.sh _sc_ds clm_std^nl_urb 20021230:3600 1x1_asphaltjungleNJ navy -90 arb_ic ........FAIL! rc= 7
+019 blL54 TBL.sh _sc_ds clm_std^nl_urb 20020115:1800 10x15 USGS 48 arb_ic .......................FAIL! rc= 7
+023 blR53 TBL.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:1800 10x15 USGS@1850 48 cold ............FAIL! rc= 7
+ jaguarpf: Currently NOT available:
+ edinburgh: Currently not supported
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_38
+
+===============================================================
+===============================================================
+Tag name: clm4_0_38
+Originator(s): erik (Erik Kluzek)
+Date: Mon Jan 23 13:56:45 MST 2012
+One-line Summary: Fix some minor issues with tools, add high resolution option and
+datasets to mksurfdata, remove crap from clmtype so C13 will work on bluefire, convert
+nans to FillValue for some cases, fix datasets, update doc
+
+Purpose of changes:
+
+Update externals to new version of scripts/Machines. Fix some bugs. Add in maps for:
+ne4np4, ne16np4, ne60np4, and ne240np4 resolutions. Begin adding _FillValue/missing_value
+to restart files. Start adding in new high-resolution datasets for mksurfdata. Add an
+option to mksurfdata.pl to run at hi-res and let the default be standard half-degree datasets.
+Add in 3x3min PFT dataset for 2000, and 5x5min organic. Add in maps for 3x3min and
+5x5min_ISRIC_WISE to output grids. Separate out wetland and lake datasets, add in 3x3min
+lake dataset. Get mksurfdata to work with T31, fix maps. Have both mksurfdata_map and clm
+check files for consistencies. Add initial version of a script to check that maps in the
+XML database are correct. Make sure keywords are set in tools, and OPT correctly added to
+meta-data. Update gen_domain. Correct some typos in filenames. Remove some unused data
+in clmtype.F90. Update documentation to cesm1_0_4.
+
+Requirements for tag:
+ Testing on bluefire-only, Fix bugs: 1432 (part X), 1424X, 1423X, 1401 (part)X, 1309,
+ mksurfdata works at regular and hi-res and for f09, and at regular for: 128x256,
+ 512x1024, ne4np4, ne16np4, ne30np4, ne60np4, and ne240np4 resolutions, T31 and T31
+ mksurfdata rcp's work
+
+Test level of tag: critical
+
+Bugs fixed (include bugzilla ID):
+ 1432 (Several resolutions fail for new mksurfdata_map)
+ 1424 (variables written out as gdir)
+ 1423 (Problem building clmtype on bluefire)
+ 1398 (clm and mksurfdata_map needs to check map files -- partial)
+
+Known bugs (include bugzilla ID):
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 935 (RTM warning NOT an error)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1404 (Inconsistent domain and fatmlndfrc files)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1468 (Bad f09, f19 SCRIP Grid files)
+ 1476 (Problem with stand-alone build on bluefire)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 896 (T62 mode does not work)
+ 701 (svn keyword)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ Correct and add new mapping datasets and datasets for mksurfdata_map
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): most
+
+ scripts to scripts4_111108
+ Machines to Machines_111101
+ drv to drvseq4_0_08
+ cism to cism1_111004
+ csm_share to share3_111027
+ timing to timing_111101
+ MCT to MCT2_7_0-111101
+ pio to pio1_3_12
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+ A models/lnd/clm/test/system/config_files/_scnv_ds - Add serial option
+ A models/lnd/clm/bld/namelist_files/checkmapfiles.ncl - check that map files
+ are consistent
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/input_tests_master - Add serial irrig test
+
+ M models/lnd/clm/test/system/test_driver.sh ----- Fix issues on bluefire,
+ update some paths on edinburgh
+
+>>>>>>>>>>>>>> Update documentation
+ M models/lnd/clm/doc/UsersGuide/single_point.xml
+ M models/lnd/clm/doc/UsersGuide/special_cases.xml
+ M models/lnd/clm/doc/UsersGuide/custom.xml
+ M models/lnd/clm/doc/KnownBugs
+
+>>>>>>>>>>>>>> Get svn keywords set on tools, and make sure OPT is set
+ M models/lnd/clm/tools/mkmapdata/mvNimport.sh -------- Fix syntax error
+ M models/lnd/clm/tools/mkmapdata/mkmapdata.sh -------- Add in 3x3 grid and ISRIC-WISE
+ mask, add option to build ocean-land mask, and large-file format option,
+ M models/lnd/clm/tools/interpinic/src/interpinic.F90 - Fix svn keywords
+ M models/lnd/clm/tools/interpinic/src/Makefile ------- Set OPT CPP if OPT=TRUE
+ M models/lnd/clm/tools/mkgriddata/src/Makefile ------- Set OPT CPP if OPT=TRUE
+ M models/lnd/clm/tools/mkdatadomain/src/Makefile ----- Set OPT CPP if OPT=TRUE
+ M models/lnd/clm/tools/mkmapgrids/src/mkmapgrids.F90 - Add more meta-data
+ M models/lnd/clm/tools/mkmapgrids/src/Makefile-------- Set OPT CPP if OPT=TRUE
+
+>>>>>>>>>>>>>> Add call to domain_checksame to check if domains are the same,
+>>>>>>>>>>>>>> split lake and wetland processing, add write statement for each file
+>>>>>>>>>>>>>> opened, add -hires and -allownofile options to mksurfdata.pl
+ M models/lnd/clm/tools/mksurfdata_map/src/mkglcmecMod.F90 ---- Use domain_checksame
+ M models/lnd/clm/tools/mksurfdata_map/src/mkvarctl.F90 ------- Split lake/wetland
+ M models/lnd/clm/tools/mksurfdata_map/src/mkvocefMod.F90 ----- Use domain_checksame
+ M models/lnd/clm/tools/mksurfdata_map/src/mklaiMod.F90 ------- Use domain_checksame
+ M models/lnd/clm/tools/mksurfdata_map/src/mksoilMod.F90 ------ Use domain_checksame
+ increase kmap_max_min from 50 to 90 (so T31 can be run)
+ M models/lnd/clm/tools/mksurfdata_map/src/mkdomainMod.F90 ---- Add domain_checksame,
+ make domain_init private, add metadata if frac/mask set, eliminate
+ lats/n,lone/w, use call abort in place of stop,
+ M models/lnd/clm/tools/mksurfdata_map/src/mksurfdat.F90 ------ Split lake/wetland
+ remove documentation on specific datasets, increase allowed sum of special
+ landunits from 120 to 250
+ M models/lnd/clm/tools/mksurfdata_map/src/mkurbanparMod.F90 -- Use domain_checksame
+ M models/lnd/clm/tools/mksurfdata_map/src/mkharvestMod.F90 --- Use domain_checksame
+ M models/lnd/clm/tools/mksurfdata_map/src/mkfileMod.F90 ------ Fix meta-data,
+ remove lats/n,lone/w
+ M models/lnd/clm/tools/mksurfdata_map/src/mkgridmapMod.F90 --- Add headers, more
        checking, add gridmap_setptrs method and private gridmap_checkifset method,
+
+ M models/lnd/clm/tools/mksurfdata_map/src/mklanwatMod.F90 ---- Split mklanwat
+ into mklakwat/mkwetlnd subroutines, use domain_checksame,
+ M models/lnd/clm/tools/mksurfdata_map/src/Makefile ----------- Set OPT CPP if OPT=TRUE
+ M models/lnd/clm/tools/mksurfdata_map/src/mkpftMod.F90 ------- Use domain_checksame
+ M models/lnd/clm/tools/mksurfdata_map/mksurfdata.pl ---------- Add -hires and
+ -allownofile options, split lake and wetland
+
+ -allownofile Allow the script to run even if one of the input files
+ does NOT exist.
+ -hires If you want to use high-resolution input datasets rather than the default
+ lower resolution datasets (low resolution is typically at half-degree)
+
+ M models/lnd/clm/tools/mksurfdata_map/pftdyn_hist_simyr1850-2005.txt Use $CSMDATA
+ directory path rather than /cgd/tss path
+ M models/lnd/clm/tools/mksurfdata_map/mksurfdata_map.namelist Split lake and wetland
+
+>>>>>>>>>>>>>> Minor changes
+ M models/lnd/clm/bld/build-namelist --------- Move groups earlier, start adding
+ code to handle lnd_inst_counter
+ M models/lnd/clm/bld/listDefaultNamelist.pl - Get rcp list sooner
+
+>>>>>>>>>>>>>> Fix some filename typos, add new mapping files, add hi-res
+>>>>>>>>>>>>>> datasets
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ------ Add
+ ne4np4, ne16np4, ne60np4, ne240np4 mapping files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml - Correct
        some typos in filenames (ne4np4 scripgrid, ngwh mksurfdata pftdyn file
+ for rcp 6 for year 2006
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Add mksrf_filename,
+ correct mksrf_* filenames to mksrf_f*, add ISRIC-WISE lmask
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist ------- Only require
+ datm_data_dir for CPLHIST3HrWx
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Add ISRIC-WISE,
+ 3x3min_MODIS mapping datasets, correct some map dataset names,
+ M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl ----- Add more data
+ to output table
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml Correct and
+ add new scrip grid files, add hires option for some data, add
+ mksrf_filename for types of files, add in all mksurfdata raw
+ datasets, add new hires datasets, correct mksrf_fvegtyp filenames,
+
+>>>>>>>>>>>>>> Remove initialization of unused data types (allows C13 on bluefire)
+>>>>>>>>>>>>>> Add option to convert nan to fillvalue on output files
+>>>>>>>>>>>>>> (and vica-versa on input)
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 - Remove initialization of unused
+ data types
+ M models/lnd/clm/src/main/clm_atmlnd.F90 ----- Remove unused pdf variable
+ M models/lnd/clm/src/main/initSurfAlbMod.F90 - Remove unused CNZeroFluxes
+ M models/lnd/clm/src/main/ncdio_pio.F90 ------ Add cnvrtnan2fill option
+ to convert from spval to nan on read and from nan to spval on write
+ M models/lnd/clm/src/main/clmtype.F90 -------- Remove unused variables
+ M models/lnd/clm/src/main/histFldsMod.F90 ---- Add some documentation, change
+ longname of QSOIL, correct: CISUN, CISHA, ALPHAPSNSUN, ALPHAPSNSHA
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90
+
+
+Summary of testing:
+
+ bluefire testing:
+018 brX51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+020 smHO2 TSM.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4
+021 erHO2 TER.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -3+-7 cold ................FAIL! rc= 5
+022 brHO2 TBR.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -5+-5 cold ................FAIL! rc= 5
+023 blHO2 TBL.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4
+028 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4
+029 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -3+-7 arb_ic ............FAIL! rc= 5
+030 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -5+-5 arb_ic ............FAIL! rc= 5
+031 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4
+032 smH52 TSM.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 96 cold ...............FAIL! rc= 4
+033 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 10+38 cold ............FAIL! rc= 5
+034 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:1800 10x15 USGS@2000 72+72 cold .........FAIL! rc= 5
+035 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 48 cold ...............FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+031 erAK4 TER.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 7
+032 brAK4 TBR.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 6
+036 brAK8 TBR.sh _nrsc_ds clm_std^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 6
+037 blAK8 TBL.sh _nrsc_ds clm_std^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 4
+067 bl5@4 TBLtools.sh gen_domain tools__ds namelist .............................................FAIL! rc= 7
+003 sm774 TSMtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 5
+004 bl774 TBLtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 4
+008 bl954 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__ds ......FAIL! rc= 6
+011 sm974 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 6
+012 bl974 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 4
+013 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+014 bl9T4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 4
+015 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+016 bl9C4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 4
+ bluefire/CESM testing: All PASS except... (compare tests fail because clm4_0_37 file did not exist)
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_37
+
+===============================================================
+===============================================================
+Tag name: clm4_0_37
+Originator(s): erik (Erik Kluzek)
+Date: Mon Sep 26 10:35:24 MDT 2011
+One-line Summary: Fix unstructured grids history files
+
+Purpose of changes:
+
+Comment out code for writing out fine-mesh lat/lon for unstructured grids. This caused
+the code to blow up when running for HOMME grids such as ne30np4.
+
+Bugs fixed (include bugzilla ID):
+ 1415 (History files can't be written out for HOMME grids)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1398 (clm and mksurfdata_map needs to check map files for consistency)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1404 (Inconsistent domain and fatmlndfrc files)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1423 (Problem building clmtype on bluefire)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/src/main/histFileMod.F90 - Comment out code for fine-mesh
+ lat/lon for unstructured grids
+
+Summary of testing: None!
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_36
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_36
+Originator(s): erik (Erik Kluzek)
+Date: Thu Sep 22 11:05:59 MDT 2011
+One-line Summary: Comment out RTM mapping files for f09 and f19
+
+Purpose of changes:
+
+Comment out the RTM mapping files for f09/f19 so answers are the same as clm4_0_34 and as the f19 mapping
+files cause the fully coupled model to blow up in POP. Add "mv" option to mksurfdata.pl and make -nomv the
+default so it doesn't try to copy files by default. Increase length of filename strings for mksurfdata pftdyn
+files. Add some metadata for some restart file variables. Add "new good wood harvest" datasets
+and option (-new_woodharv) to mksurfdata.pl from Peter Lawrence so can make surface
+datasets with either set of files. New good wood harvest applies to rcp6 and rcp8.5.
+Also add in some new mapping files for: 512x1024,128x256,64x128,32x64,8x16,0.23x0.31,5x5_amazon.
+Add SCRIP grid files for: ne4np4,ne16np4, ne60np4, ne240np4. Add 3x3min resolution
+and 3x3min SCRIP grid file which will be used for high resolution surface dataset
+creation in the future. Use new surface datasets with old fatmgrid values for f09_g16
+for 1850 and 2000 so that answers can be identical to clm4_0_34 without requiring the
+fatmgrid file.
+
+Bugs fixed (include bugzilla ID):
+ 1414 (Answers change @ f09 resolution w/o fatmgrid file)
+ 1413 (re is in incorrect units in mksurfdata_map)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1398 (clm and mksurfdata_map needs to check map files for consistency)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1404 (Inconsistent domain and fatmlndfrc files)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1415 (History files can't be written out for HOMME grids)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ Comment out RTM mapping files for f09/f19
+ Also new mapping files for: 512x1024,128x256,64x128,32x64,8x16,0.23x0.31,5x5_amazon.
+ Add SCRIP grid files for: ne4np4,ne16np4, ne60np4, ne240np4.
+ Add 3x3min SCRIP grid file.
+ New surface datasets with old fatmgrid grid coordinate values for f09/1850/2000
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/tools/mksurfdata_map/src/mksurfdat.F90 -------------- Increase pftdyn file length to 135
+ M models/lnd/clm/tools/mksurfdata_map/src/mkfileMod.F90 -------------- Increase nchar dim to 256
+ M models/lnd/clm/tools/mksurfdata_map/mksurfdata.pl ------------------ Add "mv" option with "nomv" the default
+ M models/lnd/clm/tools/mksurfdata_map/pftdyn_hist_simyr1850-2005.txt - Increase length of strings for files
+
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Add 3x3min,
+ ne4np4,ne16np4, ne60np4, ne240np4 as valid resolutions
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Comment out f09/f19 RTM mapping files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml - Add new
+ good wood harvest pftdyn input files from Peter Lawrence for mksurfdata_map
+ for rcp6 and rcp8.5
+
+ M models/lnd/clm/src/biogeochem/CNrestMod.F90 --------- Add some FillValue to some fields for restart files
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 - Add some FillValue to some fields for restart files
+
+Summary of testing:
+
+ bluefire interactive testing: Following PASS
+001 sm754 TSMtools.sh mksurfdata_map tools__s namelist ..........................................PASS
+003 sm953 TSMscript_tools.sh mkmapdata mkmapdata.sh mkmapdata_if10 ..............................PASS
+005 sm954 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__ds ......PASS
+007 sm9S4 TSMscript_tools.sh ncl_scripts getregional_datasets.pl getregional ....................PASS
+008 bl9S4 TBLscript_tools.sh ncl_scripts getregional_datasets.pl getregional ....................PASS
+ bluefire/CESM testing: All PASS except (compare to clm4_0_34)
+CFAIL ERS_E.T31_g37.I1850.bluefire.GC.125250 (ESMF doesn't work with NetCDF4)
+BFAIL ERB.ne30_g16.I_1948-2004.bluefire.compare.clm4_0_34 (ne30 wasn't in clm4_0_34) (answers are identical to clm4_0_35)
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_34 (I1850SPINUPCN wasn't in clm4_0_34 or before)
+FAIL NCK.f10_f10.I.bluefire -- scripts needs to cleannamelist after changing NINST_LND
+ bluefire extra CESM testing: Following PASS
+PASS ERS.f09_g16.ICN.bluefire
+PASS ERS.f09_g16.ICN.bluefire.generate.clm4_0_36
+PASS ERS.f09_g16.ICN.bluefire.compare_hist.clm4_0_33
+PASS ERS.f09_g16.ICN.bluefire.compare.clm4_0_33
+PASS ERS.f09_g16.I1850CN.bluefire
+PASS ERS.f09_g16.I1850CN.bluefire.generate.clm4_0_36
+PASS ERS.f09_g16.I1850CN.bluefire.compare_hist.clm4_0_33
+PASS ERS.f09_g16.I1850CN.bluefire.compare.clm4_0_33
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_35
+
+Changes answers relative to baseline: f09/f19 now same as clm4_0_34
+
+===============================================================
+===============================================================
+Tag name: clm4_0_35
+Originator(s): erik (Erik Kluzek), mvertens
+Date: Tue Sep 13 22:15:17 MDT 2011
+One-line Summary: Bring in Mariana's non2D grid branch to trunk, enabling HOMME grids: ne30np4/ne120np4
+
+Purpose of changes:
+
+Move Mariana's new non-2D branch to trunk. Extensive changes to mksurfdata, allows 1D-vector surface
+datasets. Mariana changed mksurfdata to add unstructured grid format using SCRIP weights. Won't work
+with PTCLM and mksurfdata won't be able to create single-pt/regional surface datasets. Fix reverse
+coordinates on VOC/irrig mksurfdata input file. Update scripts and datm with HOMME grids. Add in
+half-degree pftdyn historical dataset. Partial fix to PTSMODE restart problem. Fix the US-UMB data for PTCLM.
+
+NOTE: File creation process is changed substantially! mksurfdata now requires mapping files to be created first
+ in order to run the new mksurfdata_map. This means you need to do the following:
+
+ 1.) run mkgriddata
+ 2.) run mkmapgrid (add files to XML database) (requires 1)
+ 3.) run mkmapdata (add files to XML database) (requires 2)
+ 4.) run mksurfdata_map (requires 3)
+ 5.) run gen_domain (requires 3 needed for datm)
+
+ See the models/lnd/clm/tools/README file for more help on the process.
+
+WARNING: YOU CAN'T CREATE SINGLE-POINT DATASETS WITH THIS VERSION! You can create frac/grid files with this
+ version and then use an older version of clm to use mksurfdata to create surface datasets. The mapping
+ for single-point datasets using ESMF does NOT work -- although it does work if you have at least 4 points
+ so you can create regional datasets.
+
+ THIS MEANS PTCLM DOES NOT WORK FOR CREATING NEW DATASETS! It will work for datasets already created however.
+
+CAUTION: Mapping files to allow mksurfdata to work are only provided for: f09, f19, f10, T31, f45, f25, ne30 and ne120
+
+Bugs fixed (include bugzilla ID):
+ 1392 (US-UMB site has some incorrect data)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1393 (error when running Gregorian calendar)
+ 1397 (c2l_scale_type not specified for many history fields)
+ 1398 (clm and mksurfdata_map needs to check map files for consistency)
+ 1401 (Restart files do NOT have _FillValue/missing_value attributes on fields)
+ 1404 (Inconsistent domain and fatmlndfrc files)
+ 1405 (Problem with irrigation on clm4_0_34 with intel compiler)
+ 1407 (Build problem on jaguar for test_driver.sh with -c option)
+ 1409 (ne120 is having restart trouble on jaguar with NetCDF3)
+ 1410 (Problem running PST.f09_g16.I.jaguarpf)
+ 1411 (ERI_D.ne30_g16.I1850CN.jaguarpf.G.235924 fails on jaguarpf)
+ 1413 (re is in incorrect units in mksurfdata_map)
+ 1414 (Answers change @ f09 resolution w/o fatmgrid file)
+ 1415 (History files can't be written out for HOMME grids)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist:
+ Can now read in domain files in place of fatmlndfrac
+ fatmgrid no longer required (use fsurdat to get grid)
+ fmapinp_rtm new namelist item to give mapping for RTM
+
+List any changes to the defaults for the boundary datasets:
+
+ Add: ne30np4/ne120np4 datasets, add 1850-2000 0.47x0.63 fpftdyn file
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, mvertens, sacks
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, csm_share, datm
+
+ scripts to scripts4_110906
+ csm_share to share3_110906
+ datm to datm8_110908
+
+List all files eliminated:
+
+>>>>>>>>>>>> Rename mksurfdata to mksurfdata_map
+ D models/lnd/clm/tools/mksurfdata/*
+
+>>>>>>>>>>>> Move source code to src subdirectory
+ D models/lnd/clm/tools/mkdatadomain/Mkdepends/Srcfiles/Filepath/Makefile/*.F90
+ D models/lnd/clm/tools/mkgriddata/Mkdepends/Srcfiles/Filepath/Makefile/*.F90
+ D models/lnd/clm/tools/interpinic/Mkdepends/Srcfiles/Filepath/Makefile/*.F90
+
+List all files added and what they do:
+
+>>>>>>>>>>>> Rename mksurfdata to mksurfdata_map, create src sub-directory
+ A models/lnd/clm/tools/mksurfdata_map
+ A models/lnd/clm/tools/mksurfdata_map/mksurfdata.pl
+ A models/lnd/clm/tools/mksurfdata_map/mksurfdata_map.namelist
+ A models/lnd/clm/tools/mksurfdata_map/pftdyn_hist_simyr1850-2005.txt
+ A models/lnd/clm/tools/mksurfdata_map/README
+ A models/lnd/clm/tools/mksurfdata_map/src
+ A models/lnd/clm/tools/mksurfdata_map/src/clm_varctl.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/clm_varpar.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/Filepath
+ A models/lnd/clm/tools/mksurfdata_map/src/fileutils.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/Macros.custom
+ A models/lnd/clm/tools/mksurfdata_map/src/Makefile
+ A models/lnd/clm/tools/mksurfdata_map/src/Mkdepends
+ A models/lnd/clm/tools/mksurfdata_map/src/mkdomainMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkfileMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkglcmecMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkgridmapMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkharvestMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mklaiMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mklanwatMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkncdio.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkpftMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mksoilMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mksurfdat.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkurbanparMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkvarctl.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkvarpar.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/mkvocefMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/nanMod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/shr_const_mod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/shr_file_mod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/shr_kind_mod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/shr_log_mod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/shr_string_mod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/shr_sys_mod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/shr_timer_mod.F90
+ A models/lnd/clm/tools/mksurfdata_map/src/Srcfiles
+
+>>>>>>>>>>>> Scripts to use ESMF to create SCRIP mapping files from SCRIP grid files
+ A models/lnd/clm/tools/mkmapdata
+ A models/lnd/clm/tools/mkmapdata/mkmapdata.sh
+ A models/lnd/clm/tools/mkmapdata/mvNimport.sh
+ A models/lnd/clm/tools/mkmapdata/README
+ A models/lnd/clm/tools/mkmapdata/regridbatch.sh
+ A models/lnd/clm/tools/mkmapdata/rmdups.ncl ----- NCL script to remove duplicates
+
+>>>>>>>>>>>> Program to create SCRIP grid files from CLM grid/frac files
+ A models/lnd/clm/tools/mkmapgrids
+ A models/lnd/clm/tools/mkmapgrids/mkmapgrids.csh
+ A models/lnd/clm/tools/mkmapgrids/mkmapgrids.namelist
+ A models/lnd/clm/tools/mkmapgrids/README
+ A models/lnd/clm/tools/mkmapgrids/src
+ A models/lnd/clm/tools/mkmapgrids/src/domainMod.F90
+ A models/lnd/clm/tools/mkmapgrids/src/Filepath
+ A models/lnd/clm/tools/mkmapgrids/src/Macros.custom
+ A models/lnd/clm/tools/mkmapgrids/src/Makefile
+ A models/lnd/clm/tools/mkmapgrids/src/Mkdepends
+ A models/lnd/clm/tools/mkmapgrids/src/mkmapgrids.F90
+ A models/lnd/clm/tools/mkmapgrids/src/nanMod.F90
+ A models/lnd/clm/tools/mkmapgrids/src/shr_file_mod.F90
+ A models/lnd/clm/tools/mkmapgrids/src/shr_kind_mod.F90
+ A models/lnd/clm/tools/mkmapgrids/src/shr_log_mod.F90
+ A models/lnd/clm/tools/mkmapgrids/src/shr_sys_mod.F90
+ A models/lnd/clm/tools/mkmapgrids/src/Srcfiles
+
+>>>>>>>>>>>> Programs to postprocess 1D vector unstructured grids
+ A models/lnd/clm/tools/mkprocdata_map
+ A models/lnd/clm/tools/mkprocdata_map/camhomme
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/mkprocdata_map_in
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/Depends
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/domainMod.F90
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/Filepath
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/fileutils.F90
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/gridmapMod.F90
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/Makefile
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/mkprocdata_map.F90
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/nanMod.F90
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/shr_file_mod.F90
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/shr_kind_mod.F90
+ A models/lnd/clm/tools/mkprocdata_map/camhomme/src/Srcfiles
+ A models/lnd/clm/tools/mkprocdata_map/clm
+ A models/lnd/clm/tools/mkprocdata_map/clm/mkprocdata_map_all
+ A models/lnd/clm/tools/mkprocdata_map/clm/mkprocdata_map_in
+ A models/lnd/clm/tools/mkprocdata_map/clm/mkprocdata_map_wrap
+ A models/lnd/clm/tools/mkprocdata_map/clm/README
+ A models/lnd/clm/tools/mkprocdata_map/clm/src
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/constMod.F90
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/Filepath
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/fileutils.F90
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/fmain.F90
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/gridmapMod.F90
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/Makefile
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/Mkdepends
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/mkprocdata_map.F90
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/nanMod.F90
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/shr_file_mod.F90
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/shr_kind_mod.F90
+ A models/lnd/clm/tools/mkprocdata_map/clm/src/Srcfiles
+
+>>>>>>>>>>>> Make macros files to customize how tools operate (allows all tools to have an identical Makefile)
+ A models/lnd/clm/tools/interpinic/src/Macros.custom
+ A models/lnd/clm/tools/mkgriddata/src/Macros.custom
+ A models/lnd/clm/tools/mkdatadomain/src/Macros.custom
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>> Add tests for new grids/tools
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf
+ M models/lnd/clm/test/system/tests_posttag_mirage
+ M models/lnd/clm/test/system/tests_posttag_intrepid
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/README.testnames --------------- 4/5=mkmapgrids/gen_domain config
+ I=CN spinup, @=ne120np4, #=ne30np4, *=ne16np4 run options
+
+>>>>>>>>>>>> Change build/run for tools, update netcdf on bluefire, update modules on jaguar, add new tests
+ M models/lnd/clm/test/system/TCB.sh ------------- Remove setting of MACFILE not needed
+ M models/lnd/clm/test/system/TCBtools.sh -------- Add src directory, Mkdepends, Macros.custom
+ M models/lnd/clm/test/system/TBLscript_tools.sh - Set CLM_ROOT
+ M models/lnd/clm/test/system/TBLtools.sh -------- Set CLM_ROOT
+ M models/lnd/clm/test/system/TBL.sh ------------- Allow compile-only mode to work
+ M models/lnd/clm/test/system/TSM.sh ------------- Handle multi-instance rpointer files
+ M models/lnd/clm/test/system/test_driver.sh -- Get netcdf4.1.3 working on bluefire, get mirage build working,
+ use glade paths, add ESMFBIN_PATH, update jaguar modules
+ M models/lnd/clm/test/system/input_tests_master - Fill out HM tests, add H#, H@, blJ07, 454, 5@4, 9#2, 953
+ tests, mksurfdata=>mksurfdata_map
+ M models/lnd/clm/test/system/nl_files/clm_spin -- Change case to agree with 1850 MOAR case in CESM scripts
+ M models/lnd/clm/test/system/nl_files/mksrfdt_10x15_irr_1850 ------------ Remove -nomv option
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_vancouverCAN_2000 ----- Remove -nomv option
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_numaIA_mp20irrcr_2000 - Remove -nomv option
+
+>>>>>>>>>>>> Run interpinic and checkin the result
+ M models/lnd/clm/tools/interpinic/clmi.BCN.1949-01-01_10x15_USGS_simyr1850_c100322.nc
+
+>>>>>>>>>>>> Update to glade path, add notes on src subdirectory
+ M models/lnd/clm/tools/interpinic/README ------------------ Add notes about src subdirectory
+ M models/lnd/clm/tools/mkgriddata/mkgriddata.namelist ----- Use glade path
+ M models/lnd/clm/tools/mkgriddata/mkgriddata.regional ----- Use glade path
+ M models/lnd/clm/tools/mkgriddata/mkgriddata.singlept ----- Use glade path
+ M models/lnd/clm/tools/mkgriddata/mkgriddata.cesm_dom ----- Use glade path
+ M models/lnd/clm/tools/mkgriddata/README ------------------ Update with added src sub-directory
+ M models/lnd/clm/tools/mkdatadomain/mkdatadomain.namelist - Use glade path
+ M models/lnd/clm/tools/mkdatadomain/README ---------------- Add notes about src subdirectory
+
+>>>>>>>>>>>> Update tools README information
+ M models/lnd/clm/tools/README.testing ------ Note about src subdirectory required
+ M models/lnd/clm/tools/README -------------- Updated with notes on new process
+ M models/lnd/clm/tools/README.filecopies --- Notes on list of file copies has changed
+
+>>>>>>>>>>>> Changes to tools source codes moved to src subdirectories, Makefile was standardized
+>>>>>>>>>>>> update shr_sys_mod.F90 file to latest csm_share
+ M models/lnd/clm/tools/interpinic/src/interpinic.F90 --- Add metadata on OPT and OMP
+ M models/lnd/clm/tools/interpinic/src/Makefile --------- Standardize
+ M models/lnd/clm/tools/interpinic/src/shr_sys_mod.F90 -- Update
+ M models/lnd/clm/tools/mkdatadomain/src/Makefile ------- Standardize
+ M models/lnd/clm/tools/mkdatadomain/src/Filepath ------- Only use local directory
+ M models/lnd/clm/tools/mkgriddata/src/Makefile --------- Standardize
+ M models/lnd/clm/tools/mkgriddata/src/shr_sys_mod.F90 -- Update
+ M models/lnd/clm/tools/mkgriddata/src/clm_varctl.F90 --- Update
+
+>>>>>>>>>>>> Add RTM mapping file, change some namelist file required logic
+ M models/lnd/clm/bld/listDefaultNamelist.pl - Also get RTM mapping file, and use $CSMDATA if set
+ M models/lnd/clm/bld/build-namelist --------- If can't find a frac file use the datm domain file, only
+ get fatmgrid file for fine-mesh, if RTM on get mapping file, if fine-mesh on and fatmgrid not found
+ use fsurdat file
+ M models/lnd/clm/bld/clm.cpl7.template ------ Clarify documentation for CLM_RTM_RES
+
+>>>>>>>>>>>> Add new files needed for ne30np4/ne120np4 and processing of them
+ M models/lnd/clm/bld/namelist_files/checkdatmfiles.ncl --------- Add test for more resolutions
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ---- Add: fmapinp_rtm, scripgriddata, mksrf_fglctopo,
+ map, lmask, hgrid
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- Add: ne30np4/ne120np4 datasets, add
+ 1850-2000 0.47x0.63 fpftdyn file, remove some of the single-point fatmlndgrd files, add mapping files,
+ add lmask/hgrid for different map types
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - Add ne30np4/ne120np4 domain files
+
+>>>>>>>>>>>> Use llatlon structure in place of lsmlat/lsmlon, required files a bit different, use fsurdat if
+>>>>>>>>>>>> fatmgrid is not given, add RTM mapping file, if fatmlndfrc NOT set set mask/frac to 1.
+ M models/lnd/clm/src/biogeochem/CASAMod.F90 ------------ Use llatlon%ni/nj in place of lsmlon/lat
+ don't allow 1D grids for CASA
+ M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 - Use llatlon structure in place of lsmlon/lat
+ M models/lnd/clm/src/biogeochem/CNDVMod.F90 ------------ Use llatlon structure in place of lsmlon/lat
+ M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90 ---------- asca => ascale
+ M models/lnd/clm/src/main/organicFileMod.F90 ----------- Use llatlon structure in place of lsmlon/lat
+ M models/lnd/clm/src/main/clm_varpar.F90 --------------- Remove lsmlon/lsmlat parameters
+ M models/lnd/clm/src/main/clm_timemanager.F90 ---------- Add some meta-data to restart file, check restart values
+ M models/lnd/clm/src/main/clm_initializeMod.F90 -------- Remove cellarea, surfrd gets area
+ M models/lnd/clm/src/main/fileutils.F90 ---------------- Make iflag required argument
+ M models/lnd/clm/src/main/ndepStreamMod.F90 ------------ Use llatlon structure in place of lsmlon/lat
+ M models/lnd/clm/src/main/iniTimeConst.F90 ------------- Remove start/count lsmlon/lsmlat
+ M models/lnd/clm/src/main/histFileMod.F90 -------------- Add namea grid, replace lsmlon/lat with llatlon
+ handle unstructured grids
+ M models/lnd/clm/src/main/controlMod.F90 --------------- If fatmgrid NOT set, use fsurdat, add fmapinp_rtm
+ if fatmlndfrc NOT set, set mask/frac to 1.
+ M models/lnd/clm/src/main/clm_varctl.F90 --------------- Add fmapinp_rtm
+ M models/lnd/clm/src/main/ncdio_pio.F90 ---------------- Add ncd_inqfdims, io_type public, clmlevel set
+ earlier, remove switchdim from ncd_io_int_var2, handle switchdim in ncd_io_real_var2 read for
+ singlept
+ M models/lnd/clm/src/main/surfrdMod.F90 ---------------- Remove surfrd, add surfrd_get_data handle 1D grids
+ M models/lnd/clm/src/main/domainMod.F90 ---------------- asca=>ascale
+ M models/lnd/clm/src/main/decompMod.F90 ---------------- Add namea remove get_clmlevel_dsize
+ M models/lnd/clm/src/main/clmtype.F90 ------------------ Increase len=8 to len=16
+ M models/lnd/clm/src/riverroute/RtmMod.F90 ------------- Add L2R_Decomp, remove lsmlat/lon for llatlon
+ remove river meta-data
+ M models/lnd/clm/src/riverroute/RtmMapMod.F90 ---------- Pass in fracout
+ M models/lnd/clm/src/biogeophys/UrbanInputMod.F90 ------ Use llatlon in place of lsmlat/lon
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90 -------- asca=>ascale
+
+Summary of testing:
+
+ bluefire: All PASS except...
+015 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:1800 1.9x2.5 gx1v6@1850-2100 -10 cold FAIL! rc= 7
+018 brX51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+020 smHO2 TSM.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4
+021 erHO2 TER.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -3+-7 cold ................FAIL! rc= 5
+022 brHO2 TBR.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -5+-5 cold ................FAIL! rc= 5
+023 blHO2 TBL.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4
+028 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4
+029 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -3+-7 arb_ic ............FAIL! rc= 5
+030 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -5+-5 arb_ic ............FAIL! rc= 5
+031 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4
+032 smH52 TSM.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 96 cold ...............FAIL! rc= 4
+033 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 10+38 cold ............FAIL! rc= 5
+034 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:1800 10x15 USGS@2000 72+72 cold .........FAIL! rc= 5
+035 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 48 cold ...............FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+009 blC97 TBL.sh _sc_do clm_spin^nl_urb 20030101:1800 4x5 gx3v7@1850 -6 arb_ic ..................FAIL! rc= 5
+031 erAK4 TER.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 7
+032 brAK4 TBR.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 6
+036 brAK8 TBR.sh _nrsc_ds clm_std^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 6
+037 blAK8 TBL.sh _nrsc_ds clm_std^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 4
+051 blK74 TBL.sh 17p_cndvsc_s clm_std 19971231:1800 1x1_brazil navy -670 arb_ic .................FAIL! rc= 5
+004 blS63 TBL.sh _mec10sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ...................FAIL! rc= 7
+005 smQQ4 TSM.sh _nrcnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -150 cold .............FAIL! rc= 4
+006 erQQ4 TER.sh _nrcnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -3+-7 cold ............FAIL! rc= 5
+007 brQQ4 TBR.sh _nrcnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -5+-5 cold ............FAIL! rc= 5
+008 blQQ4 TBL.sh _nrcnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -150 cold .............FAIL! rc= 4
+012 blH43 TBL.sh 17p_cnsc_do clm_transient_20thC 19790101:1800 1.9x2.5 gx1v6@1850-2000 -10 startup FAIL! rc= 7
+014 bl454 TBLtools.sh mkmapgrids tools__ds namelist .............................................FAIL! rc= 5
+016 bl5@4 TBLtools.sh gen_domain tools__ds namelist .............................................FAIL! rc= 5
+024 bl754 TBLtools.sh mksurfdata_map tools__s namelist ..........................................FAIL! rc= 5
+025 sm774 TSMtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 5
+026 bl774 TBLtools.sh mksurfdata_map tools__ds singlept .........................................FAIL! rc= 4
+033 bl8Z3 TBLrst_tools.sh 21p_cncrpsc_do interpinic clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6FAIL! rc= 5
+037 bl953 TBLscript_tools.sh mkmapdata mkmapdata.sh mkmapdata_if10 ..............................FAIL! rc= 5
+039 bl954 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_10x15_irr_1850^tools__ds ......FAIL! rc= 6
+043 bl974 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds FAIL! rc= 6
+045 bl9T4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+047 bl9C4 TBLscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+ bluefire/CESM testing: All PASS except...
+FAIL ERS_D.f19_g16.IGRCP26CN.bluefire.compare_hist.clm4_0_34
+FAIL ERS_D.f19_g16.IGRCP26CN.bluefire.compare.clm4_0_34
+FAIL ERP.f19_g16.IGRCP60CN.bluefire.compare_hist.clm4_0_34
+FAIL ERP.f19_g16.IGRCP60CN.bluefire.compare.clm4_0_34
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_34
+BFAIL ERB.ne30_g16.I_1948-2004.bluefire.compare.clm4_0_34
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_34
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_34
+ bluefire/PTCLM testing: All PASS
+ lynx/pgi testing: All FAIL (build issues)
+ lynx/pgi interactive testing: All PASS except...
+010 erAL4 TER.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -5+-5 cold ................FAIL! rc= 7
+011 brAL4 TBR.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -5+-5 cold ................FAIL! rc= 6
+ lynx/CESM extra testing: ...
+PASS ERS.ne30_g16.I1850CN.lynx_gnu
+PASS ERS.ne30_g16.I1850CN.lynx_intel
+PASS ERS.ne30_g16.I1850CN.lynx_pathscale
+ jaguarpf: All FAIL (system build issue)
+ jaguarpf interactive testing: All PASS up to...
+14 PTCLM.16750_US-UMB_ICN_exit_spinup.PTCLM PASS
+ jaguarpf/CESM testing: All PASS except...
+FAIL PST.f09_g16.I.jaguarpf
+FAIL ERI_D.ne30_g16.I1850CN.jaguarpf
+TFAIL ERH.ne120_g16.I2000CN.jaguarpf.G.235924
+ jaguarpf/CESM additional testing: ...
+FAIL ERH.ne120_g16.ICN.jaguarpf
+PASS SMS.ne120_g16.I.jaguarpf
+FAIL ERS.ne120_g16.I.jaguarpf
+ edinburgh/lf95 interactive testing: All PASS except...
+006 erAL4 TER.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -5+-5 cold ................FAIL! rc= 7
+007 brAL4 TBR.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -5+-5 cold ................FAIL! rc= 6
+008 blAL4 TBL.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 5
+025 sm978 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_brazil_1850^tools__ds .....FAIL! rc= 6
+026 sm9T4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__dFAIL! rc= 6
+027 sm9C4 TSMscript_tools.sh mksurfdata_map mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds FAIL! rc= 6
+ edinburgh/CESM testing: ...
+PASS ERS.ne30_g16.I1850CN.edinburgh_pgi
+PASS ERS.ne30_g16.I1850CN.edinburgh_lahey
+ edinburgh/PTCLM testing: All PASS up to...
+14 PTCLM.30770_US-UMB_ICN_exit_spinup.PTCLM PASS
+ mirage,storm/ifort interactive testing: All PASS
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_34
+
+Changes answers relative to baseline:
+
+ f09_gx1v6, f19_gx1v6 are similar climate but different because of new RTM mapping files
+
+===============================================================
+===============================================================
+Tag name: clm4_0_34
+Originator(s): erik (Erik Kluzek)
+Date: Thu Aug 18 13:14:01 MDT 2011
+One-line Summary: Bring tcens branch to trunk, fix a few issues
+
+Purpose of changes:
+
+Remove -pftlc to mksurfdata.pl. Correct units of H2OSNOTOP, HC, and HCSOI history fields.
+Remove fget_archdev. Fix single point restarts from Brenden Rogers (although now there
+is a PIO issue). Fix pio error when clm is running at same grid as RTM from Mariana.
+Move Tony's "tcens" DART ensemble branch to trunk. Add save statement to ncdio. Have
+chkdatmfiles.ncl check both grid and frac files.
+Update pio/MCT/scripts/datm/PTCLM/csm_share.
+
+Bugs fixed (include bugzilla ID):
+ 1383 (Remove no-VOC and MAXPFT=4 tests)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ 1372 (pio problem writing out RTM hist fields at RTM res)
+ 1361 (Problem with transient compsets for PTCLM)
+ 1358 (incorrect units for a few history fields)
+ 1025 (SCM mode can NOT use a global finidat file) (partial)
+ 1017 (SCM mode can NOT restart) (partial)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1392 (US-UMB site has some incorrect data)
+ 1393 (error when running Gregorian calendar)
+ 1396 (pio problem reading 2D data with 1st dim=1)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system:
+ Add -ninst_lnd for multiple ensembles of CLM for data assimilation to configure.
+ Build pio and mct as separate libraries (add -mct_dir/pio_dir to configure).
+
+ Set multiple instances with NINST_LND env variable in env_mach_pes.xml
+ (make sure NTASKS_LND is >= NINST_LND)
+
+Describe any changes made to the namelist:
+ Remove fget_archdev option (don't try to get input files from archival device).
+
+ Set multiple instances of namelists by creating a "user_nl_clm" directory
+
+ Inside of the directory place
+
+ user_nl_clm ---- namelist changes to make for ALL instances
+ user_nl_clm_1 -- namelist changes for first instance
+ user_nl_clm_2 -- namelist changes for second instance
+ user_nl_clm_3 -- namelist changes for third instance
+.
+.
+.
+
+ build-namelist will create a namelist for each instance of the model being run.
+
+List any changes to the defaults for the boundary datasets: domain files updated
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ scripts to scripts4_110812
+ drv to drvseq4_0_04
+ datm to datm8_110811
+ csm_share to share3_110803
+ mct to MCT2_7_0_110804a
+ pio to pio1_3_9
+
+List all files eliminated:
+
+>>>>>>>>>> Delete some 4p and non-VOC tests to reduce testing
+ D models/lnd/clm/test/system/config_files/4p_vorsc_dm
+ D models/lnd/clm/test/system/config_files/17p_scnv_dm
+ D models/lnd/clm/test/system/config_files/4p_vorsc_do
+ D models/lnd/clm/test/system/config_files/17p_scnv_do
+ D models/lnd/clm/test/system/config_files/_scnv_dh
+ D models/lnd/clm/test/system/config_files/4p_vorsc_ds
+ D models/lnd/clm/test/system/config_files/17p_scnv_ds
+ D models/lnd/clm/test/system/config_files/_scnv_dm
+ D models/lnd/clm/test/system/config_files/_scnv_do
+ D models/lnd/clm/test/system/config_files/17p_scnv_m
+ D models/lnd/clm/test/system/config_files/17p_scnv_o
+ D models/lnd/clm/test/system/config_files/17p_scnv_s
+ D models/lnd/clm/test/system/config_files/4p_vorsc_h
+ D models/lnd/clm/test/system/config_files/4p_vorsc_o
+ D models/lnd/clm/test/system/config_files/17p_nrscnv_ds
+ D models/lnd/clm/test/system/config_files/4p_vonrsc_ds
+ D models/lnd/clm/test/system/config_files/4p_vorsc_dh
+ D models/lnd/clm/test/system/config_files/17p_scnv_dh
+
+List all files added and what they do:
+
+>>>>>>>>>> Add tests for multi-instance
+ A models/lnd/clm/test/system/config_files/_nrnil3sc_dh
+ A models/lnd/clm/test/system/config_files/_nrnil3sc_dm
+ A models/lnd/clm/test/system/config_files/_nil3sc_dh
+ A models/lnd/clm/test/system/config_files/_nil3sc_dm
+ A models/lnd/clm/test/system/nl_files/multi_inst/multi_inst
+ A models/lnd/clm/test/system/nl_files/multi_inst/multi_inst_1
+ A models/lnd/clm/test/system/nl_files/multi_inst/multi_inst_2
+ A models/lnd/clm/test/system/nl_files/multi_inst/multi_inst_3
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>> Remove old tests add new multi-instance tests in
+ M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_posttag_kraken
+ M models/lnd/clm/test/system/tests_posttag_yong
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi
+ M models/lnd/clm/test/system/tests_posttag_mirage
+ M models/lnd/clm/test/system/tests_posttag_intrepid_nompi
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/tests_posttag_lynx
+
+>>>>>>>>>> Handle clm* for multi-instance files, remove PFTDATA
+ M models/lnd/clm/test/system/TCB.sh -- Set pio/mct_dir in configure, send MACFILE
+ to make, and create clm exec temp for debug mode
+ M models/lnd/clm/test/system/TBL.sh --- compare clm* for multi-instance history
+ M models/lnd/clm/test/system/TBR.sh --- compare clm* for multi-instance history
+ M models/lnd/clm/test/system/TER.sh --- compare clm* for multi-instance history
+ M models/lnd/clm/test/system/TSMrst_tools.sh - compare clm* for multi-instance history
+ M models/lnd/clm/test/system/TSMpergro.sh - compare clm* for multi-instance history
+ M models/lnd/clm/test/system/TSMscript_tools.sh -- Remove PFTDATA setting
+ M models/lnd/clm/test/system/TSM.sh - compare clm* for multi-instance, cat lnd_in_000? files
+ M models/lnd/clm/test/system/input_tests_master -- add new multi_inst tests remove old
+ M models/lnd/clm/test/system/mknamelist - add quotes
+ M models/lnd/clm/test/system/README - Remove storm
+ M models/lnd/clm/test/system/README.testnames -- Add nil tests remove some 4p no-voc
+ M models/lnd/clm/test/system/TBLrst_tools.sh - compare clm* for multi-instance history
+ M models/lnd/clm/test/system/CLM_runcmnd.sh - remove storm
+ M models/lnd/clm/test/system/test_driver.sh -- use glade paths, add mct/pio_dir
+ add gres setting on jaguarpf, remove PFTDATA, remove storm, update cprnc on lynx
+ M models/lnd/clm/test/system/config_files/README - add nil3 config
+ change x resolution from T31 to f19 (no datasets at T31 for glc)
+
+>>>>>>>>>> Remove PFTDATA
+ M models/lnd/clm/test/system/nl_files/mksrfdt_10x15_irr_1850 --------- Remove PFTDATA
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850-2000 --- Remove PFTDATA
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_vancouverCAN_2000 -- Remove PFTDATA
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850 -------- Remove PFTDATA
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_numaIA_mp20irrcr_2000 -- Remove PFTDATA
+
+>>>>>>>>>> Remove PFTDATA and -p option, add -nobreak to cprnc.pl, print out more info
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl - Remove PFTDATA
+ M models/lnd/clm/tools/ncl_scripts/cprnc.pl ----- Add -nobreak
+ M models/lnd/clm/tools/ncl_scripts/cprnc.ncl ----- Add BREAKONDIFF, print avg/max diffs
+
+>>>>>>>>>> Add NINST_LND and build with new MCT/PIO where need mct_pio_dir
+>>>>>>>>>> Handle user_nl_clm directory for multi-instance
+ M models/lnd/clm/bld/configure - Add ninst_lnd/mct_dir/pio_dir options
+ change to work with new MCT/PIO
+ M models/lnd/clm/bld/config_files/config_definition.xml - add mct_dir/pio_dir/ninst_lnd/ninst_atm
+ M models/lnd/clm/bld/build-namelist - Add ability to write out multiple ensemble
+ namelist files, handle multiple infiles, and infile directories for multiple
+ ensembles
+ M models/lnd/clm/bld/clm.cpl7.template - handle NINST_LND add user_nl_clm directory
+ for multiple ensembles
+
+>>>>>>>>>> Compare grid/frac files, update domain files for datm, handle multiple infiles
+ M models/lnd/clm/bld/namelist_files/checkdatmfiles.ncl - compare grid/frac files
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml - Add glc_pio stuff
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - New f45, f10, f09,
+ f19, T31 domain files
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist - Be able to handle multiple
+ infiles
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml - Add glc_pio settings
+
+>>>>>>>>>> Handle multi-instance SPMD and files, remove fget_archdev, fix problem
+>>>>>>>>>> of running on RTM grid (mvertens), handle scam restart files (still fails
+>>>>>>>>>> because of PIO problem)
+ M models/lnd/clm/src/biogeochem/CNDVMod.F90 - Add inst_suffix to hv files
+ M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90 - Handle multiple instances and
+ multiple instances lnd_in and lnd_modelio.nml namelist files
+ M models/lnd/clm/src/main/fileutils.F90 -- Remove set_filename and putfil, simplify
+ getfil to NOT do archival retrieval
+ M models/lnd/clm/src/main/ndepStreamMod.F90 - Handle multi-instances
+ M models/lnd/clm/src/main/histFileMod.F90 --- Pass mfilt to set_hist_filename, don't
+ require mfilt to be one if nhtfrq=0, only use monthly form of filenames if
+ nhtfrq=0 AND mfilt=1
+ M models/lnd/clm/src/main/restFileMod.F90 - Handle multi-instance files
+ M models/lnd/clm/src/main/controlMod.F90 -- Remove fget_archdev
+ M models/lnd/clm/src/main/clm_varctl.F90 -- Remove fget_archdev, add inst_* vars
+ M models/lnd/clm/src/main/ncdio_pio.F90 --- Fix problem of running on RTM grid, handle
+ multi-instance files, pass vardesc to scam_field_offsets, handle landunit
+ in scam_field_offsets, start/count set for all dims, check that dimension
+ sizes and names are equal in order to share iodesc
+ M models/lnd/clm/src/main/spmdMod.F90 ---- spmd_init has LNDID passed in
+ M models/lnd/clm/src/main/histFldsMod.F90 - Fix units/long_names
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90 - Handle multi-instances and
+ multiple instances lnd_in and lnd_modelio.nml namelist files
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_mct.F90 -- Get LNDID
+
+Summary of testing:
+
+ bluefire: All PASS except TBL tests and...
+>>>>>>> rpointer.lnd_* files empty
+017 erX51 TER.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -3+-2 cold ..............FAIL! rc= 7
+018 brX51 TBR.sh _nil3sc_dh clm_std^multi_inst 20020401:3600 10x15 USGS -2+-3 cold ..............FAIL! rc= 11
+>>>>>>> Build fails
+020 smHO2 TSM.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 4
+021 erHO2 TER.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -3+-7 cold ................FAIL! rc= 5
+022 brHO2 TBR.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -5+-5 cold ................FAIL! rc= 5
+>>>>>>> Build fails
+028 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 4
+029 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -3+-7 arb_ic ............FAIL! rc= 5
+030 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -5+-5 arb_ic ............FAIL! rc= 5
+>>>>>>> Build fails
+032 smH52 TSM.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 96 cold ...............FAIL! rc= 4
+033 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 10+38 cold ............FAIL! rc= 5
+034 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:1800 10x15 USGS@2000 72+72 cold .........FAIL! rc= 5
+ bluefire interactive testing: All PASS except... (pio bug 1396)
+031 erAK4 TER.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 7
+032 brAK4 TBR.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 6
+036 brAK8 TBR.sh _nrsc_ds clm_std^nl_ptsmode_ocn 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .........FAIL! rc= 6
+ bluefire/CESM testing: All PASS except...
+FAIL ERS_RLA.f45_f45.I.bluefire -- pio bug 1396
+>>>>>>> Compare fails because of new domain files/new pftdyn
+FAIL SMS_RLA.f45_f45.I.bluefire.compare_hist.clm4_0_33
+FAIL SMS_RLA.f45_f45.I.bluefire.compare.clm4_0_33
+BFAIL ERS_RLA.f45_f45.I.bluefire.generate.clm4_0_34
+BFAIL ERS_RLA.f45_f45.I.bluefire.compare.clm4_0_33
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm4_0_33
+FAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_33
+FAIL SMS_ROA.f45_f45.I.bluefire.compare_hist.clm4_0_33
+FAIL SMS_ROA.f45_f45.I.bluefire.compare.clm4_0_33
+FAIL ERS_D.f19_g16.IGRCP26CN.bluefire.compare_hist.clm4_0_33 -- only glc map area?
+FAIL ERP.f19_g16.IGRCP60CN.bluefire.compare_hist.clm4_0_33 ---- only glc map area?
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm4_0_33
+FAIL PST.f10_f10.I20TRCN.bluefire.compare.clm4_0_33
+FAIL PET_PT.f10_f10.I20TRCN.bluefire.compare.clm4_0_33
+FAIL SMS.f10_f10.IRCP45CN.bluefire.compare_hist.clm4_0_33
+FAIL SMS.f10_f10.IRCP45CN.bluefire.compare.clm4_0_33
+ bluefire/PTCLM testing: All PASS
+ jaguarpf interactive testing: All PASS except...
+014 erAK4 TER.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 7
+015 brAK4 TBR.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -5+-5 cold .............FAIL! rc= 6
+ edinburgh/lf95 interactive testing: All PASS, except TBL and... (pio bug 1396)
+006 erAL4 TER.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -5+-5 cold ................FAIL! rc= 5
+007 brAL4 TBR.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -5+-5 cold ................FAIL! rc= 5
+ mirage,storm/ifort interactive testing: All fail -- problem with pio build
+ yong/ifort interactive testing: All fail -- problem with pio build
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_33
+
+Changes answers relative to baseline: No bit-for-bit, except:
+
+ f10_f10, f45_f45, f09_f09, f19_f19, T31_T31, with new domain files
+
+===============================================================
+===============================================================
+Tag name: clm4_0_33
+Originator(s): erik (Erik Kluzek)
+Date: Mon Jul 25 14:34:18 MDT 2011
+One-line Summary: Move changes on release branch over to trunk
+
+Purpose of changes:
+
+Move changes from release branch over to trunk. Update README files and documentation.
+Add new tools testing. Use if masterproc and iulog for output. Move pft mksurfdata into
+inputdata. rh files are t-1. All clm tools namelist items in XML database. Fix tools
+Makefiles. Survey testlists, move tests around. Remove clm* from path, add quotes in test
+scripts, remove CLM_CESMBLD. Remove getfil in mksurfdata, make fdynuse optional. Add
+-nomv to getregional. Cleanup help and improve documentation in scripts and XML database.
+Update datm8/scripts/drv/cism/csm_share. Update pergro data. Changes answers because of
+drv update to cesm1_0_beta22 version (answers are identical to cesm1_0_beta22).
+
+Bugs fixed (include bugzilla ID):
+ 1301 (Add doc on OpenMP fortran tools)
+ 1329 (Add new tool tests)
+ 1338 (Move raw pftdata into inputdata in XML database)
+ 1341 (Error running with crop for a single-point)
+ 1346 (save history namelist to the rh0 files NOT rh1)
+ 1351 (Add all CLM tools namelist items to XML)
+ 1351 (Problem with interpinic on non bluefire machines)
+ 1353 (Huge "ccsm.log" file)
+ 1367 (final_spinup stop time isn't right)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1309 (Problem with building T31 rcp pftdyn files)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1339 (Increase streams file limit from 1000 to 2000)
+ 1358 (incorrect units for a few history fields)
+ 1360 (Can't do a ncdump on US-UMB data)
+ 1361 (Problem with transient compsets for PTCLM)
+ 1372 (pio problem writing out RTM hist fields at RTM res)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1355 (tlai is zero for first two time-steps in CLMSP)
+ 1326 (Crop and irrigation sims give balance check error)
+ 1310 (Restart files different over different tasks)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts/drv/shr/cism/datm
+ scripts to scripts4_110711
+ drv to drvseq3_1_54
+ datm to datm8_110624
+ csm_share to share3_110717
+ cism to cism1_110418
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>>> Add new tests for tools
+ A models/lnd/clm/test/system/TOPtools.sh ------- Ensure different number of threads
+ give the same answers for tools
+ A models/lnd/clm/test/system/TBLscript_tools.sh Comparison test for script tools
+ A models/lnd/clm/test/system/TBLrst_tools.sh --- Comparison test for rst_tools
+ A models/lnd/clm/test/system/config_files/tools__do ---- Add OpenMP debug config
+ A models/lnd/clm/test/system/config_files/17p_nrscnv_ds Add non-RTM debug serial CN
+
+>>>>>>>>>>> Make copies of existing files to inside of
+>>>>>>>>>>> individual tools so that tools can be standalone
+ A models/lnd/clm/tools/mksurfdata/clm_varpar.F90
+ A models/lnd/clm/tools/mksurfdata/shr_file_mod.F90
+ A models/lnd/clm/tools/mksurfdata/shr_timer_mod.F90
+ A models/lnd/clm/tools/mksurfdata/shr_log_mod.F90
+ A models/lnd/clm/tools/mksurfdata/fileutils.F90
+ A models/lnd/clm/tools/mksurfdata/shr_const_mod.F90
+ A models/lnd/clm/tools/mksurfdata/shr_string_mod.F90
+ A models/lnd/clm/tools/mksurfdata/clm_varctl.F90
+ A models/lnd/clm/tools/mksurfdata/shr_sys_mod.F90
+ A models/lnd/clm/tools/mksurfdata/shr_kind_mod.F90
+ A models/lnd/clm/tools/mksurfdata/nanMod.F90
+ A models/lnd/clm/tools/mksurfdata/Mkdepends
+ A models/lnd/clm/tools/mksurfdata/clm_varpar.F90
+ A models/lnd/clm/tools/mkgriddata/mkvarpar.F90
+ A models/lnd/clm/tools/mkgriddata/clm_varctl.F90
+ A models/lnd/clm/tools/mkgriddata/clm_varpar.F90
+ A models/lnd/clm/tools/mkgriddata/shr_sys_mod.F90
+ A models/lnd/clm/tools/mkgriddata/shr_log_mod.F90
+ A models/lnd/clm/tools/mkgriddata/ncdio.F90
+ A models/lnd/clm/tools/mkgriddata/shr_kind_mod.F90
+ A models/lnd/clm/tools/mkgriddata/shr_const_mod.F90
+ A models/lnd/clm/tools/mkgriddata/domainMod.F90
+ A models/lnd/clm/tools/mkgriddata/areaMod.F90
+ A models/lnd/clm/tools/mkgriddata/nanMod.F90
+ A models/lnd/clm/tools/mkgriddata/Mkdepends
+ A models/lnd/clm/tools/mkdatadomain/Mkdepends
+ A models/lnd/clm/tools/mkdatadomain/shr_kind_mod.F90
+ A models/lnd/clm/tools/mkdatadomain/shr_const_mod.F90
+
+>>>>>>>>>>> Add new README files to talk about testing and file copies
+ A models/lnd/clm/tools/README.testing
+ A models/lnd/clm/tools/README.filecopies
+
+>>>>>>>>>>> Add a new chapter for PTCLM
+ A models/lnd/clm/doc/UsersGuide/ptclm.xml
+
+List all existing files that have been modified, and describe the changes:
+
+
+>>>>>>>>>>> Remove CLM_CESMBLD, remove clm* in pathname, add quotes in tests
+>>>>>>>>>>> fix some spelling and unused vars, add new scripts tests
+M models/lnd/clm/test/system/TCB.sh -------------- Remove CLM_CESMBLD
+M models/lnd/clm/test/system/TSMncl_tools.sh ----- Remove clm* in pathname
+M models/lnd/clm/test/system/TBL.sh -------------- Remove clm* in pathname
+M models/lnd/clm/test/system/README.testnames ---- Update for new tests
+M models/lnd/clm/test/system/TBR.sh -------------- Remove unused cfgdir
+M models/lnd/clm/test/system/TCBtools.sh --------- Remove clm* in pathname, fix spelling
+M models/lnd/clm/test/system/TER.sh -------------- Remove unused cfgdir
+M models/lnd/clm/test/system/test_driver.sh ------ Remove CLM_CESMBLD, change temp on lynx
+M models/lnd/clm/test/system/TSMrst_tools.sh ----- Remove unused cfgdir, add quotes
+ in comparison
+M models/lnd/clm/test/system/nl_files/getregional - Add -nomv option in
+M models/lnd/clm/test/system/nl_files/mksrfdt_10x15_irr_1850 - Put -exedir last
+M models/lnd/clm/test/system/input_tests_master --- Add TBLtools, TOPtools,
+ TBLrst_tools, TBLscript_tools tests in
+M models/lnd/clm/test/system/TSMtools.sh ---------- Add CLM_RERUN (needed for
+ TOPtools which runs the same test over for different threads)
+ Remove clm* from path add quotes to some if tests
+M models/lnd/clm/test/system/TBLtools.sh ---------- Remove clm* from path
+
+>>>>>>>>>>> Move tests around a bit
+M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi - Add TOP test
+M models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi
+M models/lnd/clm/test/system/tests_posttag_nompi_regression
+
+>>>>>>>>>>> Make fdynuse file optional and remove use of getfil
+M models/lnd/clm/tools/mksurfdata/mklaiMod.F90 ---- Remove fdynuse file,
+ remove use of getfil, all averaging is the same (no *_pft options)
+M models/lnd/clm/tools/mksurfdata/mksoilMod.F90 --- Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/mkharvestMod.F90 Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/creategridMod.F90 Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/mkglcmecMod.F90 - Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/mkvocefMod.F90 -- Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/mkglacierMod.F90 Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/mkurbanparMod.F90 Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/areaMod.F90 ----- Remove _pft methods
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ---- Make mksrf_fdynuse optional
+ remove use of getfil
+M models/lnd/clm/tools/mksurfdata/mklanwatMod.F90 - Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 ---- Remove use of getfil
+M models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr1850-2005.txt -- new paths
+
+>>>>>>>>>>> Updated RMS differences, and add -nomv option to getregional_datasets
+M models/lnd/clm/tools/ncl_scripts/RMSbluefire.dat ---------- Updated RMS differences
+M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl -- Add -nomv option
+M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl - Add NOMV env var
+
+>>>>>>>>>>> Fix bug 1351
+M models/lnd/clm/tools/interpinic/interpinic.F90 -------- Make sure: htop_var,
+ fpcgrid_var, present_var, itypveg_var are set
+M models/lnd/clm/tools/interpinic/interpinic.runoptions - Update the input file to use
+
+>>>>>>>>>>> Work on formatting, remove use of getfils
+M models/lnd/clm/tools/mkgriddata/mkgriddata.F90 - Work on formatting a bit,
+ removed use of fileutils
+M models/lnd/clm/tools/mkgriddata/areaMod.F90 ---- Remove use of getfil
+M models/lnd/clm/tools/mkdatadomain/create_domain.F90 - Work on output write
+
+>>>>>>>>>>> Update documentation in README files
+M models/lnd/clm/test/system/config_files/README
+M models/lnd/clm/bld/namelist_files/use_cases/README
+M models/lnd/clm/test/system/README
+M models/lnd/clm/tools/mkgriddata/README
+M models/lnd/clm/tools/mkdatadomain/README
+M models/lnd/clm/tools/interpinic/README
+M models/lnd/clm/tools/README.testing
+M models/lnd/clm/tools/README
+M models/lnd/clm/bld/README
+
+>>>>>>>>>>> Sync up tools Makefile, make Filepath standalone (only includes .)
+>>>>>>>>>>> Work on formatting, set OPT default, add TOOLROOT default
+>>>>>>>>>>> compare to null instead of strip
+M models/lnd/clm/tools/mksurfdata/Makefile
+M models/lnd/clm/tools/mksurfdata/Filepath
+M models/lnd/clm/tools/mksurfdata/Srcfiles - Remove spmdMod,fileutils,
+ abortutils/shr_cal_mod, ESMF, mpi, shr_mpi_mod
+M models/lnd/clm/tools/interpinic/Makefile
+M models/lnd/clm/tools/mkgriddata/Filepath
+M models/lnd/clm/tools/mkgriddata/Srcfiles - Remove fileutils,spmdMod,
+ abortutils,shr_timer_mod,shr_mpi_mod,shr_file_mod,MPI
+M models/lnd/clm/tools/mkgriddata/Makefile
+M models/lnd/clm/tools/mkdatadomain/Filepath
+M models/lnd/clm/tools/mkdatadomain/Makefile
+
+>>>>>>>>>>> Cleanup help and documentation
+M models/lnd/clm/bld/configure --------------- Cleanup help, remove cesm_bld
+M models/lnd/clm/bld/queryDefaultNamelist.pl - Cleanup help
+M models/lnd/clm/bld/listDefaultNamelist.pl -- Add more description, documentation
+ set maxpft for crop
+M models/lnd/clm/bld/build-namelist ---------- Cleanup help, add papi_inparm
+ remove some list options for non-CLM vars
+M models/lnd/clm/bld/clm.cpl7.template ------- Remove clm* in path
+ remove warning about CAM and CLM dtime, remove comment about *.h files
+
+>>>>>>>>>>> Make sure all 1x1 files are in supported single-point res
+M models/lnd/clm/bld/config_files/config_definition.xsl - Add CLM in descriptions
+M models/lnd/clm/bld/config_files/config_definition.xml - Add
+ 1x1_numaIA,1x1_smallvilleIA to supported single-point resolutions
+ cleanup spelling and a few descriptions
+
+>>>>>>>>>>> Work on documentation descriptions, document all tools namelist items
+M models/lnd/clm/bld/namelist_files/checklatsfiles.ncl --------- Add doc, continue
+ if file NOT found rather than abort
+M models/lnd/clm/bld/namelist_files/checkdatmfiles.ncl --------- Add doc, continue
+ if file NOT found rather than abort
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml ---- Work on descriptions,
+ add in all mksurfdata/mkdatadomain/mkgriddata namelist vars, add in
+ new driver namelist vars (so documented in table in UG),
+ add HCN,CH3CN to drydep
+M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - Add datasource
+ small formatting change
+M namelist_files/namelist_defaults_drv.xml --------------------- Fix final_spinup
+ (bug 1367)
+M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl ------ Add attributes
+ to output for: crop, irrig, ad_spinup, and source
+M models/lnd/clm/bld/namelist_files/namelist_definition.xsl ---- Work to improve
+ output formatting of table
+M models/lnd/clm/bld/namelist_files/datm-build-namelist -------- Cleanup help / source
+M models/lnd/clm/bld/namelist_files/checktopofiles.ncl --------- Change res list,
+ add documentation, continue rather than abort if file not found
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml Update paths
+ to landuse for mksurfdata as all in repo now, add default values for
+ other mksurfdata namelist items
+
+>>>>>>>>>>> Update documentation for latest release
+M models/lnd/clm/doc/UsersGuide/trouble_shooting.xml
+M models/lnd/clm/doc/UsersGuide/single_point.xml
+M models/lnd/clm/doc/UsersGuide/special_cases.xml
+M models/lnd/clm/doc/UsersGuide/tools.xml
+M models/lnd/clm/doc/UsersGuide/limitLineLen.pl
+M models/lnd/clm/doc/UsersGuide/preface.xml
+M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+M models/lnd/clm/doc/UsersGuide/adding_files.xml
+M models/lnd/clm/doc/UsersGuide/appendix.xml
+M models/lnd/clm/doc/UsersGuide/custom.xml
+M models/lnd/clm/doc/UsersGuide/Makefile
+
+>>>>>>>>>>> Update documentation for latest release
+M models/lnd/clm/doc/Quickstart.userdatasets
+M models/lnd/clm/doc/IMPORTANT_NOTES
+M models/lnd/clm/doc/Quickstart.GUIDE
+M models/lnd/clm/doc/CodeReference/Filepath
+M models/lnd/clm/doc/KnownLimitations
+M models/lnd/clm/doc/KnownBugs
+M models/lnd/clm/doc/README
+M README
+
+>>>>>>>>>>> Add if masterproc, work on documentation, use iulog NOT unit 6
+M models/lnd/clm/src/biogeochem/CropRestMod.F90 ---------- Add if masterproc
+M models/lnd/clm/src/biogeochem/CASAMod.F90 -------------- Cleanup endrun statement
+M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 --- Use iulog NOT unit 6
+M models/lnd/clm/src/biogeochem/CNDVMod.F90 -------------- Use iulog NOT unit 6
+M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 ------- Use iulog NOT unit 6
+M models/lnd/clm/src/biogeochem/CNAllocationMod.F90 ------ Ensure arepr is initialized
+ (bug 1341)
+M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90 - Comment out debug write
+M models/lnd/clm/src/biogeochem/CNDVEstablishmentMod.F90 - Use iulog NOT unit 6
+
+>>>>>>>>>>> Add if masterproc, work on documentation, rh files are t-1
+M models/lnd/clm/src/main/fileutils.F90 --- Add if masterproc
+M models/lnd/clm/src/main/pftdynMod.F90 --- Add if masterproc (fix bug 1353)
+M models/lnd/clm/src/main/histFileMod.F90 - Add if masterproc, rh files are t-1
+ (bug 1346)
+M models/lnd/clm/src/main/clmtype.F90 ----- Work on documentation
+
+
+Summary of testing:
+
+ bluefire: All PASS except... (up to 43)
+004 blC91 TBL.sh _sc_dh clm_std^nl_urb 20030101:3600 4x5 gx3v7 -6 arb_ic ........................FAIL! rc= 5
+008 blTZ1 TBL.sh 21p_cncrpsc_dh clm_stdIgnYr^nl_crop 20020401:3600 10x15 USGS -10 cold ..........FAIL! rc= 5
+011 blD91 TBL.sh _persc_dh clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 5
+015 blEH1 TBL.sh 4p_vorsc_dh clm_std^nl_urb 20021231:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic ......FAIL! rc= 5
+019 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:1800 1.9x2.5 gx1v6@1850-2100 -10 cold FAIL! rc= 5
+023 blHO2 TBL.sh 17p_cnsc_dm clm_drydep 20000704:1800 10x15 USGS@2000 -90 cold ..................FAIL! rc= 5
+027 blHo1 TBL.sh 17p_cnsc_dh clm_drydep 20000101:1800 10x15 USGS@2000 -10 cold ..................FAIL! rc= 5
+028 smG41 TSM.sh 17p_scnv_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ...........FAIL! rc= 10
+029 erG41 TER.sh 17p_scnv_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic .........FAIL! rc= 5
+030 brG41 TBR.sh 17p_scnv_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic .........FAIL! rc= 5
+031 blG41 TBL.sh 17p_scnv_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ...........FAIL! rc= 4
+035 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 5
+039 blG61 TBL.sh _scnv_dh clm_std^nl_urb 20020101:1800 1.9x2.5 gx1v6 48 startup .................FAIL! rc= 5
+043 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 48 cold ...............FAIL! rc= 5
+ bluefire interactive testing: All PASS except TBL tests
+ bluefire/CESM testing: All PASS except compare tests to clm4_0_32
+ bluefire/CESM extra testing (show that answers are identical with cesm1_0_beta22):
+PASS ERI.T31_g37.IGCN.bluefire
+PASS ERI.T31_g37.IGCN.bluefire.compare.cesm1_0_alpha22a
+PASS ERS.T31_g37.ITEST.bluefire
+PASS ERS.T31_g37.ITEST.bluefire.compare_hist.cesm1_0_alpha22a
+PASS ERS.T31_g37.ITEST.bluefire.compare.cesm1_0_alpha22a
+PASS ERS.f19_g16.IGCN.bluefire
+PASS ERS.f19_g16.IGCN.bluefire.compare_hist.cesm1_0_alpha22a
+PASS ERS.f19_g16.IGCN.bluefire.compare.cesm1_0_alpha22a
+PASS ERS.f45_g37.I4804.bluefire
+PASS ERS.f45_g37.I4804.bluefire.compare_hist.cesm1_0_alpha22a
+PASS ERS.f45_g37.I4804.bluefire.compare.cesm1_0_alpha22a
+PASS SMS_RLA.f45_f45.I.bluefire
+PASS SMS_RLA.f45_f45.I.bluefire.compare_hist.cesm1_0_alpha22a
+PASS SMS_RLA.f45_f45.I.bluefire.compare.cesm1_0_alpha22a
+PASS SMS_RLB.f45_f45.ITEST.bluefire
+PASS SMS_RLB.f45_f45.ITEST.bluefire.compare_hist.cesm1_0_alpha22a
+PASS SMS_RLB.f45_f45.ITEST.bluefire.compare.cesm1_0_alpha22a
+ jaguarpf interactive testing: All PASS except...
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:1800 1x1_brazil navy -10 arb_ic ...............FAIL! rc= 5
+008 blTZ3 TBL.sh 21p_cncrpsc_do clm_stdIgnYr^nl_crop 20020401:3600 10x15 USGS -10 cold ..........FAIL! rc= 5
+012 blVU4 TBL.sh 21p_cncrpsc_ds clm_stdIgnYr^nl_crop 20020101:3600 1x1_smallvilleIA test -1100 cold FAIL! rc= 5
+014 blAK4 TBL.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -10 cold ...............FAIL! rc= 5
+015 smG43 TSM.sh 17p_scnv_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ...........FAIL! rc= 10
+016 erG43 TER.sh 17p_scnv_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic .........FAIL! rc= 5
+017 brG43 TBR.sh 17p_scnv_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic .........FAIL! rc= 5
+018 blG43 TBL.sh 17p_scnv_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ...........FAIL! rc= 4
+022 blK74 TBL.sh 17p_cndvsc_s clm_std 19971231:1800 1x1_brazil navy -670 arb_ic .................FAIL! rc= 5
+026 blSn3 TBL.sh _mec10sc_do clm_transient_glcMEC_rcp8.5 20331231:1800 1.9x2.5 gx1v6@1850-2100 48 arFAIL! rc= 5
+030 blQQ4 TBL.sh _nrcnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -150 cold .............FAIL! rc= 5
+034 blH43 TBL.sh 17p_cnsc_do clm_transient_20thC 19790101:1800 1.9x2.5 gx1v6@1850-2000 -10 startup FAIL! rc= 5
+038 blS63 TBL.sh _mec10sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ...................FAIL! rc= 5
+040 bl8Z3 TBLrst_tools.sh 21p_cncrpsc_do interpinic clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6FAIL! rc= 5
+042 bl954 TBLscript_tools.sh mksurfdata mksurfdata.pl mksrfdt_10x15_irr_1850^tools__ds ..........FAIL! rc= 6
+046 bl9T4 TBLscript_tools.sh mksurfdata mksurfdata.pl mksrfdt_1x1_numaIA_mp20irrcr_2000^tools__ds FAIL! rc= 6
+050 bl9C4 TBLscript_tools.sh mksurfdata mksurfdata.pl mksrfdt_1x1_vancouverCAN_2000^tools__ds ...FAIL! rc= 6
+ edinburgh/lf95 interactive testing: All PASS except...
+006 blAL4 TBL.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 7
+014 blOC4 TBL.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:3600 1x1_vancouverCAN navy 331 arb_ic ...FAIL! rc= 7
+ edinburgh/lf95 testing: All PASS except TBL tests
+ lynx/intel testing: All PASS except TBL tests
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_32
+
+Changes answers relative to baseline: Yes (Driver change)
+
+ But, answers are identical to cesm1_0_beta22 where the driver change
+ was already in effect.
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers:
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change: larger than roundoff/same climate
+
+===============================================================
+===============================================================
+Tag name: clm4_0_32
+Originator(s): erik (Erik Kluzek)
+Date: Thu May 19 15:18:49 MDT 2011
+One-line Summary: Make I1850SPINUPCN compset use MOAR data, various bug fixes, work on test lists
+
+Purpose of changes:
+
+Update datm and scripts so can run I1850SPINUPCN compset with MOAR data. Fix CN units.
+Fix some documentation for crop. Add attribute that notes that flux variables are NOT
+multiplied by landfrac. Change align year for I4804 and I4804CN compsets, add append/warn
+option to xmlchange. Some clarifications to clm namelist. build-namelist can run list
+options without a config_cache file. Add comment/title to output files. Remove the
+2.65x3.33 grid, no longer supported. Work on test lists a bit.
+
+Bugs fixed (include bugzilla ID):
+ 1337 (have ISPINUPCN compset use MOAR data)
+ 1336 (evaluate CLM testing for release)
+ 1327 (correct documentation of CN variable units)
+ 1158 (make 4804 compsets consistent with 1850 etc.)
+ 1151 (remove co2_ppmv when co2_type is NOT constant)
+ 1140 (build-namelist -list options die with config file)
+ 1108 (have append/warn mode for xmlchange)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1367 (final_spinup stop time isn't right)
+ 1372 (pio problem writing out CLM history files for CRU data)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system:
+
+ I1850SPINUP compset changed to use MOAR data
+
+ DATM_CPL_* variables added to env_conf.xml to set casename, years run over
+ I4804 compsets ALIGN year changed to agree with doc. and I1850 compsets
+
+ New options to xmlchange -- allow you to append (-a) to the end of something already
+ there and another option (-w) warn you and abort if already set.
+
+Describe any changes made to the namelist: Add options to build-namelist
+
+ Add -co2_ppmv and -rtm_tstep options to set co2_ppmv when co2_type is constant
+ and set rtm time-step when RTM is on.
+
+ This way co2_ppmv and rtm_nsteps do NOT show up in the namelist if they aren't needed.
+
+List any changes to the defaults for the boundary datasets: Remove 2.65x3.33 datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, slevis (units change), sacks (crop doc)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, csm_share, datm
+ scripts to scripts4_110517
+ csm_share to share3_110516
+ datm to datm8_110517b
+
+List all files eliminated:
+
+>>>>>>>>>>>>> Remove as no longer needed to get lists for documentation
+ D models/lnd/clm/doc/UsersGuide/config_cache.xml
+
+List all files added and what they do:
+
+>>>>>>>>>>>>> Add new test configurations
+ A models/lnd/clm/test/system/config_files/_scnv_dm
+ A models/lnd/clm/test/system/config_files/17p_scnv_dm
+ A models/lnd/clm/test/system/config_files/17p_scnv_ds
+ A models/lnd/clm/test/system/config_files/17p_nrcnsc_do
+ A models/lnd/clm/test/system/config_files/17p_nrcnsc_ds
+ A models/lnd/clm/test/system/config_files/17p_scnv_m
+ A models/lnd/clm/test/system/config_files/17p_scnv_o
+ A models/lnd/clm/test/system/config_files/17p_scnv_s
+ A models/lnd/clm/test/system/config_files/17p_cnnfsc_dh -- turn on NOFIRE
+ A models/lnd/clm/test/system/config_files/17p_cnnfsc_dm -- turn on NOFIRE
+ A models/lnd/clm/test/system/config_files/17p_cnnfsc_do -- turn on NOFIRE
+ A models/lnd/clm/test/system/config_files/21p_nrcncrpsc_s
+ A models/lnd/clm/test/system/config_files/21p_nrcncrpsc_ds
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/config_files/README
+
+>>>>>>>>>>>>> Change tests a bit to make them more consistent with naming convention
+>>>>>>>>>>>>> make sure tests are covered, and have no-RTM tests for single-point
+ M models/lnd/clm/test/system/README.testnames
+ M models/lnd/clm/test/system/nl_files/clm_spin --- Use MOAR data on bluefire
+ M models/lnd/clm/test/system/input_tests_master
+
+>>>>>>>>>>>>> Change test lists
+ M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh
+ M models/lnd/clm/test/system/tests_posttag_kraken
+ M models/lnd/clm/test/system/tests_posttag_yong
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi
+ M models/lnd/clm/test/system/tests_posttag_mirage
+ M models/lnd/clm/test/system/tests_posttag_intrepid
+ M models/lnd/clm/test/system/tests_posttag_intrepid_nompi
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/tests_posttag_lynx
+
+>>>>>>>>>>>>> Add -co2_ppmv, and -rtm_tstep options to build-namelist
+>>>>>>>>>>>>> Don't require config file for build-namelist list options
+>>>>>>>>>>>>> Remove 2.65x3.33 files, add capability to handle MOAR data
+ M models/lnd/clm/bld/build-namelist
+ M models/lnd/clm/bld/clm.cpl7.template
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+
+>>>>>>>>>>>>> Correct documentation of units from kg to g
+ M models/lnd/clm/src/biogeochem/CNMRespMod.F90
+ M models/lnd/clm/src/biogeochem/CNPhenologyMod.F90 -- Also documentation changes
+ from Bill Sacks
+ M models/lnd/clm/src/biogeochem/CNDecompMod.F90
+ M models/lnd/clm/src/biogeochem/CNAllocationMod.F90
+ M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90
+
+>>>>>>>>>>>>> Add title and comment attributes to output files
+ M models/lnd/clm/src/main/histFileMod.F90
+ M models/lnd/clm/src/main/restFileMod.F90
+
+Summary of testing:
+
+ bluefire interactive testing: All PASS up to...
+006 smC97 TSM.sh _sc_do clm_spin^nl_urb 20030101:1800 4x5 gx3v7@1850 -6 arb_ic ..................FAIL! rc= 10
+ bluefire/CESM testing: All PASS except...
+BFAIL PST.f45_g37.I1850CN.bluefire.compare.clm4_0_31
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_31
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_31
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_31
+Originator(s): erik (Erik Kluzek)
+Date: Fri May 13 17:11:38 MDT 2011
+One-line Summary: Fix answers for transient_CN, fix interpinic
+
+Purpose of changes:
+
+Fix interpinic test with finidat files. Fix CNPrecisionControl so answers with transient
+CN are same as clm4_0_26 without crop.
+
+Bugs fixed (include bugzilla ID):
+ 1335 (transient_CN sometimes different than clm4_0_26)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1318 (interpinic has trouble with new restart files)
+ 1319 (interpinic doesn't interpolate *_PERIOD)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1367 (final_spinup stop time isn't right)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>> Add transient 20th Century namelist config
+ A models/lnd/clm/test/system/nl_files/clm_transient_20thC
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>> Add f19 transient-CN tests that start in 1979 (which showed bug 1335)
+ M models/lnd/clm/test/system/input_tests_master - Add f19 transient CN tests
+ Also make glc_nec interpinic test run for f09@1850
+ M models/lnd/clm/test/system/README.testnames --- Add run-4 for f19 transient CN
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi ------ Add f19 transient_CN
+test
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression - Add f19 transient_CN
+test
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression -- Add f19 transient_CN
+test
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi ------ Add f19 transient_CN
+test
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression --- Add f19 transient_CN
+test
+
+>>>>>>>>>>>>>> Put changes from clm4_0_27 back in except those that cause runs to fail
+ M models/lnd/clm/tools/interpinic/interpinic.F90
+ M models/lnd/clm/tools/interpinic/Srcfiles ------ Add shr_const_mod.F90 back in
+
+>>>>>>>>>>>>>>
+ M models/lnd/clm/src/biogeochem/CNPrecisionControlMod.F90 - Add if ( crop_prog ) to
+ a crop change that needed it
+ M models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90 ---- Remove pft_ctrunc not used
+ M models/lnd/clm/src/biogeochem/CNC13StateUpdate1Mod.F90 -- Remove pft_ctrunc not used
+ M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 -------- Set wesveg for crop_prog
+
+Summary of testing:
+
+ bluefire extra interactive testing:
+001 bl853 TBLtools.sh interpinic tools__o runoptions ............................................PASS
+001 sm893 TSMrst_tools.sh _sc_do interpinic clm_std^nl_urb 20000101:1800 1.9x2.5 gx1v6 4x5 gx3v7 -1 PASS
+001 sm857 TSMrst_tools.sh 17p_cnsc_o interpinic clm_std^nl_urb 18500101:1800 1.9x2.5 gx1v6@1850 10x1PASS
+002 sm8Z3 TSMrst_tools.sh 21p_cncrpsc_do interpinic clm_stdIgnYr^nl_crop 20000101:1800 1.9x2.5 gx1v6PASS
+003 sm813 TSMrst_tools.sh 17p_cndvsc_do interpinic clm_std^nl_urb 18500101:1800 1.9x2.5 gx1v6@1850 4PASS
+>>>>> This test compares to clm4_0_30 and rightly shows that answers change
+001 blH43 TBL.sh 17p_cnsc_do clm_transient_20thC 19790101:1800 1.9x2.5 gx1v6@1850-2000 -10 startup FAIL! rc= 7
+ bluefire/CESM testing: All PASS except... (why did these comparisons PASS in clm4_0_27)
+BFAIL ERP.f19_g16.IGRCP60CN.bluefire.compare.clm4_0_30
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_30
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_30
+ bluefire/CESM testing compared to cesm1_0_beta19:
+PASS SMS_D.f09_g16.BRCP45CN.bluefire
+PASS SMS_D.f09_g16.BRCP45CN.bluefire.compare_hist.cesm1_0_beta19
+PASS SMS_D.f09_g16.BRCP45CN.bluefire.compare.cesm1_0_beta19
+PASS ERS.f09_f09.FAMIPCN.bluefire
+PASS ERS.f09_f09.FAMIPCN.bluefire.compare_hist.cesm1_0_beta19
+PASS ERS.f09_f09.FAMIPCN.bluefire.compare.cesm1_0_beta19
+PASS ERS.f19_f19.FAMIPC5.bluefire
+PASS ERS.f19_f19.FAMIPC5.bluefire.compare_hist.cesm1_0_beta19
+PASS ERS.f19_f19.FAMIPC5.bluefire.compare.cesm1_0_beta19
+PASS ERS.f19_f19.FAMIPCN.bluefire
+PASS ERS.f19_f19.FAMIPCN.bluefire.compare_hist.cesm1_0_beta19
+PASS ERS.f19_f19.FAMIPCN.bluefire.compare.cesm1_0_beta19
+PASS ERS.f09_g16.BRCP45CN.lynx_pgi
+PASS ERS.f09_g16.BRCP45CN.lynx_pgi.compare_hist.cesm1_0_beta19
+PASS ERS.f09_g16.BRCP45CN.lynx_pgi.compare.cesm1_0_beta19
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_30
+
+Changes answers relative to baseline: Some transient_CN tests are different
+ because of bug 1335 introduced in clm4_0_27
+
+ With these changes answers are the same as clm4_0_26
+
+===============================================================
+===============================================================
+Tag name: clm4_0_30
+Originator(s): erik (Erik Kluzek)
+Date: Wed May 11 14:32:19 MDT 2011
+One-line Summary: New finidat/fsurdat files for T31
+
+Purpose of changes:
+
+Externals update, fix some PTCLM problems. New finidat/fsurdat files for T31, make sure
+it works.
+
+Bugs fixed (include bugzilla ID):
+ 1279 (Latest version of PTCLM requires python2.5)
+ 1248 (PTCLM can only go to 2005)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1318 (interpinic has trouble with new restart files)
+ 1319 (interpinic doesn't interpolate *_PERIOD)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1335 (transient_CN sometimes different than clm4_0_26)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ New finidat files for T31
+ New fsurdat file for T31@2000
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, mct, pio
+
+ scripts to scripts4_110511
+ mct to MCT2_7_0_110504a
+ pio to pio1_3_0
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+ A models/lnd/clm/test/system/config_files/17p_cnsc_h
+ A models/lnd/clm/test/system/config_files/17p_cnsc_o
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/README.testnames --- Add R f19 rcp4.5 resol
+ M models/lnd/clm/test/system/input_tests_master - Make some tests startup, add
+ some rcp tests, change some tests from T31 to f19
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Add new finidat
+ files for T31@1850/2000, add new surdata file for T31@2000, remove empty
+ half-degree pftdyn file
+
+Summary of testing:
+
+ bluefire: All PASS except...
+024 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+025 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+026 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+027 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+049 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+050 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+051 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+052 blLI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+054 erJ61 TER.sh 4p_casasc_dh clm_std^nl_urb 20021230:1800 1.9x2.5 gx1v6 10+38 cold .............FAIL! rc= 5
+055 brJ61 TBR.sh 4p_casasc_dh clm_std^nl_urb_br 20021230:1800 1.9x2.5 gx1v6 72+72 cold ..........FAIL! rc= 5
+056 blJ61 TBL.sh 4p_casasc_dh clm_std^nl_urb 20021230:1800 1.9x2.5 gx1v6 48 cold ................FAIL! rc= 4
+ bluefire interactive testing: All PASS
+ bluefire extra interactive testing:
+001 smE13 TSM.sh 17p_vorsc_do clm_std^nl_urb 20021230:1800 48x96 gx3v7 96 startup ...............PASS
+001 smH13 TSM.sh 17p_cnsc_do clm_ndepdyn^nl_cn_conly 20020101:1800 48x96 gx3v7@1850-2000 96 startup PASS
+ bluefire/CESM testing: All PASS except...
+FAIL SMS_RLA.f45_f45.I.bluefire.compare_hist.clm4_0_29
+FAIL SMS_RLA.f45_f45.I.bluefire.compare.clm4_0_29
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm4_0_29
+FAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_29
+FAIL SMS_ROA.f45_f45.I.bluefire.compare_hist.clm4_0_29
+FAIL SMS_ROA.f45_f45.I.bluefire.compare.clm4_0_29
+FAIL ERS_D.f45_g37.I.bluefire.compare_hist.clm4_0_29
+FAIL ERS_D.f45_g37.I.bluefire.compare.clm4_0_29
+BFAIL ERS_E.T31_g37.I1850.bluefire.compare.clm4_0_29
+BFAIL ERI.T31_g37.IG1850.bluefire.compare.clm4_0_29
+BFAIL ERS_D.f19_g16.IGRCP26CN.bluefire.compare.clm4_0_29
+
+ bluefire/CESM rcps extra testing: All PASS...
+PASS SMS.f09_g16.IRCP26CN.bluefire
+PASS SMS.f09_g16.IRCP45CN.bluefire
+PASS SMS.f09_g16.IRCP60CN.bluefire
+PASS SMS.f09_g16.IRCP85CN.bluefire
+PASS SMS.f09_g16.IGRCP26CN.bluefire
+PASS SMS.f09_g16.IGRCP45CN.bluefire
+PASS SMS.f09_g16.IGRCP60CN.bluefire
+PASS SMS.f09_g16.IGRCP85CN.bluefire
+PASS SMS.f19_g16.IRCP26CN.bluefire
+PASS SMS.f19_g16.IRCP45CN.bluefire
+PASS SMS.f19_g16.IRCP60CN.bluefire
+PASS SMS.f19_g16.IGRCP45CN.bluefire
+PASS SMS.f19_g16.IGRCP85CN.bluefire
+
+ bluefire/PTCLM testing: All PASS
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_29
+
+Changes answers relative to baseline: no bit-for-bit (except T31 with new files)
+
+===============================================================
+===============================================================
+Tag name: clm4_0_29
+Originator(s): erik (Erik Kluzek)
+Date: Thu May 5 14:19:04 MDT 2011
+One-line Summary: Backout interpinic changes to one that works
+
+Purpose of changes:
+
+Backout interpinic to Mariana's non2dgrid version. Won't work for new
+files (have to remove fields to get it to work). Adds back in bugs 1318 and 1319.
+Add more comparison tests for tools and add cprnc.pl/ncl scripts to compare files that
+don't have a time-axis.
+
+Bugs fixed (include bugzilla ID):
+ 1328 (interpinic gives bad results that can NOT be used!)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1279 (Latest version of PTCLM requires python2.5)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1318 (interpinic has trouble with new restart files)
+ 1319 (interpinic doesn't interpolate *_PERIOD)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1335 (transient_CN sometimes different than clm4_0_26)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>> Add scripts to compare two NetCDF files and report if different
+>>>>>>>>>>>>>> This mimics the cprnc program, but also works on files without
+>>>>>>>>>>>>>> a time coordinate. For big files it's considerably slower as well.
+ A models/lnd/clm/tools/ncl_scripts/cprnc.pl
+ A models/lnd/clm/tools/ncl_scripts/cprnc.ncl
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>> Add tool comparison tests, use cprnc.pl for tool comparison
+ M models/lnd/clm/test/system/input_tests_master - Add TBLtools test for:
+ mkgriddata, mksurfdata, mkdatadomain, and interpinic, remove pftdyn
+ mksurfdata test
+ M models/lnd/clm/test/system/CLM_compare.sh ----- Remove unused variable
+ M models/lnd/clm/test/system/TSMtools.sh -------- Copy .txt files over if exist
+ M models/lnd/clm/test/system/TBLtools.sh -------- Use cprnc.pl in place of cprnc binary
+
+>>>>>>>>>>>>>> Add tool comparison tests
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_posttag_yong
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+
+>>>>>>>>>>>>>> Add notes on cprnc tools
+ M models/lnd/clm/tools/ncl_scripts/README
+
+>>>>>>>>>>>>>> Move back to Mariana's version of interpinic in non2dgrid08_clm4_0_26
+>>>>>>>>>>>>>> This means it won't work for new files, but will work for older files
+>>>>>>>>>>>>>> and gives the same answers as the non2dgrid version.
+ M models/lnd/clm/tools/interpinic/interpinic.F90
+ M models/lnd/clm/tools/interpinic/Srcfiles
+
+
+Summary of testing:
+
+ bluefire interactive testing: These PASS
+
+002 bl853 TBLtools.sh interpinic tools__o runoptions ............................................PASS (same as non2dgrid08_clm4_0_26)
+001 bl754 TBLtools.sh mksurfdata tools__s namelist ..............................................PASS
+002 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................PASS
+001 bl654 TBLtools.sh mkgriddata tools__ds namelist .............................................PASS
+001 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................PASS
+002 blZ94 TBLtools.sh mkdatadomain tools__ds namelist ...........................................PASS
+
+ yong/ifort interactive testing: These PASS
+
+001 bl853 TBLtools.sh interpinic tools__o runoptions ............................................PASS (same as non2dgrid08_clm4_0_26)
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_28
+
+Changes answers relative to baseline: no bit-for-bit (except interpinic)
+
+===============================================================
+===============================================================
+Tag name: clm4_0_28
+Originator(s): erik (Erik Kluzek)
+Date: Tue May 3 09:14:24 MDT 2011
+One-line Summary: Remove DUST/PROGSSLT in land coupler layer, update driver and scripts
+
+Purpose of changes:
+
+Update drv to branch version, fix ram1/fv issue (remove DUST/PROGSSLT #ifdef's in
+lnd_comp_*). Answers will then be identical to clm4_0_26 (except air density sent to
+cpl). Don't allow both -irrig and -crop to be on at same time in scripts.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1279 (Latest version of PTCLM requires python2.5)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1328 (interpinic gives bad results that can NOT be used!)
+ 1335 (transient_CN sometimes different than clm4_0_26)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): drv, scripts
+ scripts to scripts4_110428a
+ drv to branch version: t3148b_tags/t3148b02_drvseq3_1_48
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/build-namelist - Don't allow crop and irrig on at same time
+
+ M models/lnd/clm/tools/interpinic/interpinic.F90 -------- Move input read up
+ M models/lnd/clm/tools/interpinic/interpinic.runoptions - Use latest input file
+
+ M models/lnd/clm/test/system/tests_pretag_bluefire -- Remove some tests
+ M models/lnd/clm/test/system/README.testnames ------- Don't mix crop and irrig
+ M models/lnd/clm/test/system/input_tests_master ----- Change irrig+crop tests to
+ just crop
+
+
+>>>>>>>>>> Remove DUST, PROGSSLT and VOC #ifdef's
+ M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+
+Summary of testing:
+
+ bluefire: All PASS except...
+024 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+025 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+026 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+027 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+049 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+050 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+051 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+052 blLI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+058 sm8Z3 TSMrst_tools.sh 21p_cncrpsc_do interpinic clm_irrig^nl_crop 20000101:1800 1.9x2.5 gx1v6 10FAIL! rc= 4
+ bluefire/CESM testing: All PASS except... (compare to clm4_0_26 with updated datm)
+SFAIL ERS_D.T31_g37.IGRCP26CN.bluefire.GC.160557
+SFAIL ERP.T31_g37.IGRCP60CN.bluefire.GC.160557
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_26_datmdens
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_26_datmdens
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_27
+
+Changes answers relative to baseline: No bit-for-bit
+ Although coupler log file will show changes in fv and ram1 to clm4_0_27, but
+ will be identical to clm4_0_26 (although then dens in atm changes)
+
+===============================================================
+===============================================================
+Tag name: clm4_0_27
+Originator(s): erik (Erik Kluzek)
+Date: Mon May 2 09:37:57 MDT 2011
+One-line Summary: Move crop branch over to trunk
+
+Purpose of changes:
+
+Move crop branch to trunk. Add crop and noio options to configure. maxpft option to
+configure can now only be a number (removing numpft+1 option to it). Add datasets for
+crop. Add T31 historical and rcp2.6 transient dynpft datasets. Remove some of the CPP
+tokens (DUST, PROGSSLT, etc.). Bring Mariana Vertenstein's version of interpinic over to the
+trunk as well. This version is faster and is able to run for higher resolution cases.
+Remove scaled_harvest and carbon_only namelist options and add suplnitro option
+(supplemental Nitrogen which can be: NONE, PROG_CROP_ONLY, or ALL). Add number parameters
+for the different nsrest settings, and have only one copy of is_restart in
+clm_time_manager. Update to ESMF interface from Tony.
+
+Bugs fixed (include bugzilla ID):
+ 1323 (Remove some unused items)
+ 1319 (interpinic doesn't interpolate *_PERIOD)
+ 1318 (interpinic has trouble with new restart files)
+ 1303 (remove complexity of no-urban in interpinic)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 901 (remove some CPP tokens)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1279 (Latest version of PTCLM requires python2.5)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1325 (GDDHARV on hist causes model to die in debug)
+ 1328 (interpinic gives bad results that can NOT be used!)
+ 1335 (transient_CN sometimes different than clm4_0_26)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: Yes
+
+ Add -crop and -noio options to configure, remove -dust and -progsslt options
+ Remove "numpft+1" option to -maxpft. maxpft can go up to 17 without crop and
+ needs to be 21 for crop.
+
+ -crop adds the CROP #ifdef. Removes the DUST, PROGSSLT, CLAMP CPP tokens.
+ Also remove: DISTURB, COUP_WRF, NO_DAYLEN_VCMAX, TCX_REMOVE_SEE_NOTES_ABOVE, and
+ L2R_Decomp, and some testing/debug CPP defines
+
+Describe any changes made to the namelist: Yes
+
+ Remove Carbon_only and scaled_harvest options
+ Add suplnitro option which can be set to: NONE, PROG_CROP_ONLY, or ALL
+
+ Add new history output variables:
+
+ A5TMIN 5-day running mean of min 2-m temperature (K)
+ A10TMIN 10-day running mean of min 2-m temperature (K)
+ GDD0 Growing degree days base 0C from planting (ddays)
+ GDD8 Growing degree days base 8C from planting (ddays)
+ GDD10 Growing degree days base 10C from planting (ddays)
+ GDD020 Twenty year average of growing degree days base 0C from planting (ddays)
+ GDD820 Twenty year average of growing degree days base 8C from planting (ddays)
+ GDD1020 Twenty year average of growing degree days base 10C from planting (ddays)
+ GDDPLANT Accumulated growing degree days past planting date for crop (ddays)
+ GDDHARV Growing degree days (gdd) needed to harvest (ddays)
+ GDDTSOI Growing degree-days from planting (top two soil layers) (ddays)
+
+List any changes to the defaults for the boundary datasets:
+ New point mode for crop: 1x1_numaIA and 1x1_smallvilleIA
+ pftcon: pft-physiology.c110425.nc
+ surface datasets for crop mode for: f19, f10, 1x1_numaIA, and 1x1_smallvilleIA
+ (also crop datasets with crop AND irrigation on)
+ finidat file for crop for f19
+ New T31 pftdyn file for historical and rcp2.6
+ Raw veg and lai datasets for mksurfdata for crop
+
+Describe any substantial timing or memory changes: Crop adds some additional variables
+ and if checks that may make small differences in run-time and/or memory
+
+Code reviewed by: self, slevis
+
+List any svn externals directories updated (csm_share, mct, etc.): Almost all
+
+ scripts to scripts4_110421
+ share to share3_110411
+ drv to drvseq3_1_53
+ datm to datm8_110419
+ stubs to stubs1_2_04
+
+List all files eliminated:
+
+>>>>>>>>>>>>>>> Remove test configs that explicitly have dust
+ D models/lnd/clm/test/system/config_files/4p_vodsrsc_dh
+ D models/lnd/clm/test/system/config_files/4p_vodsrsc_dm
+ D models/lnd/clm/test/system/config_files/4p_vodsrsc_do
+ D models/lnd/clm/test/system/config_files/4p_vodsrsc_ds
+ D models/lnd/clm/test/system/config_files/17p_vodsrsc_h
+ D models/lnd/clm/test/system/config_files/17p_vodsrsc_m
+ D models/lnd/clm/test/system/config_files/17p_vodsrsc_o
+ D models/lnd/clm/test/system/config_files/17p_vodsrsc_dh
+ D models/lnd/clm/test/system/config_files/4p_vodsnrsc_ds
+ D models/lnd/clm/test/system/config_files/17p_vodsrsc_dm
+ D models/lnd/clm/test/system/config_files/17p_vodsrsc_do
+ D models/lnd/clm/test/system/config_files/4p_vodsrsc_h
+ D models/lnd/clm/test/system/config_files/17p_vodsrsc_ds
+ D models/lnd/clm/test/system/config_files/4p_vodsrsc_o
+
+>>>>>>>>>>>>>>> Remove test for scaled_harvest namelist item
+ D models/lnd/clm/test/system/nl_files/nl_noicertm_sclharv
+
+>>>>>>>>>>>>>>> Remove sample namelists and always use mksurfdata.pl script
+ D models/lnd/clm/tools/mksurfdata/mksurfdata.globalirrig
+ D models/lnd/clm/tools/mksurfdata/mksurfdata.pftdyn
+
+>>>>>>>>>>>>>>> Remove these two from changes that mvertens applied
+ D models/lnd/clm/tools/interpinic/addglobal.F90
+ D models/lnd/clm/tools/interpinic/wrap_nf.F90
+
+>>>>>>>>>>>>>>> Update sample IC file
+ D models/lnd/clm/tools/interpinic/clmi.IQ.1953-01-01_10x15_USGS_simyr2000_c081202.nc
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>>> Add crop test configs
+ A models/lnd/clm/test/system/config_files/21p_cncrpsc_do
+ A models/lnd/clm/test/system/config_files/21p_cncrpsc_s
+ A models/lnd/clm/test/system/config_files/21p_cncrpsc_ds
+ A models/lnd/clm/test/system/config_files/21p_cndvcrpsc_dh
+ A models/lnd/clm/test/system/config_files/21p_cndvcrpsc_dm
+ A models/lnd/clm/test/system/config_files/21p_cndvcrpsc_do
+ A models/lnd/clm/test/system/config_files/21p_cndvcrpsc_ds
+ A models/lnd/clm/test/system/config_files/21p_cndvcrpsc_h
+ A models/lnd/clm/test/system/config_files/21p_cndvcrpsc_m
+ A models/lnd/clm/test/system/config_files/21p_cndvcrpsc_o
+ A models/lnd/clm/test/system/config_files/21p_cndvcrpsc_s
+ A models/lnd/clm/test/system/config_files/21p_cncrpsc_h
+ A models/lnd/clm/test/system/config_files/21p_cncrpsc_dh
+ A models/lnd/clm/test/system/config_files/21p_cncrpsc_m
+ A models/lnd/clm/test/system/config_files/21p_cncrpsc_o
+ A models/lnd/clm/test/system/config_files/21p_cncrpsc_dm
+>>>>>>>>>>>>>>> Add test configs without dust
+ A models/lnd/clm/test/system/config_files/4p_vorsc_dm
+ A models/lnd/clm/test/system/config_files/4p_vorsc_do
+ A models/lnd/clm/test/system/config_files/4p_vorsc_ds
+ A models/lnd/clm/test/system/config_files/17p_vorsc_h
+ A models/lnd/clm/test/system/config_files/17p_vorsc_m
+ A models/lnd/clm/test/system/config_files/17p_vorsc_o
+ A models/lnd/clm/test/system/config_files/4p_vorsc_h
+ A models/lnd/clm/test/system/config_files/4p_vorsc_o
+ A models/lnd/clm/test/system/config_files/17p_vorsc_dm
+ A models/lnd/clm/test/system/config_files/17p_vorsc_dh
+ A models/lnd/clm/test/system/config_files/17p_vorsc_do
+ A models/lnd/clm/test/system/config_files/17p_vorsc_ds
+ A models/lnd/clm/test/system/config_files/4p_vorsc_dh
+
+>>>>>>>>>>>>>>> Add crop restart variables
+ A models/lnd/clm/src/biogeochem/CropRestMod.F90
+
+>>>>>>>>>>>>>>> Add namelist for crop, and mksurfdata to create crop single point
+ A models/lnd/clm/test/system/nl_files/nl_crop
+ A models/lnd/clm/test/system/nl_files/nl_cn_conly
+ A models/lnd/clm/test/system/nl_files/clm_stdIgnYr
+ A models/lnd/clm/test/system/nl_files/mksrfdt_1x1_numaIA_mp20irrcr_2000
+
+ A models/lnd/clm/test/system/TSMrst_tools.sh - Add test to use finidat files
+ run interpinic on it and then make sure you can startup from the result
+
+>>>>>>>>>>>>>>> Explicitly add csm_share files into interpinic build
+ A models/lnd/clm/tools/interpinic/Mkdepends
+ A models/lnd/clm/tools/interpinic/shr_sys_mod.F90
+ A models/lnd/clm/tools/interpinic/shr_log_mod.F90
+ A models/lnd/clm/tools/interpinic/shr_kind_mod.F90
+ A models/lnd/clm/tools/interpinic/shr_const_mod.F90
+
+>>>>>>>>>>>>>>> Add the latest 10x15 initial conditions file to test on
+ A models/lnd/clm/tools/interpinic/clmi.BCN.1949-01-01_10x15_USGS_simyr1850_c100322.nc
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>> Set maxpft to number
+ M models/lnd/clm/test/system/config_files/README
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_m
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_o
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_do
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_s
+ M models/lnd/clm/test/system/config_files/_nrmexsc_ds
+ M models/lnd/clm/test/system/config_files/17p_cnsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnsc_m
+ M models/lnd/clm/test/system/config_files/17p_cnsc_dm
+ M models/lnd/clm/test/system/config_files/_nrvansc_ds
+ M models/lnd/clm/test/system/config_files/17p_cnsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_do
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_do
+ M models/lnd/clm/test/system/config_files/4p_vonrsc_ds
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_h
+
+>>>>>>>>>>>>>>> Remove some tests add new crop tests
+ M models/lnd/clm/test/system/README.testnames ----------------
+ M models/lnd/clm/test/system/tests_posttag_lynx_nompi --------
+ M models/lnd/clm/test/system/tests_pretag_bluefire -----------
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi -----
+ M models/lnd/clm/test/system/tests_pretag_edinburgh ----------
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi ----
+ M models/lnd/clm/test/system/tests_posttag_yong --------------
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf -----------
+ M models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi -----
+ M models/lnd/clm/test/system/tests_posttag_mirage ------------
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression -
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression --
+
+ M models/lnd/clm/test/system/TCBtools.sh ------- Add TOOL_ROOT
+ M models/lnd/clm/test/system/test_driver.sh ---- Use path to glade, update path
+ M models/lnd/clm/test/system/mknamelist -------- Add ability to set finidat file on
+ startup
+ M models/lnd/clm/test/system/input_tests_master Change out vodsrsc for vorsc,
+ add crop tests, add interpinic restart tests
+ M models/lnd/clm/test/system/tests_posttag_lynx Add sm9T4 test
+ M models/lnd/clm/test/system/CLM_runcmnd.sh ---- Remove -d
+
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850 Change from 1850-2000
+ to just 1850
+ M models/lnd/clm/test/system/nl_files/clm_irrig -------------- Use ignore_ic_year
+ instead of ignore_ic_date
+
+>>>>>>>>>>>>>>> Add ability to add crop in, add -crop to mksurfdata.pl which sets the
+>>>>>>>>>>>>>>> numpft=20 namelist item
+ M models/lnd/clm/tools/mksurfdata/mkvarpar.F90 - Add numstdpft
+ M models/lnd/clm/tools/mksurfdata/mkvarctl.F90 - Add numpft
+ M models/lnd/clm/tools/mksurfdata/ncdio.F90 ---- Add nf_get_att_double/nf_get_var_text
+ M models/lnd/clm/tools/mksurfdata/mklaiMod.F90 - Use numpft
+ M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 Add mksrf_flai/mksrf_firrig to file
+ M models/lnd/clm/tools/mksurfdata/areaMod.F90 -- Put numpft in mkvarctl
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 - Add numpft to namelist
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl Handle crop and irrig and change
+ names accordingly
+ M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 - Add numpft and add to namelist
+ if numpft = 20 add crop in
+
+>>>>>>>>>>>>>>> Bring in interpinic version from Mariana Vertenstein
+>>>>>>>>>>>>>>> Make faster and use less memory, update NetCDF interface, make
+>>>>>>>>>>>>>>> standalone so not dependent on other directories
+ M models/lnd/clm/tools/interpinic/interpinic.F90 - Make faster by saving indices,
+ use less memory, update to F90 NetCDF interface, make standalone
+ M models/lnd/clm/tools/interpinic/fmain.F90 ------ Add -a option to NOT override missing
+ M models/lnd/clm/tools/interpinic/Srcfiles ------- Remove mpi files
+ M models/lnd/clm/tools/interpinic/Filepath ------- Make standalone
+ M models/lnd/clm/tools/interpinic/Makefile ------- Use local MkDepends, compare
+ to null, change interface for testing a bit
+ M models/lnd/clm/tools/interpinic/README --------- Add note about SMP, update clmi file
+ M models/lnd/clm/tools/interpinic/interpinic.runoptions Use new file
+
+>>>>>>>>>>>>>>> Add numpft
+ M models/lnd/clm/tools/mkgriddata/mkvarctl.F90
+
+>>>>>>>>>>>>>>> Add crop/noio remove dust and progsslt and CLAMP setting
+ M models/lnd/clm/bld/configure ------------- Add -crop/-noio remove -dust/-progsslt
+ turn RTM off for sitespf_pt, error check crop, maxpft, remove CLAMP setting
+ M models/lnd/clm/bld/listDefaultNamelist.pl Add loop for crop
+ M models/lnd/clm/bld/build-namelist -------- Sense crop=on/off, add suplnitro remove
+ Carbon_only
+ M models/lnd/clm/bld/clm.cpl7.template ----- Change order of $CLM_CONFIG_OPTS
+ so will be done last and override other settings
+ M models/lnd/clm/bld/config_files/config_definition.xml Add crop/noio, remove dust/progsslt
+ have maxpft only allow numbers up to 21
+
+>>>>>>>>>>>>>>> New files for crop, remove old namelist items add new, add crop datasets
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --------- Remove
+ Carbon_only add suplnitro, remove scaled_harvest, correct spellings
+ 1x1_numaIA,1x1_smallvilleIA resolutions
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml --- Add
+ 1x1_numaIA,1x1_smallvilleIA
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml ------ Add
+ 1x1_numaIA,1x1_smallvilleIA
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ------- Add
+ settings for suplnitro, new fpftcon, finidat for crop f19, add crop parameters
+ files for crop for f19,f10,1x1_numaIA,1x1_smallvilleIA, fix T31 files
+ turn create_crop_landunit on for crop
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml - Add
+ mksrf_flai/mksrf_fvegtyp for crop add crop=on/off for those files
+
+>>>>>>>>>>>>>>> Use nsrest parameters, use secspday/days_per_year, handle prognostic crop
+>>>>>>>>>>>>>>> Remove CLAMP/is_restart/DUST/DISTURB/CLAMP
+>>>>>>>>>>>>>>> if soil also check if crop, use vegetation indices, add initialization
+>>>>>>>>>>>>>>> subroutines, pass crop filters down, supplemental Nitrogen can be for
+>>>>>>>>>>>>>>> nothing, just for crop, or for all.
+ M models/lnd/clm/src/biogeochem/CASAPhenologyMod.F90 ----- Use nsrest parameters, secspday
+ M models/lnd/clm/src/biogeochem/CNGapMortalityMod.F90 ---- Use secspday
+ M models/lnd/clm/src/biogeochem/CNGRespMod.F90 ----------- Handle crop
+ M models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90 --- Handle crop
+ M models/lnd/clm/src/biogeochem/CNFireMod.F90 ------------ Use secspday/days_per_year
+ M models/lnd/clm/src/biogeochem/CNMRespMod.F90 ----------- If crop add livestem
+ M models/lnd/clm/src/biogeochem/CASAMod.F90 -------------- Remove CLAMP is_restart
+ if soil or crop, nsrest parameters, use veg indices
+ M models/lnd/clm/src/biogeochem/CNPrecisionControlMod.F90 Handle crop
+ M models/lnd/clm/src/biogeochem/CNSummaryMod.F90 --------- Remove CLAMP, add crop
+ M models/lnd/clm/src/biogeochem/DUSTMod.F90 -------------- Remove DUST, if soil or crop
+ M models/lnd/clm/src/biogeochem/CNPhenologyMod.F90 ------- Add init and crop-Phenology
+ M models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90 --- Add prog crop
+ M models/lnd/clm/src/biogeochem/CNDecompMod.F90 ---------- Pass crop filter down
+ use secspday
+ M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 ------- Remove extra use
+ M models/lnd/clm/src/biogeochem/CNrestMod.F90 ------------ Remove is_restart
+ M models/lnd/clm/src/biogeochem/CNAnnualUpdateMod.F90 ---- Add CROP #ifdef to CNDV
+ M models/lnd/clm/src/biogeochem/CNNDynamicsMod.F90 ------- Use dayspyr and secspday
+ M models/lnd/clm/src/biogeochem/CNAllocationMod.F90 ------ Add init subroutine
+ section for prognostic crop, supplemental Nitrogen can be on for nothing,
+ crop only, or everything
+ M models/lnd/clm/src/biogeochem/CNEcosystemDynMod.F90 ---- Add init subroutine,
+ add crop filters
+ M models/lnd/clm/src/biogeochem/CNSetValueMod.F90 -------- Set crop vars remove CLAMP
+ M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90 - Add if section for crop
+ M models/lnd/clm/src/biogeochem/CNDVEstablishmentMod.F90 - Remove DISTURB
+
+>>>>>>>>>>>>>>> Use nsrest parameters, update ESMF interface
+ M models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90 --- Use nsrest parameters
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90 - Update interface from Tony Craig
+ Use nsrest parameters, use phase as a keyword.
+ M models/lnd/clm/src/cpl_esmf/lnd_comp_mct.F90 -- Update interface from Tony Craig
+ compare success to ESMF_SUCCESS rather than 0.
+
+>>>>>>>>>>>>>>> Add istcrop and if statements for istsoil also test for istcrop
+>>>>>>>>>>>>>>> Remove CLAMP/DUST/PROGSSLT/debug ifdef/vcmx25/dw_fcel/dw_flig
+>>>>>>>>>>>>>>> /scaled_harv/ bad hist indices. Set if prog_crop in surfrdMod
+>>>>>>>>>>>>>>> Use secspday and days_per_year, more vars on pft-physiology file
+>>>>>>>>>>>>>>> Add parameters for nsrest settings, new hist vars, error check CROP
+ M models/lnd/clm/src/main/clm_varcon.F90 -------- Add istcrop
+ M models/lnd/clm/src/main/clm_varpar.F90 -------- Add numveg and mxpft
+ M models/lnd/clm/src/main/CNiniTimeVar.F90 ------ Remove CLAMP, if soil or crop
+ also set some crop vars
+ M models/lnd/clm/src/main/dynlandMod.F90 -------- If soil or crop
+ M models/lnd/clm/src/main/accumulMod.F90 -------- Remove is_restart, add missing to _PERIOD
+ M models/lnd/clm/src/main/clm_initializeMod.F90 - Remove CLAMP/DUST, use nsrest parameters
+ add call to CNEcosystemDynInit
+ M models/lnd/clm/src/main/subgridRestMod.F90 ---- Remove incorrect grid indices (bug 1310)
+ M models/lnd/clm/src/main/accFldsMod.F90 -------- Add GDD0/8/10/PLANT/HARV/TSOI/TDM5/10
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 ---- Remove CLAMP
+ CROP & C13 is not valid, add crop vars, remove vcmx25/dw_fcel/dw_flig
+ M models/lnd/clm/src/main/ndepStreamMod.F90 ----- Use secspday in place of 86400
+ M models/lnd/clm/src/main/pftdynMod.F90 --------- Use days_per_year in place of 365
+ move pconv/pprod10/pprod100 to pft-physiology file, if soil or crop
+ use nsrest parameters, remove scaled_harvest
+ M models/lnd/clm/src/main/iniTimeConst.F90 ------ Add graincn, remove: vcmx25/dw_fcel/dw_flig
+ M models/lnd/clm/src/main/histFileMod.F90 ------- Use secspday in place of 86400
+ fix Conventions, use nsrest parameters, comment out indices (bug 1310)
+ M models/lnd/clm/src/main/clm_atmlnd.F90 -------- Remove DUST/PROGSSLT/1==1
+ M models/lnd/clm/src/main/restFileMod.F90 ------- Use nsrest parameters, add CropRest
+ fix Conventions
+ M models/lnd/clm/src/main/controlMod.F90 -------- Remove scaled_harvest/Carbon_only
+ use nsrest parameters, add suplnitro
+ M models/lnd/clm/src/main/initSurfAlbMod.F90 ---- if soil or crop, send crop filters
+ to CNEcosystemDyn
+ M models/lnd/clm/src/main/clm_time_manager.F90 -- Remove COUP_WRF, add get_driver_start_ymd
+ M models/lnd/clm/src/main/filterMod.F90 --------- Add filter for prognostic-crop
+ if soil or crop
+ M models/lnd/clm/src/main/clm_varctl.F90 -------- Add parameters for nsrest valid
+ values: nsrStartup, nsrContinue, nsrBranch, remove scaled_harvest
+ make sure crop allocates all PFT's
+ M models/lnd/clm/src/main/clm_driver.F90 -------- Remove DUST send crop filters
+ to CNEcosystemDyn
+ M models/lnd/clm/src/main/initGridCellsMod.F90 -- If crop send istcrop to set_landunit_crop_noncompete
+ M models/lnd/clm/src/main/CASAiniTimeVarMod.F90 - Remove CLAMP
+ M models/lnd/clm/src/main/pftvarcon.F90 --------- Add crop vars, corn,
+       temperate spring/winter cereal, and soybean, remove vcmx25/dw_flig/dw_fcel
+ add new variables for crop add npcropmin, npcropmax and error checking
+ M models/lnd/clm/src/main/ncdio_pio.F90 --------- Add logical field support
+ M models/lnd/clm/src/main/spmdMod.F90 ----------- Add MPI_LOR
+ M models/lnd/clm/src/main/surfrdMod.F90 --------- Add crop_prog as public module data
+ Remove TCX_REMOVE_SEE_NOTES_ABOVE, error checking if prognostic crops avail
+ and CROP not defined and vice versa
+ M models/lnd/clm/src/main/clmtype.F90 ----------- New variables for CROP, remove CLAMP
+ Remove dw_fcel, dw_flig, vcmx25
+ M models/lnd/clm/src/main/histFldsMod.F90 ------- Remove CLAMP and DUST, T10 output
+ for CNDV or CROP, add A5TMIN, A10TMIN, GDD0, GDD8, GDD10, GDD020, GDD820,
+ GDD1020, GDDPLANT, GDDTSOI and GDDHARV for crop (as inactive)
+
+ M models/lnd/clm/src/main/mkarbinitMod.F90 ------ If soil or crop
+
+ M models/lnd/clm/src/riverroute/RtmMod.F90 - Remove L2R_Decomp #ifdef, and #if (1 == 0)
+       Remove is_restart and use clm_time_manager version.
+
+>>>>>>>>>>>>>>> Change if statements on "if soil" to "if soil or crop"
+>>>>>>>>>>>>>>> Remove DUST, NO_DAYLEN_VCMAX #ifdefs, is_restart, vcmx25, avcmx,
+>>>>>>>>>>>>>>> and SNICAR stats. vcmx calc is different for crop and btran for soybean
+ M models/lnd/clm/src/biogeophys/SurfaceRadiationMod.F90 - If soil or crop
+ M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 -- If soil or crop
+ M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90 ---- If soil or crop
+ M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90 --- If soil or crop
+ M models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90 --- If soil or crop
+ M models/lnd/clm/src/biogeophys/FrictionVelocityMod.F90 - Remove DUST #ifdef
+ M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 ---- If soil or crop
+ M models/lnd/clm/src/biogeophys/Hydrology1Mod.F90 ------- If soil, urb, wet or crop
+ M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 ------- If soil or crop
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 --- Remove is_restart use
+       clm_time_manager version, use nsrest parameters
+ M models/lnd/clm/src/biogeophys/SNICARMod.F90 ----------- Remove commented out SNICAR stats
+ M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90 - If soil or crop
+ M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90 ----- Remove NO_DAYLEN_VCMAX,
+ and vcmx25,avcmx, vcmx calc different for crop and btran for soybean.
+
+Summary of testing:
+
+ bluefire: All PASS except...
+008 blAZ1 TBL.sh 21p_cncrpsc_dh clm_irrig^nl_crop 20020401:3600 10x15 USGS -10 cold .............FAIL! rc= 5
+015 blE91 TBL.sh 4p_vorsc_dh clm_std^nl_urb 20021230:1800 4x5 gx3v7 48 arb_ic ...................FAIL! rc= 5
+020 blF92 TBL.sh 17p_vorsc_dm clm_std^nl_urb 20021230:1800 4x5 gx3v7 48 cold ....................FAIL! rc= 5
+024 blEH1 TBL.sh 4p_vorsc_dh clm_std^nl_urb 20021231:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic ......FAIL! rc= 5
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+037 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+041 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:3600 10x15 USGS@1850-2000 -10 arb_ic ..............FAIL! rc= 7
+059 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+060 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+061 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+062 blLI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+032 blF93 TBL.sh 17p_vorsc_do clm_std^nl_urb 20021230:1800 4x5 gx3v7 48 cold ....................FAIL! rc= 5
+056 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+057 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+058 sm8Z3 TSMrst_tools.sh 21p_cncrpsc_do interpinic clm_irrig^nl_crop 20000101:1800 1.9x2.5 gx1v6 10FAIL! rc= 4
+ bluefire/CESM testing: All PASS except... (dens, fv and ram1 change)
+FAIL SMS_RLA.f45_f45.I.bluefire.compare_hist.clm4_0_26
+FAIL SMS_RLA.f45_f45.I.bluefire.compare.clm4_0_26
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm4_0_26
+FAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_26
+FAIL SMS_ROA.f45_f45.I.bluefire.compare_hist.clm4_0_26
+FAIL SMS_ROA.f45_f45.I.bluefire.compare.clm4_0_26
+FAIL ERS_D.f45_g37.I.bluefire.compare_hist.clm4_0_26
+FAIL ERS_D.f45_g37.I.bluefire.compare.clm4_0_26
+FAIL PST.f45_g37.I1850.bluefire.compare.clm4_0_26
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm4_0_26
+FAIL ERS_E.f19_g16.I1850.bluefire.compare_hist.clm4_0_26
+FAIL ERS_E.f19_g16.I1850.bluefire.compare.clm4_0_26
+FAIL ERI.f19_g16.IG1850.bluefire.compare.clm4_0_26
+SFAIL ERS_D.T31_g37.IGRCP26CN.bluefire.GC.231059
+SFAIL ERP.T31_g37.IGRCP60CN.bluefire.GC.231059
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm4_0_26
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_26
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm4_0_26
+FAIL PST.f10_f10.I20TRCN.bluefire.compare.clm4_0_26
+FAIL PET_PT.f10_f10.I20TRCN.bluefire.compare.clm4_0_26
+FAIL SMS.f10_f10.IRCP45CN.bluefire.compare_hist.clm4_0_26
+FAIL SMS.f10_f10.IRCP45CN.bluefire.compare.clm4_0_26
+ bluefire/PTCLM testing: All PASS up to..
+US-Ha1_ICN_ad_spinup.PTCLM PASS
+ jaguarpf interactive testing: All PASS up to...
+008 blAZ3 TBL.sh 21p_cncrpsc_do clm_irrig^nl_crop 20020401:3600 10x15 USGS -10 cold .............FAIL! rc= 5
+011 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+012 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+013 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+014 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+035 sm8Z3 TSMrst_tools.sh 21p_cncrpsc_do interpinic clm_irrig^nl_crop 20000101:1800 1.9x2.5 gx1v6 10FAIL! rc= 4
+ edinburgh/lf95 interactive testing: All PASS except...
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:1800 1x1_brazil navy -10 arb_ic ...............FAIL! rc= 7
+010 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:3600 1x1_camdenNJ navy -90 arb_ic .............FAIL! rc= 7
+014 blOC4 TBL.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:3600 1x1_vancouverCAN navy 331 arb_ic ...FAIL! rc= 7
+018 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:3600 1x1_mexicocityMEX navy 158 arb_ic ..FAIL! rc= 7
+026 blL74 TBL.sh _nrsc_s clm_std^nl_urb 20020101:1800 1x1_brazil navy -10 arb_ic ................FAIL! rc= 7
+ mirage,storm/ifort interactive testing: All PASS except...
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+021 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 10+38 arb_ic ...........FAIL! rc= 5
+022 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_26
+
+Changes answers relative to baseline: no bit-for-bit
+ with the exception that the new crop mode is NOT in previous tags
+
+===============================================================
+===============================================================
+Tag name: clm4_0_26
+Originator(s): erik (Erik Kluzek)
+Date: Wed Mar 23 11:43:00 MDT 2011
+One-line Summary: Update externals, driver update changes answers, drydep changes from fvitt, fix bugs
+
+Purpose of changes:
+
+Update externals to latest pre-cesm1_0_beta17 version. driver to beyond cesm1_0_beta16
+version -- so answers change. Always update ndep_interp in clm_driver -- so restarts are
+exact. Bring in Francis Vitt drydep changes. Remove bad T31 pftdyn datasets add in
+a new rcp2.6 T31 dataset. Fix interpinic _var bug. Remove HIRES from bld. Change
+tools Makefile's so that you can set env variables. Change test_driver to use newer
+version of cprnc.
+
+Bugs fixed (include bugzilla ID):
+ 1284 (Crop restart test fails)
+ 1304 (bug in interpinic *_var)
+ 1308 (tools Make doesn't allow setting env vars)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1279 (Latest version of PTCLM requires python2.5)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1318 (interpinic has trouble with new restart files)
+  1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: more fields added for drydep namelist
+
+List any changes to the defaults for the boundary datasets:
+ Remove bad T31 pftdyn datasets
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self, drydep changes from fvitt and JFL
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, cprnc
+
+ scripts to scripts4_110314
+ drv to drvseq3_1_51
+ cprnc to cprnc_110310
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+ A README_EXTERNALS -- Describes how to work with externals (similar to cam file)
+
+List all existing files that have been modified, and describe the changes:
+
+ M README - Update with new
+
+ M models/lnd/clm/test/system/CLM_compare.sh - Update for latest cprnc which
+       doesn't have a "completed successfully" line at the end
+ M models/lnd/clm/test/system/test_driver.sh - Use newer cprnc on bluefire
+
+>>>>>>>>>>> Change tools build so that you can set env variables for SMP/USER_FC/CC
+ M models/lnd/clm/tools/mksurfdata/Makefile -------- Compare to ,null rather than strip
+ M models/lnd/clm/tools/interpinic/Makefile -------- Compare to ,null rather than strip
+ M models/lnd/clm/tools/mkgriddata/Makefile -------- Compare to ,null rather than strip
+ M models/lnd/clm/tools/mkdatadomain/Makefile ------ Compare to ,null rather than strip
+
+ M models/lnd/clm/tools/interpinic/interpinic.F90 -- Make sure htop_var/fpcgrid_var
+ are initialized to false each time comes into routine (bug 1304)
+
+ M models/lnd/clm/bld/configure -- Remove HIRES setting for stand-alone testing
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - New rcp2.6 T31
+ pftdyn dataset remove rcp4.5,6,8.5 T31 pftdyn files as they only go to 2035
+
+ M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 - Pick wesveg and index_season
+ differently for special landunits, add max for rc, assume no surface
+ resistance for SO2 over water, use has_rain logical (from fvitt)
+ M models/lnd/clm/src/main/clm_driver.F90 ----------- Always call ndep_interp
+ even if (stream_year_first_ndep /= stream_year_last_ndep) as can change
+ answers if not
+
+Summary of testing:
+
+ bluefire: All PASS except TBL tests and...
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+037 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+063 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+064 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+065 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+ bluefire interactive testing: All PASS except TBL tests
+ bluefire/CESM testing: All PASS (even the comparison tests)
+ bluefire/PTCLM testing: All PASS up to...
+US-Ha1_ICN_ad_spinup.PTCLM PASS
+ edinburgh/lf95 interactive testing: All PASS except...
+021 brVx3 TBR.sh _mec10sc_do clm_transient_glcMEC_rcp4.5^nl_urb_br 20331231:1800 48x96 gx3v7@1850-21FAIL! rc= 10
+ mirage,storm/ifort interactive testing: All PASS except TBL tests and...
+016 smVx3 TSM.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -10 arbFAIL! rc= 10
+017 erVx3 TER.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -4+-6 aFAIL! rc= 5
+018 brVx3 TBR.sh _mec10sc_do clm_transient_glcMEC_rcp4.5^nl_urb_br 20331231:1800 48x96 gx3v7@1850-21FAIL! rc= 5
+024 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+025 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 10+38 arb_ic ...........FAIL! rc= 5
+026 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+ yong/ifort interactive testing: All PASS except...
+011 smD94 TSM.sh _persc_ds clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_25
+
+Changes answers relative to baseline: Yes! Greater than roundoff
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All
+ - what platforms/compilers: All
+   - nature of change: (larger than roundoff/same climate)
+
+ driver mapping changes, drydep code has changes, ndep_interp is always called
+ which changes answers on some platforms/compilers (such as intel compiler).
+
+===============================================================
+===============================================================
+Tag name: clm4_0_25
+Originator(s): erik (Erik Kluzek)
+Date: Tue Mar 22 10:13:08 MDT 2011
+One-line Summary: Always output restart-history files add more meta-data to them,
+ fix urbanc_alpha and 2.5x3.33 datasets, Changes from Keith O on SNOWLIQ/SNOWICE
+
+Purpose of changes:
+
+Move history namelist information to restart history files and always output them. Add
+attributes and meta-data to the restart history files. Fix urbanc_alpha test site surface
+dataset. Fix datm namelist for urban cases. Use new crop pft-physiology file. Update
+scripts and csm_share. Changes from Keith O on SNOWLIQ/SNOWICE so goes to zero rather
+than missing value. Update 2.5x3.33 datasets. Fix dvolrdt units documentation, call
+mksoifmaxInit.
+
+Bugs fixed (include bugzilla ID):
+ 1247 (Some changes to ncd_pio in clm)
+ 1306 (mksoifmaxInit is NOT called)
+ 1305 (dvolrdt is documented with the wrong units)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1279 (Latest version of PTCLM requires python2.5)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1304 (bug in interpinic *_var)
+ 1318 (interpinic has trouble with new restart files)
+  1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ New pft-physiology file with fields for prognostic crop
+ New surface dataset for urbanc_alpha
+ New grid/topo/frac/domain files for 2.5x3.33
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, snowliq/snowice changes by oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, datm, cism, csm_share
+
+ scripts to scripts4_110204
+ datm to datm8_110210
+ cism to cism1_110220
+ csm_share to share3_110201
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/input_tests_master - Change start dates of urban tests
+
+>>>>>>>>>>>> Move mksoil*Init subroutines private to mksoilMod, and call a mksoilInit
+>>>>>>>>>>>> routine from mksrfdata, making sure mksoifmaxInit is called.
+ M models/lnd/clm/tools/mksurfdata/mksoilMod.F90 - Add mksoilInit to call
+ mksoitexInit/mksoicolInit and mksoifmaxInit (mksoifmaxInit was missing)
+ fix mksoifmaxInit, and make mksoitex/col/fmaxInit routines private
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 -- Call mksoilInit,
+ remove mksoicol/texInit
+
+>>>>>>>>>>>> Add notes about setting path to NetCDF, and other gmake options
+ M models/lnd/clm/tools/mksurfdata/README
+ M models/lnd/clm/tools/interpinic/README
+ M models/lnd/clm/tools/mkgriddata/README
+ M models/lnd/clm/tools/mkdatadomain/README
+
+ M models/lnd/clm/bld/queryDefaultNamelist.pl - Remove white-space from input options
+ M models/lnd/clm/bld/listDefaultNamelist.pl -- Also list datm_internal files
+ M models/lnd/clm/bld/build-namelist ---------- Add drv_final_spinup from PTCLM
+       document precedence of the different env_conf.xml
+
+>>>>>>>>>>>> Fix 2.5x3.33 and urbanc_alpha files, change some settings for CLM1PT
+>>>>>>>>>>>> or pt1_pt1 resolution, remove ndepsrc.
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --------- Add
+ taxmode and dtlimit, add 2.5x3.33 resolution
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml --- Add
+       sim_year="2000" sim_year_range="constant" for pt1_pt1 datm_presaero files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml ------ Add
+                1x1_numaIA and 1x1_smallvilleIA domain/presaero files, update 2.5x3.33 domain
+ make mapalgo nn for CLM1PT, set taxmode and tintalgo appropriately
+ if CLM1PT is set. Add transient presaero file for 1x1_tropicAtl.
+ M models/lnd/clm/bld/namelist_files/use_cases/stdurbpt_pd.xml ------- Set
+ dtime to 1800 for 1x1_urbanc_alpha
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist ------------- Set
+ tintalgo, mapalgo by datm_source, and set taxmode as well. Remove
+ option for datm_presaero="none". Set mapalgo=nn for datm_presaero=pt1_pt1.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ------- Update
+ urbanc_alpha surface dataset, 2.5x3.33 grid/topo/frac datasets
+ Remove ndepsrc="stream" in ndepmapalgo settings as doesn't exist anymore.
+ Use latest pft-physiology file from CROP branch (has extra data needed
+ for prognostic crop)
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml ------- Set
+ atm_cpl_dt=1800 for urbanc_alpha, set stop_option/stop_n for
+ urban sites carefully (add 1 time-step to stop_n, double for urbanc_alpha).
+ Use "test" mask for urbanc_alpha
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml - Remove
+ ndepsrc="stream"
+
+>>>>>>>>>>>> Add meta-data to restart hist files, put history restart data on
+>>>>>>>>>>>> restart hist files and off master restart files (so initial condition
+>>>>>>>>>>>> files aren't cluttered with information only needed for continue runs).
+>>>>>>>>>>>> Some changes to SNOWLIQ/SNOWICE, document dvolrdt units.
+ M models/lnd/clm/src/main/histFileMod.F90 - Use htape_create for restart_hist
+ files, modify hist_restart_ncd so that namelist vars on one restart
+ history files and they are always output, add more metadata to
+ restart hist files, remove some temp arrays. Restart history files
+ now always needed for continue runs, but not for other run types,
+ and restart history information does not clutter the master restart
+ files. The only history variables on master restart files are the
+ history and restart filenames. Comments on the files make this clear.
+ M models/lnd/clm/src/main/restFileMod.F90 - Change hist_restart_ncd calls
+ M models/lnd/clm/src/main/ncdio_pio.F90 --- Add ncd_io_log_var0_nf interface
+ add options for attributes: comment, flag_values, flag_meanings, and
+            nvalid_range for variables, fix an issue in ncd_io_int_var0_nf
+ M models/lnd/clm/src/main/histFldsMod.F90 - Change default for SNOWLIQ/SNOWICE
+ to "Average" rather than "Instant" (from oleson).
+ M models/lnd/clm/src/riverroute/RtmMod.F90 ---- Document dvolrdt conversion
+ M models/lnd/clm/src/riverroute/RunoffMod.F90 - Document dvolrdt units
+ correctly.
+ M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 - Initialize snowice/snowliq
+ to zero over lake filter (from oleson).
+
+Summary of testing:
+
+ bluefire: All PASS except TBL tests and...
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+037 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+063 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+064 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+065 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+ bluefire interactive testing: All PASS except all TBL tests fail
+ bluefire/CESM testing: All PASS except...
+FAIL SMS_RLA.f45_f45.I.bluefire.generate.clm4_0_25
+FAIL SMS_RLB.f45_f45.I.bluefire.generate.clm4_0_25
+FAIL SMS_ROA.f45_f45.I.bluefire.generate.clm4_0_25
+FAIL ERS_D.f45_g37.I.bluefire.generate.clm4_0_25
+BFAIL ERS_D.f45_g37.I.bluefire.compare.clm4_0_23
+FAIL PST.f45_g37.I1850.bluefire.generate.clm4_0_25
+FAIL PET_PT.f45_g37.I1850.bluefire.generate.clm4_0_25
+FAIL ERS_E.f19_g16.I1850.bluefire.generate.clm4_0_25
+FAIL ERI.f19_g16.IG1850.bluefire.generate.clm4_0_25
+FAIL ERS_D.T31_g37.IGRCP26CN.bluefire.generate.clm4_0_25
+FAIL ERP.T31_g37.IGRCP60CN.bluefire.generate.clm4_0_25
+BFAIL ERP.T31_g37.IGRCP60CN.bluefire.compare.clm4_0_23
+FAIL ERB.f09_g16.I_1948-2004.bluefire.generate.clm4_0_25
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_25
+FAIL ERH_D.f10_f10.I1850CN.bluefire.generate.clm4_0_25
+BFAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm4_0_23
+FAIL PST.f10_f10.I20TRCN.bluefire.generate.clm4_0_25
+FAIL PET_PT.f10_f10.I20TRCN.bluefire.generate.clm4_0_25
+FAIL SMS.f10_f10.IRCP45CN.bluefire.generate.clm4_0_25
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.generate.clm4_0_25
+BFAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_23
+ jaguarpf interactive testing:
+002 erA74 TER.sh _nrsc_ds clm_std^nl_urb 20030101:1800 1x1_brazil navy -5+-5 arb_ic .............FAIL! rc= 13
+003 brA74 TBR.sh _nrsc_ds clm_std^nl_urb_br 20030101:1800 1x1_brazil navy -5+-5 arb_ic ..........FAIL! rc= 11
+006 erAZ3 TER.sh _sc_do clm_irrig 20020401:3600 10x15 USGS -3+-7 cold ...........................FAIL! rc= 13
+007 brAZ3 TBR.sh _sc_do clm_irrig 20020401:3600 10x15 USGS -5+-5 cold ...........................FAIL! rc= 11
+011 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+012 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+013 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+016 erJ74 TER.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:3600 1x1_tropicAtl test -10+-10 arb_ic ..FAIL! rc= 13
+017 brJ74 TBR.sh 4p_nrcasasc_ds clm_std^nl_urb_br 10001230:3600 1x1_tropicAtl test -3+-3 arb_ic .FAIL! rc= 11
+020 erK74 TER.sh 17p_cndvsc_s clm_std 19971231:1800 1x1_brazil navy -334+-336 arb_ic ............FAIL! rc= 13
+021 brK74 TBR.sh 17p_cndvsc_s clm_std 19971231:1800 1x1_brazil navy -334+-336 arb_ic ............FAIL! rc= 11
+024 erVx3 TER.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -4+-6 aFAIL! rc= 13
+025 brVx3 TBR.sh _mec10sc_do clm_transient_glcMEC_rcp4.5^nl_urb_br 20331231:1800 48x96 gx3v7@1850-21FAIL! rc= 11
+028 erHQ4 TER.sh _nrcnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -3+-7 cold ............FAIL! rc= 13
+029 brHQ4 TBR.sh _nrcnsc_ds clm_drydep 20000214:1800 1x1_brazil navy@2000 -5+-5 cold ............FAIL! rc= 11
+032 erV63 TER.sh _mec10sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 10+38 arb_ic ................FAIL! rc= 13
+  jaguarpf/CESM testing: All PASS including comparison tests except...
+FAIL PST.f10_f10.I20TRCN.jaguarpf
+ edinburgh/lf95 interactive testing:
+002 erA74 TER.sh _nrsc_ds clm_std^nl_urb 20030101:1800 1x1_brazil navy -5+-5 arb_ic .............FAIL! rc= 13
+003 brA74 TBR.sh _nrsc_ds clm_std^nl_urb_br 20030101:1800 1x1_brazil navy -5+-5 arb_ic ..........FAIL! rc= 11
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:1800 1x1_brazil navy -10 arb_ic ...............FAIL! rc= 7
+006 blAL4 TBL.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 7
+008 erCA4 TER.sh _nrsc_ds clm_std^nl_urb 20021001:3600 1x1_camdenNJ navy -45+-45 arb_ic .........FAIL! rc= 13
+009 brCA4 TBR.sh _nrsc_ds clm_std^nl_urb_br 20021001:3600 1x1_camdenNJ navy -10+-10 arb_ic ......FAIL! rc= 11
+010 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:3600 1x1_camdenNJ navy -90 arb_ic .............FAIL! rc= 7
+012 erOC4 TER.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:3600 1x1_vancouverCAN navy 115+115 arb_ic FAIL! rc= 13
+013 brOC4 TBR.sh _nrvansc_ds clm_urb1pt^nl_urb_br 19920812:3600 1x1_vancouverCAN navy 72+72 arb_ic FAIL! rc= 11
+014 blOC4 TBL.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:3600 1x1_vancouverCAN navy 331 arb_ic ...FAIL! rc= 5
+016 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:3600 1x1_mexicocityMEX navy 58+100 arb_ic FAIL! rc= 13
+017 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:3600 1x1_mexicocityMEX navy 72+72 arb_ic FAIL! rc= 11
+018 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:3600 1x1_mexicocityMEX navy 158 arb_ic ..FAIL! rc= 5
+020 erVx3 TER.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -4+-6 aFAIL! rc= 13
+021 brVx3 TBR.sh _mec10sc_do clm_transient_glcMEC_rcp4.5^nl_urb_br 20331231:1800 48x96 gx3v7@1850-21FAIL! rc= 11
+022 blVx3 TBL.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -10 arbFAIL! rc= 7
+024 erL74 TER.sh _nrsc_s clm_std^nl_urb 20020101:1800 1x1_brazil navy -5+-5 arb_ic ..............FAIL! rc= 13
+025 brL74 TBR.sh _nrsc_s clm_std^nl_urb_br 20020101:1800 1x1_brazil navy -10+-10 arb_ic .........FAIL! rc= 11
+026 blL74 TBL.sh _nrsc_s clm_std^nl_urb 20020101:1800 1x1_brazil navy -10 arb_ic ................FAIL! rc= 7
+  edinburgh/CESM testing: All PASS including comparison tests
+ yong/intel testing:
+011 smD94 TSM.sh _persc_ds clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 2
+012 erD94 TER.sh _persc_ds clm_per^nl_per 20021231:1200 4x5 gx3v7 72+72 cold ....................FAIL! rc= 5
+019 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 10+38 cold FAIL! rc= 13
+020 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 72+72 cold FAIL! rc= 11
+022 smV24 TSM.sh _mec10sc_ds clm_glcmec 19980115:1800 48x96 gx3v7 96 arb_ic .....................FAIL! rc= 10
+023 erV24 TER.sh _mec10sc_ds clm_glcmec 19980115:1800 48x96 gx3v7 10+38 arb_ic ..................FAIL! rc= 5
+024 brV24 TBR.sh _mec10sc_ds clm_glcmec^nl_urb_br 19980115:1800 48x96 gx3v7 72+72 arb_ic ........FAIL! rc= 5
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_24
+
+Changes answers relative to baseline: answers bit-for-bit, but SNOWLIQ/SNOWICE on history
+ files change.
+
+===============================================================
+===============================================================
+Tag name: clm4_0_24
+Originator(s): erik (Erik Kluzek)
+Date: Wed Feb 9 13:20:39 MST 2011
+One-line Summary: Fix mksurfdata and add ability to override soil_fmax
+
+Purpose of changes:
+
+Fix mksurfdata for urban. Add soil_fmx to mksurfdata. Add attributes to surface datasets
+that tell you the special namelist settings (such as all_urban, soil_, pft_). Add -irrig
+as option to mksurfdata.pl. Update datm with new datasets for urbanc_alpha. Add new frac
+dataset for urbanc_alpha. Update documentation to cesm1_0_rel_09_clm4_0_14 tag. Change
+test_driver from jaguar to jaguarpf. Fix bug in build-namelist creating namelist
+with clm_usr_name option.
+
+Bugs fixed (include bugzilla ID):
+ 1281 (bug in mksurfdata for urban_only case)
+ 1280 (improve modularity of mksurfdata) [partial]
+ 1276 (urbanc_alpha site does not work) [partial]
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1279 (Latest version of PTCLM requires python2.5)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1283 (CLM with glacier-MEC fails running on intel)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1304 (bug in interpinic *_var)
+ 1306 (mksoifmaxInit is NOT called)
+ 1305 (dvolrdt is documented with the wrong units)
+ 1318 (interpinic has trouble with new restart files)
+    1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ Update frac and domain file for urbanc_alpha site
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ scripts, drv and csm_share to cesm1_0_beta15 versions
+
+ scripts to scripts4_110204
+ drv to drvseq3_1_48
+ csm_share to share3_110201
+ datm to datm8_110204
+
+List all files eliminated:
+
+ D models/lnd/clm/tools/mksurfdata/mkfmaxMod.F90 --- Put inside of mksoilMod.F90
+
+ D models/lnd/clm/test/system/tests_pretag_jaguar ------- rename to jaguarpf
+ D models/lnd/clm/test/system/tests_pretag_jaguar_nompi - rename to jaguarpf
+
+>>>>>>>>>>>> Remove files that were no longer used
+>>>>>>>>>>>> (they are already in mksoilMod or mkpftMod)
+ D models/lnd/clm/tools/mksurfdata/mkorganic.F90
+ D models/lnd/clm/tools/mksurfdata/mkrank.F90
+ D models/lnd/clm/tools/mksurfdata/mkirrig.F90
+ D models/lnd/clm/tools/mksurfdata/mksoicolMod.F90
+
+List all files added and what they do:
+
+>>>>>>>>>>>> Add an irrigation and urban test for mksurfdata
+ A models/lnd/clm/test/system/nl_files/mksrfdt_10x15_irr_1850
+ A models/lnd/clm/test/system/nl_files/mksrfdt_1x1_vancouverCAN_2000
+
+ A models/lnd/clm/test/system/tests_pretag_jaguarpf ------- rename from jaguar
+ A models/lnd/clm/test/system/tests_pretag_jaguarpf_nompi - rename from jaguar
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>> Add all_urban and irrigation mksurfdata tests
+>>>>>>>>>>>> Update jaguar to jaguarpf
+ M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_posttag_yong
+ M models/lnd/clm/test/system/tests_pretag_jaguar_nompi
+ M models/lnd/clm/test/system/input_tests_master
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/TCBtools.sh ---- Remove copy of *.h files
+ M models/lnd/clm/test/system/test_driver.sh - change jaguar to jaguarpf, update
+ modules to agree with scripts
+ M models/lnd/clm/test/system/CLM_runcmnd.sh - change jaguar to jaguarpf
+
+>>>>>>>>>>>> Add soil_fmax option and soil_fmx, soil_col and irrig option to
+>>>>>>>>>>>> mksurfdata.pl. Add attributes to file for override cases.
+>>>>>>>>>>>> Put mkfmax inside of mksoilMod, add mksoilAtt and mkpftAtt methods.
+ M models/lnd/clm/tools/mksurfdata/Srcfiles -------- Remove unused files
+ M models/lnd/clm/tools/mksurfdata/mksoilMod.F90 --- Move mkfmax inside here
+ add mksoifmaxInit, mkfmax, and mksoilAtt interfaces, add soil_fmax as
+ an override setting
+ M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 ---- Fix bug in if statement
+ (needed to also ask if .not. zerod_out). Create mkpftAtt interface, move
+ settings from mkfileMod.F90 to there.
+ M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 --- Move soil and pft specific
+            declarations to either mksoilAtt or mkpftAtt interfaces
+ if all_urban is set add all_urban=TRUE attribute to file
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ---- Add soil_fmax to namelist
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl --- Add ability to set soil_col,
+ soil_fmax and irrig on command line
+ bring irrigation, setting of numpft and query of lai file from crop branch
+
+>>>>>>>>>>>> Update urbanc_alpha domain/frac files
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml -------- Add mksrf_flai
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml -- Set
+ urbanc_alpha default mask to test
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml ----- urbanc_alpha
+ domain file
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ------ urbanc_alpha
+ frac file
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml Add in
+ mksrf_flai file
+
+>>>>>>>>>>>> Update to cesm1_0_rel_09_clm4_0_14 documentation (includes info on new bugs)
+ M models/lnd/clm/doc/UsersGuide/single_point.xml
+ M models/lnd/clm/doc/UsersGuide/tools.xml
+ M models/lnd/clm/doc/KnownBugs
+
+Summary of testing:
+
+ bluefire interactive testing:
+009 blHS3 TBL.sh _nrcnsc_do clm_usrdat 20030101:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic ......FAIL! rc= 5
+ jaguarpf interactive testing:
+011 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+012 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+013 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+014 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+026 blVx3 TBL.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -10 arbFAIL! rc= 5
+034 blV63 TBL.sh _mec10sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ...................FAIL! rc= 7
+ edinburgh/lf95 interactive testing:
+022 blVx3 TBL.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -10 arbFAIL! rc= 5
+ mirage/intel interactive testing: All PASS except...
+017 erVx3 TER.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -4+-6 aFAIL! rc= 5
+018 brVx3 TBR.sh _mec10sc_do clm_transient_glcMEC_rcp4.5^nl_urb_br 20331231:1800 48x96 gx3v7@1850-21FAIL! rc= 5
+019 blVx3 TBL.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -10 arbFAIL! rc= 4
+021 erR53 TER.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:1800 10x15 USGS@1850 10+38 cold .........FAIL! rc= 13
+022 brR53 TBR.sh 17p_cnc13sc_do clm_std^nl_urb_br 20020115:1800 10x15 USGS@1850 72+72 cold ......FAIL! rc= 11
+024 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+025 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 10+38 arb_ic ...........FAIL! rc= 5
+026 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+027 blG53 TBL.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+029 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 10+38 cold FAIL! rc= 13
+030 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 72+72 cold FAIL! rc= 11
+ yong/intel interactive testing:
+011 smD94 TSM.sh _persc_ds clm_per^nl_per 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 10
+012 erD94 TER.sh _persc_ds clm_per^nl_per 20021231:1200 4x5 gx3v7 72+72 cold ....................FAIL! rc= 5
+019 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 10+38 cold FAIL! rc= 13
+020 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 72+72 cold FAIL! rc= 11
+022 smV24 TSM.sh _mec10sc_ds clm_glcmec 19980115:1800 48x96 gx3v7 96 arb_ic .....................FAIL! rc= 10
+023 erV24 TER.sh _mec10sc_ds clm_glcmec 19980115:1800 48x96 gx3v7 10+38 arb_ic ..................FAIL! rc= 5
+024 brV24 TBR.sh _mec10sc_ds clm_glcmec^nl_urb_br 19980115:1800 48x96 gx3v7 72+72 arb_ic ........FAIL! rc= 5
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_23
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_23
+Originator(s): erik (Erik Kluzek)
+Date: Thu Feb 3 13:42:17 MST 2011
+One-line Summary: Add in new glacier-MEC use-cases
+
+Purpose of changes:
+
+Add in new datasets and use-cases for glc_mec to support glc_nec=10 for 1850, 2000,
+1850-2000, and 1850-2100 for all 4 rcp's. Standardize naming convention for use-cases.
+Use scripts branch that has new compsets in it that access the new use-cases. Make sure
+ncdpio is used for all I/O. Work with PTCLM a bit, and PTCLM testing. Change precedence
+for build-namelist so that use-case is lower after user_nl_clm.
+
+Bugs fixed (include bugzilla ID):
+ 1273 (fix pts_mode problem on jaguar)
+ 1256 (fix PTCLM testcases.csh on jaguar to use netcdf/3)
+ 1254 (PTCLM add .nc and date to pft-physiology file copy)
+ 1250 (add scratchroot in PTCLM for generic machines)
+ 1247 (some changes in ncdio_pio) [partial]
+ 1224 (Fix -aerdepgrid/ndepgrid options in PTCLM.py)
+ 1168 (Change precedence so user_nl_clm used over use-case)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1276 (urbanc_alpha site does not work)
+ 1279 (Latest version of PTCLM requires python2.5)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1304 (bug in interpinic *_var)
+ 1306 (mksoifmaxInit is NOT called)
+ 1305 (dvolrdt is documented with the wrong units)
+ 1318 (interpinic has trouble with new restart files)
+    1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Change order of precedence so that
+ use_case is AFTER -namelist -infile and clm_usr_name options.
+ Thus values in your user_nl_clm file will be used instead of what's in
+ the use_case.
+
+ New precedence is...
+ 1. values set on the command-line using the -namelist option,
+ 2. values read from the file specified by -infile,
+ 3. datasets from the -clm_usr_name option,
+ 4. values set from a use-case scenario, e.g., -use_case
+ 5. values from the namelist defaults file.
+List any changes to the defaults for the boundary datasets:
+ New datasets for glc_nec="10"
+ surfdata for 1850@(f09,f19,T31)
+ pftdyn for 1850-2000@(f09,f19,T31)
+ pftdyn for 1850-2100@(f09,f19,T31) rcp (2.6,4.5,6,8.5)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, datm, cism
+
+ scripts to glccsbr01_scripts4_110111
+ datm to datm8_110124
+ cism to cism1_110125
+
+List all files eliminated:
+
+ Rename use-cases to versions with an _pd ending or 2000_*_control form
+
+ models/lnd/clm/bld/namelist_files/use_cases/...
+ D .../use_cases/stdurbpt.xml ----- rename to stdurbpt_pd
+ D .../use_cases/glacier_mec.xml -- rename to 2000_glacierMEC_control
+ D .../use_cases/pergro.xml ------- rename to pergro_pd
+ D .../use_cases/pergro0.xml ------ rename to pergro0_pd
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>> Add tests for all new glacier-MEC use-cases
+ A models/lnd/clm/test/system/nl_files/clm_glcmec
+ A models/lnd/clm/test/system/nl_files/clm_transient_glcMEC_rcp2.6
+ A models/lnd/clm/test/system/nl_files/clm_transient_glcMEC_rcp4.5
+ A models/lnd/clm/test/system/nl_files/clm_transient_glcMEC_rcp8.5
+ A models/lnd/clm/test/system/nl_files/clm_transient_glcMEC_rcp6
+
+ A models/lnd/clm/test/system/nl_files/nl_per - PERGRO testing namelist
+
+>>>>>>>>>>>>>>
+ models/lnd/clm/bld/namelist_files/use_cases/...
+ A .../use_cases/pergro_pd.xml -------------- Renamed from pergro
+ A .../use_cases/2000_glacierMEC_control.xml Copy of glacier_mec_pd
+ A .../use_cases/stdurbpt_pd.xml ------------ Renamed from stdurbpt
+ A .../use_cases/pergro0_pd.xml ------------- Renamed from pergro0
+ A .../use_cases/README --------------------- Add README file to describe
+ naming convention for use_cases
+
+>>>>>>>>>>>>>> Add new glacier_MEC use_cases
+ models/lnd/clm/bld/namelist_files/use_cases/...
+ A .../use_cases/1850_glacierMEC_control.xml
+ A .../use_cases/20thC_glacierMEC_transient.xml
+ A .../use_cases/1850-2100_rcp6_glacierMEC_transient.xml
+ A .../use_cases/1850-2100_rcp2.6_glacierMEC_transient.xml
+ A .../use_cases/1850-2100_rcp4.5_glacierMEC_transient.xml
+ A .../use_cases/1850-2100_rcp8.5_glacierMEC_transient.xml
+ A .../use_cases/glacierMEC_pd.xml --- renamed from glacier_mec
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>> Add some new glacierMEC use-case tests
+ M models/lnd/clm/test/system/README.testnames ------------- Add n,w,x,y glcMEC resolutions
+ M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_posttag_kraken
+ M models/lnd/clm/test/system/tests_posttag_yong
+ M models/lnd/clm/test/system/tests_pretag_jaguar_nompi
+ M models/lnd/clm/test/system/tests_posttag_mirage
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_posttag_intrepid_nompi
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+
+>>>>>>>>>>>>>> Get glcmec branch testing to work and pergro testing working with
+>>>>>>>>>>>>>> build-namelist precedence change
+ M models/lnd/clm/test/system/TBR.sh ------------- Match history files
+ NOT restart-history files
+ M models/lnd/clm/test/system/nl_files/clm_per0 -- Change use-case name
+ M models/lnd/clm/test/system/nl_files/nl_urb_br - Add hist_fincl2 to remove
+            any secondary history files from use-case
+ M models/lnd/clm/test/system/nl_files/clm_per --- Change use-case name
+ M models/lnd/clm/test/system/input_tests_master - Add new tests
+ M models/lnd/clm/test/system/TSM.sh ------------- Make restart file touched
+ with .nc extension, remove bit about deleting clm.i files
+
+>>>>>>>>>>>>>> Use mksrf_glacier files from XML database, add glc_nec to mksurfdata.pl,
+>>>>>>>>>>>>>> allow glc_nec=0, and don't write out glcmec fields if glc_nec=0.
+ M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 --- Don't define glc_nec
+ fields if nglcec == 0.
+ M models/lnd/clm/tools/mksurfdata/mkglcmecMod.F90 - Set nglcec=0 by default,
+ add ability to handle nglcec=0
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ---- Don't write out or call
+ glc-mec stuff if nglcec == 0.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl --- Add ability to set glc_nec
+ get mksrf_glacier file from XML database
+
+>>>>>>>>>>>>>> Add in new glc_nec=10 datasets, change precedence order in
+>>>>>>>>>>>>>> build-namelist so use_case is AFTER -namelist/-infile/-clm_usr_name.
+>>>>>>>>>>>>>> Add mksrf_glacier files to XML database
+ M models/lnd/clm/bld/listDefaultNamelist.pl - Make faster and add settings
+ for glc_nec and glc_grid, also add loop over sim_year_range
+ M models/lnd/clm/bld/build-namelist --------- Change precedence order so
+ that use-cases are after namelist and infile (thus user_nl_clm files
+ are used in place of the use-case. Check that the use-cases follow
+ a strict naming convention (ensures will work with PTCLM.py).
+ This is the new order of precedence
++ 1. values set on the command-line using the -namelist option,
++ 2. values read from the file specified by -infile,
++ 3. datasets from the -clm_usr_name option,
++ 4. values set from a use-case scenario, e.g., -use_case
++ 5. values from the namelist defaults file.
+
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --------- Add
+ mksrf_glacier file for mksurfdata.pl to XML database
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist ------------- Change
+ order of precedence so that use_case is after infile and namelist
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ------- New glc datasets
+ surfdata for 1850@(f09,f19,T31)
+ pftdyn for 1850-2000@(f09,f19,T31)
+ pftdyn for 1850-2100@(f09,f19,T31) rcp (2.6,4.5,6,8.5)
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml - Add
+ mksrf_glacier datasets for glc_nec=0 (previous version), glc_nec=3,5,10
+ use older glacier dataset that Bill Lipscomb says has better glacier
+ representation over greenland
+>>>>>>>>>>>>>> Remove direct calls to pio -- use ncdio_pio module for all NetCDF
+>>>>>>>>>>>>>> read/write/define/query. Write out at initialization if there is no
+>>>>>>>>>>>>>> land and won't be running CLM.
+ M models/lnd/clm/src/main/clm_initializeMod.F90 - Set subname and write out
+ if no land exists and clm will NOT be run
+ M models/lnd/clm/src/main/iniTimeConst.F90 ------ Use ncd_io to read in mxsoil_color
+ (remove direct calls to pio)
+ M models/lnd/clm/src/main/histFileMod.F90 ------- Use ncd_io to read/write
+ everything (remove direct calls to pio). Add max_nFields function.
+ M models/lnd/clm/src/main/restFileMod.F90 ------- Change use of PIO_GLOBAL
+ to NCD_GLOBAL
+ M models/lnd/clm/src/main/ncdio_pio.F90 --------- Remove making pio interfaces
+ public, add new interfaces to ncd_io global, add dimexist as optional
+ argument to ncd_inqdid, and name as optional argument to ncd_inqdlen
+            change ncd_io interfaces that do NOT need to call scam_field_offsets
+ so that they don't. Initialize data_offset and pfts to bigint NOT nan.
+ New interfaces:
++ module procedure ncd_io_char_var1_nf
++ module procedure ncd_io_char_var3_nf
++ module procedure ncd_io_char_varn_strt_nf
+ M models/lnd/clm/src/main/surfrdMod.F90 --------- Use ncd_inqdid and ncd_inqvid
+ instead of pio interfaces directly.
+
+Summary of testing:
+
+ bluefire: All PASS except...
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+037 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+057 blV61 TBL.sh _mec10sc_dh clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ...................FAIL! rc= 7
+061 blVn1 TBL.sh _mec10sc_dh clm_transient_glcMEC_rcp8.5 20331231:1800 1.9x2.5 gx1v6@1850-2100 48 arFAIL! rc= 5
+063 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+064 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+065 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+066 blLI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+
+glcmec TBL tests fail since they didn't exist in previous version
+
+ bluefire interactive testing:
+006 smHS3 TSM.sh _nrcnsc_do clm_usrdat 20030101:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic ......FAIL! rc= 8
+007 erHS3 TER.sh _nrcnsc_do clm_usrdat 20030101:1800 13x12pt_f19_alaskaUSA gx1v6 -3+-3 arb_ic ...FAIL! rc= 5
+008 brHS3 TBR.sh _nrcnsc_do clm_usrdat 20030101:1800 13x12pt_f19_alaskaUSA gx1v6 -3+-3 arb_ic ...FAIL! rc= 5
+009 blHS3 TBL.sh _nrcnsc_do clm_usrdat 20030101:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic ......FAIL! rc= 4
+044 blV63 TBL.sh _mec10sc_do clm_glcmec 19980115:1800 1.9x2.5 gx1v6 48 arb_ic ...................FAIL! rc= 7
+060 sm974 TSMscript_tools.sh mksurfdata mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds ....FAIL! rc= 6
+ bluefire/CESM testing: All PASS except... (new compare tests fail since didn't exist before)
+BFAIL ERI.f19_g16.IG1850.bluefire.compare.clm4_0_22
+BFAIL ERS_D.T31_g37.IGRCP26CN.bluefire.compare.clm4_0_22
+BFAIL PST.f10_f10.I20TRCN.bluefire.compare.clm4_0_22
+BFAIL PET_PT.f10_f10.I20TRCN.bluefire.compare.clm4_0_22
+BFAIL SMS.f10_f10.IRCP45CN.bluefire.compare.clm4_0_22
+
+PEND ERS_D.f45_g37.I.bluefire.GC.003008
+PEND ERP.T31_g37.IGRCP60CN.bluefire.GC.003008
+PEND ERH_D.f10_f10.I1850CN.bluefire.GC.003008
+PEND ERS_D.f19_g16.IRCP85CN.bluefire.GC.003008
+
+ bluefire/CESM Extra testing:
+PASS ERI.f19_g16.IG1850.bluefire
+PASS ERI.f19_g16.IG1850CN.bluefire
+PASS ERS.T31_g37.IGRCP45CN.bluefire
+PASS ERS.T31_g37.IGRCP85CN.bluefire
+
+PASS ERS_D.T31_g37.IG.bluefire
+PASS ERS_D.f19_g16.IGCN.bluefire
+
+ bluefire/CESM Extra testing for coupled with CAM and CAM/POP:
+PASS ERI.f19_g16.BGCN.bluefire
+PASS SMS_D.f19_g16.BG1850CN.bluefire
+PASS ERP.f09_g16.BG20TRCN.bluefire
+PASS ERS.T31_g37.BGRCP26CN.bluefire
+RUN ERS_D.T31_g37.BGRCP45CN.bluefire.111336 --- takes too long
+PASS ERS.T31_g37.BGRCP60CN.bluefire
+RUN ERS_D.T31_g37.BGRCP85CN.bluefire.111336 --- takes too long
+FAIL SMS.f19_f19.EGCN.bluefire ----------------- seg-fault
+FAIL SMS.T31_T31.EG1850CN.bluefire ------------- seg-fault
+PASS ERI.f09_f09.FGCN.bluefire
+PASS SMS.f19_f19.FG1850CN.bluefire
+PASS SMS.T31_T31.FG20TRCN.bluefire
+FAIL SMS.T31_g37.TG.bluefire
+
+ bluefire/PTCLM testing:
+PTCLM.631306_1x1_mexicocityMEX_ICN.PTCLM PASS
+PTCLM.631306_1x1_mexicocityMEX_I.PTCLM PASS
+PTCLM.631306_1x1_mexicocityMEX_I_QIAN.PTCLM PASS
+PTCLM.631306_US-Ha1_I_1850.PTCLM PASS
+PTCLM.631306_US-Ha1_I20TR.PTCLM PASS
+PTCLM.631306_US-Ha1_I20TRCN.PTCLM PASS
+PTCLM.631306_US-Ha1_ICN.PTCLM PASS
+PTCLM.631306_US-Ha1_I1850CN.PTCLM PASS
+PTCLM.631306_US-Ha1_IRCP85CN.PTCLM PASS
+PTCLM.631306_US-Ha1_I.PTCLM PASS
+PTCLM.631306_US-Ha1_I_QIAN.PTCLM PASS
+PTCLM.631306_US-Ha1_I.PTCLM PASS
+PTCLM.631306_US-UMB_I.PTCLM PASS
+PTCLM.631306_US-UMB_I_QIAN.PTCLM PASS
+PTCLM.631306_US-UMB_I.PTCLM PASS
+US-Ha1_ICN_ad_spinup.PTCLM PASS
+ jaguar interactive testing: All PASS except...
+011 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+012 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+013 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+014 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+026 blVx3 TBL.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -10 arbFAIL! rc= 5
+ jaguar/PTCLM testing:
+PTCLM.4224_1x1_mexicocityMEX_ICN.PTCLM PASS
+PTCLM.4224_1x1_mexicocityMEX_I.PTCLM PASS
+PTCLM.4224_1x1_mexicocityMEX_I_QIAN.PTCLM PASS
+PTCLM.4224_US-Ha1_I_1850.PTCLM FAIL 0
+PTCLM.4224_US-Ha1_I20TR.PTCLM FAIL 0
+PTCLM.4224_US-Ha1_I20TRCN.PTCLM FAIL 0
+PTCLM.4224_US-Ha1_ICN.PTCLM FAIL 0
+PTCLM.4224_US-Ha1_I1850CN.PTCLM FAIL 0
+PTCLM.4224_US-Ha1_IRCP85CN.PTCLM FAIL 0
+PTCLM.4224_US-Ha1_I.PTCLM FAIL 0
+PTCLM.4224_US-Ha1_I_QIAN.PTCLM FAIL 0
+PTCLM.4224_US-Ha1_I.PTCLM FAIL 0
+PTCLM.4224_US-UMB_I.PTCLM FAIL 0
+PTCLM.4224_US-UMB_I_QIAN.PTCLM FAIL 0
+PTCLM.4224_US-UMB_I.PTCLM FAIL 0
+US-Ha1_ICN_ad_spinup.PTCLM PASS
+ edinburgh/lf95 interactive testing: All PASS up to...
+022 blVx3 TBL.sh _mec10sc_do clm_transient_glcMEC_rcp4.5 20331231:1800 48x96 gx3v7@1850-2100 -10 arbFAIL! rc= 5
+ edinburgh/PTCLM testing: Fails because Python is too OLD (2.4 when needs 2.5)
+ yong/intel/PTCLM testing: Following PASS...
+PTCLM.4900_1x1_mexicocityMEX_ICN.PTCLM PASS
+PTCLM.4900_1x1_mexicocityMEX_I.PTCLM PASS
+PTCLM.4900_1x1_mexicocityMEX_I_QIAN.PTCLM PASS
+PTCLM.4900_US-Ha1_I_1850.PTCLM PASS
+PTCLM.4900_US-Ha1_I20TR.PTCLM PASS
+PTCLM.4900_US-Ha1_I20TRCN.PTCLM PASS
+PTCLM.4900_US-Ha1_ICN.PTCLM PASS
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_22
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_22
+Originator(s): erik (Erik Kluzek)
+Date: Thu Jan 20 13:17:56 MST 2011
+One-line Summary: Move coupler field indices to clm, move cpl_* directories up a level, add the cpl_share directory
+
+Purpose of changes:
+
+Move cpl_* directories up a level, add cpl_share directory. Update driver, move coupler
+field indices to clm, and allow fields to be passed in driver with just names added to
+namelist. Make is_restart() public in clm_time_manager.F90. Fix PTS_MODE. Don't pass
+Sl_landfrac to driver in run-phase.
+
+Bugs fixed (include bugzilla ID):
+ 1271 (Problem in PTS_MODE with clm)
+ 1270 (Make is_restart public in clm_time_manager.F90)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1224 (Fix -aerdepgrid/ndepgrid options in PTCLM.py)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1273 (fix pts_mode problem on jaguar)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1304 (bug in interpinic *_var)
+ 1306 (mksoifmaxInit is NOT called)
+ 1305 (dvolrdt is documented with the wrong units)
+ 1318 (interpinic has trouble with new restart files)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1110 (dtlimit error in datm8 with partial year forcing)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: Change Filepath
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, mvertens (most code changes originate from mvertens)
+ I made some tweaks after the review and added protex header documentation to the
+ new clm_cpl_indices file.
+
+List any svn externals directories updated (csm_share, mct, etc.): datm, cism
+ datm to datm8_110118
+ cism to cism1_100913
+
+List all files eliminated:
+
+>>>>>>>>> Move two directories up a level
+ D models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90
+ D models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90
+ D models/lnd/clm/src/main/cpl_esmf/lnd_comp_mct.F90
+
+List all files added and what they do:
+
+>>>>>>>>> Use this local version of indices rather than seq_indices_mod.F90
+ A models/lnd/clm/src/cpl_share/clm_cpl_indices.F90
+
+>>>>>>>>> Move two directories up a level, use local version of indices rather
+>>>>>>>>> than seq_* version and remove sending landfrac at run phase.
+ A models/lnd/clm/src/cpl_mct/lnd_comp_mct.F90
+ A models/lnd/clm/src/cpl_esmf/lnd_comp_esmf.F90
+ A models/lnd/clm/src/cpl_esmf/lnd_comp_mct.F90
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/doc/README -- Update directory info.
+
+>>>>>>>>> Change pts_mode test so that RTM is not turned on.
+ M models/lnd/clm/test/system/input_tests_master
+
+>>>>>>>>> Change Filepath
+ M models/lnd/clm/bld/configure
+
+>>>>>>>>> Make is_restart() method public
+ M models/lnd/clm/src/main/clm_time_manager.F90
+
+>>>>>>>>> Fix PTS_MODE.
+ M models/lnd/clm/src/main/pftvarcon.F90 ------- Pass posNOTonfile=.true. down
+ to ncd_io methods so won't check for lat/lon
+ M models/lnd/clm/src/main/ncdio_pio.F90 ------- Add posNOTonfile option to global
+ reads so that if set, won't try to find nearest lat/lon to PTS_MODE point
+ (for files that are global data NOT spatial).
+ M models/lnd/clm/src/biogeophys/SNICARMod.F90 - Pass posNOTonfile=.true. down
+ to ncd_io methods so won't check for lat/lon
+
+Summary of testing:
+
+ bluefire: All PASS except...
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+037 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+059 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+060 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+061 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+062 blLI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+>>>>>>>> Test was changed to remove RTM
+025 blAK4 TBL.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 1.9x2.5 gx1v6 -10 cold ...............FAIL! rc= 5
+ bluefire/CESM testing: All PASS except...
+FAIL SMS_RLA.f45_f45.I.bluefire.compare_hist.clm4_0_20
+FAIL SMS_RLA.f45_f45.I.bluefire.compare.clm4_0_20
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm4_0_20
+FAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_20
+FAIL ERS_D.f45_g37.I.bluefire.compare_hist.clm4_0_20
+FAIL ERS_D.f45_g37.I.bluefire.compare.clm4_0_20
+FAIL PST.f45_g37.I1850.bluefire.compare.clm4_0_20
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm4_0_20
+FAIL ERS_E.f19_g16.I1850.bluefire.compare.clm4_0_20
+FAIL ERS_E.f19_g16.I1850.bluefire.compare_hist.clm4_0_20
+FAIL ERI.f19_g16.IG.bluefire.compare.clm4_0_20
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm4_0_20
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_20
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm4_0_20
+FAIL PST.f10_f10.I8520CN.bluefire.compare.clm4_0_20
+FAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm4_0_20
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_20
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_20
+>>>>>>>> Compare tests fail because Sl_landfrac is missing on new case
+>>>>>>>> Everything else is identical
+ bluefire/PTCLM testing: All PASS
+ edinburgh/lf95 interactive testing: All PASS except...
+006 blAL4 TBL.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 5
+ yong/intel interactive testing:
+006 blAL4 TBL.sh _nrsc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ..................FAIL! rc= 5
+011 smD94 TSM.sh _persc_ds clm_per^nl_urb 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 10
+012 erD94 TER.sh _persc_ds clm_per^nl_urb 20021231:1200 4x5 gx3v7 72+72 cold ....................FAIL! rc= 5
+013 blD94 TBL.sh _persc_ds clm_per^nl_urb 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 4
+019 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 10+38 cold FAIL! rc= 13
+020 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 72+72 cold FAIL! rc= 11
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_21
+
+Changes answers relative to baseline: bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_21
+Originator(s): jedwards (Jim Edwards)
+Date: Wed Jan 12 14:50:45 MST 2011
+One-line Summary: Remove includes, finish PIO transition
+
+Purpose of changes:
+
+Code cleanup
+
+Remove misc.h/preproc.h, update SNICARMod to use ncdio_pio calls rather than NetCDF
+directly.
+
+Bugs fixed (include bugzilla ID):
+ 394 (misc.h and preproc.h NOT used at all anymore)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1224 (Fix -aerdepgrid/ndepgrid options in PTCLM.py)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1271 (Problem in PTS_MODE with clm)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1304 (bug in interpinic *_var)
+ 1306 (mksoifmaxInit is NOT called)
+ 1305 (dvolrdt is documented with the wrong units)
+ 1318 (interpinic has trouble with new restart files)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1110 (dtlimit error when a full year isn't available)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: Removed generation of files misc.h and preproc.h
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, Erik K
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: misc.h, preproc.h
+
+D models/lnd/clm/tools/mksurfdata/misc.h
+D models/lnd/clm/tools/mksurfdata/preproc.h
+D models/lnd/clm/tools/mkdatadomain/preproc.h
+D models/lnd/clm/tools/mkdatadomain/misc.h
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ models/lnd/clm/tools/mkgriddata/README
+ models/lnd/clm/tools/mksurfdata/README
+ models/lnd/clm/tools/mkdatadomain/README
+ models/lnd/clm/tools/README
+ models/lnd/clm/bld/configure
+ models/lnd/clm/bld/clm.cpl7.template
+ models/lnd/clm/src/biogeochem/CNCStateUpdate2Mod.F90
+ models/lnd/clm/src/biogeochem/CASAPhenologyMod.F90
+ models/lnd/clm/src/biogeochem/CNGapMortalityMod.F90
+ models/lnd/clm/src/biogeochem/CNC13StateUpdate2Mod.F90
+ models/lnd/clm/src/biogeochem/CNGRespMod.F90
+ models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90
+ models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90
+ models/lnd/clm/src/biogeochem/CNNStateUpdate3Mod.F90
+ models/lnd/clm/src/biogeochem/CNFireMod.F90
+ models/lnd/clm/src/biogeochem/CNPrecisionControlMod.F90
+ models/lnd/clm/src/biogeochem/CNSummaryMod.F90
+ models/lnd/clm/src/biogeochem/CNDVLightMod.F90
+ models/lnd/clm/src/biogeochem/CNPhenologyMod.F90
+ models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90
+ models/lnd/clm/src/biogeochem/CNCStateUpdate3Mod.F90
+ models/lnd/clm/src/biogeochem/CNC13StateUpdate1Mod.F90
+ models/lnd/clm/src/biogeochem/CNC13StateUpdate3Mod.F90
+ models/lnd/clm/src/biogeochem/CNDVEcosystemDynIniMod.F90
+ models/lnd/clm/src/biogeochem/CNAnnualUpdateMod.F90
+ models/lnd/clm/src/biogeochem/C13SummaryMod.F90
+ models/lnd/clm/src/biogeochem/CNNStateUpdate2Mod.F90
+ models/lnd/clm/src/biogeochem/CNNDynamicsMod.F90
+ models/lnd/clm/src/biogeochem/CNAllocationMod.F90
+ models/lnd/clm/src/biogeochem/CNC13FluxMod.F90
+ models/lnd/clm/src/biogeochem/CNEcosystemDynMod.F90
+ models/lnd/clm/src/biogeochem/CNSetValueMod.F90
+ models/lnd/clm/src/biogeochem/CNDVEstablishmentMod.F90
+ models/lnd/clm/src/main/organicFileMod.F90
+ models/lnd/clm/src/main/dynlandMod.F90
+ models/lnd/clm/src/main/accFldsMod.F90
+ models/lnd/clm/src/main/fileutils.F90
+ models/lnd/clm/src/main/pftdynMod.F90
+ models/lnd/clm/src/main/pft2colMod.F90
+ models/lnd/clm/src/main/restFileMod.F90
+ models/lnd/clm/src/main/clm_varsur.F90
+ models/lnd/clm/src/main/controlMod.F90
+ models/lnd/clm/src/main/initSurfAlbMod.F90
+ models/lnd/clm/src/main/filterMod.F90
+ models/lnd/clm/src/main/clm_varorb.F90
+ models/lnd/clm/src/main/initGridCellsMod.F90
+ models/lnd/clm/src/main/pftvarcon.F90
+ models/lnd/clm/src/main/spmdMod.F90
+ models/lnd/clm/src/main/domainMod.F90
+ models/lnd/clm/src/riverroute/RunoffMod.F90
+ models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90
+ models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90
+ models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90
+ models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90
+ models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90
+ models/lnd/clm/src/biogeophys/QSatMod.F90
+ models/lnd/clm/src/biogeophys/clm_driverInitMod.F90
+ models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90
+
+ models/lnd/clm/src/biogeophys/SNICARMod.F90
+ models/lnd/clm/src/main/ncdio_pio.F90
+
+ Removed reference to preproc.h and misc.h in all files. Converted snicarmod to use pio
+ and added support for a 3d non-decomposed real variable in ncdio.
+
+Summary of testing:
+
+ bluefire: All PASS except...
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+037 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+059 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+060 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+061 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+062 blLI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+NOTE: pts_mode tests failed... (bug 1271)
+ bluefire/CESM testing: All PASS
+
+ jaguarpf: All pass except ...
+007 brB91 TBR.sh _scsnf_dh clm_std^nl_urb 20021230:1800 4x5 gx3v7 72+72 arb_ic ..................FAIL! rc= 10
+022 erH92 TER.sh 17p_cnsc_dm clm_ndepdyn^nl_cn_conly 20020101:1800 4x5 gx3v7@1850-2000 10+38 cold FAIL! rc= 13
+023 brH92 TBR.sh 17p_cnsc_dm clm_ndepdyn^nl_cn_conly 20020101:1800 4x5 gx3v7@1850-2000 72+72 cold FAIL! rc= 11
+038 smLI2 TSM.sh _sc_dm clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+039 erLI2 TER.sh _sc_dm clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+040 brLI2 TBR.sh _sc_dm clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+041 blLI2 TBL.sh _sc_dm clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+042 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:1800 10x15 USGS 96 arb_ic ....................FAIL! rc= 10
+043 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:1800 10x15 USGS 72+72 arb_ic .................FAIL! rc= 5
+044 brL58 TBR.sh _sc_dh clm_std^nl_crcrop 20020115:1800 10x15 USGS 72+72 arb_ic .................FAIL! rc= 5
+045 blL58 TBL.sh _sc_dh clm_std^nl_crcrop 20020115:1800 10x15 USGS 48 arb_ic ....................FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_19
+
+Changes answers relative to baseline: bit-for-bit
+
+===============================================================
+Tag name: clm4_0_20
+Originator(s): erik (Erik Kluzek)
+Date: Tue Jan 11 11:18:30 MST 2011
+One-line Summary: Update for ESMF metadata, update doc. from release branch,
+ bug fixes (doc of qflx_evap_tot, threading CNDV, aer/ndepregrid)
+
+Purpose of changes:
+
+Update externals, fix in datm speeds up single-point simulations, update for esmf
+metadata capability. Update documentation from Release branch (cesm1_0_rel07_clm4_0_14).
+Fix documentation of qflx_evap_tot. Fix ndepregrid/aerdepregrid scripts. Fix threading
+problem with CNDV.
+
+Bugs fixed (include bugzilla ID):
+ 1266 (Threading problem with CNDV)
+ 1265 (Fix ndep/aerdepregrid.ncl)
+ 1264 (Incorrect doc of qflx_evap_tot in the code)
+ 1258 (runinit_ibm.csh needs to be updated)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1224 (Fix -aerdepgrid/ndepgrid options in PTCLM.py)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1281 (bug in mksurfdata for urban_only case)
+ 1282 (Trouble running to last CLM1PT atm time-step)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1304 (bug in interpinic *_var)
+ 1306 (mksoifmaxInit is NOT called)
+ 1305 (dvolrdt is documented with the wrong units)
+ 1318 (interpinic has trouble with new restart files)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1110 (dtlimit error when a full year isn't available)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes:
+ Small section of clm_driver was moved to a OMP loop. This should
+ improve threading performance slightly.
+
+Code reviewed by: self, doc of qflx_evap_tot by Keith Oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv,
+ stubs, datm, csm_share, timing
+
+ scripts to scripts4_110108
+ drv to drvseq3_1_47
+ sice to stubs1_2_03
+ socn to stubs1_2_03
+ sglc to stubs1_2_03
+ datm to datm8_110106
+ csm_share to share3_101231
+ timing to timing_101215
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>> Fix deposition regrid scripts so they will work (from crop04)
+ M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl - Fix XML queries so will
+ work, using the datm_internal namelist now
+ M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl --- Fix XML queries so will
+ work, using stream_fldfilename_ndep in the ndepdyn_nml namelist.
+
+>>>>>>>>> Fix so will work (from rel07)
+ M models/lnd/clm/tools/interpinic/runinit_ibm.csh - Fix config options so will work
+ M models/lnd/clm/bld/config_files/config_definition.xsl ---- Remove extra empty rows
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl Fix missing ">"
+
+>>>>>>>>> Update documentation (from rel07)
+ M models/lnd/clm/doc/UsersGuide/single_point.xml - Change how supported single-point
+ cases are handled and add documentation on setting start/stop times
+ M models/lnd/clm/doc/UsersGuide/tools.xml -------- Add new options to mksurfdata.pl
+ add notes about bugs, add notes that aer/ndepregrid is optional
+ M models/lnd/clm/doc/UsersGuide/preface.xml ------ Update what_is_new section
+ M models/lnd/clm/doc/UsersGuide/clm_ug.xml ------- Add more versions in quicklist
+ M models/lnd/clm/doc/UsersGuide/appendix.xml ----- Add note about runinit_ibm.csh
+ problem
+ M models/lnd/clm/doc/UsersGuide/custom.xml ------- Remove DATM_PRESAERO=none option,
+ remove hist_crtinic, and use_ndepstream namelist settings
+
+>>>>>>>>> Update documentation (from rel07)
+ M models/lnd/clm/doc/KnownLimitations - Add doc on dtlimit error
+ M models/lnd/clm/doc/KnownBugs -------- Add bug 1168, remove bug 498
+ M models/lnd/clm/doc/README ----------- Rework what's new
+ M models/lnd/clm/doc/index.shtml ------ Add link to KnownLimitations
+ M README ------------------------------ Rework what's new
+
+>>>>>>>>> Fix threading problem with CNDV, by adding an OMP loop in a section
+>>>>>>>>> in clm_driver that didn't have one, pass down beg/end c|g|p indices
+>>>>>>>>> as needed
+ M models/lnd/clm/src/biogeochem/CNSetValueMod.F90 - Pass in begc/endc, begp/endp
+ M models/lnd/clm/src/main/clm_initializeMod.F90 --- Add OMP loop over setFilters
+ M models/lnd/clm/src/main/pftdynMod.F90 ----------- Pass down beg/end indices as needed
+ pftdyn_wbal_init, pftdyn_cnbal, pftwt_interp,
+ M models/lnd/clm/src/main/filterMod.F90 ----------- Pass clump index down to setFilters
+ remove OMP from inside
+ M models/lnd/clm/src/main/clm_driver.F90 ---------- Add OMP loop around section that
+ wasn't inside an OMP loop
+
+>>>>>>>>> Fix the documentation of the qflx_evap_tot field
+ M models/lnd/clm/src/main/clmtype.F90 -------------------- Fix qflx_evap_tot doc
+ M models/lnd/clm/src/main/clm_atmlnd.F90 ----------------- Fix qflx_evap_tot doc
+ M models/lnd/clm/src/main/histFldsMod.F90 ---------------- Fix qflx_evap_tot doc
+ M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90 ------ Fix qflx_evap_tot doc
+ M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90 ---- Fix qflx_evap_tot doc
+ M models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90 ---- Fix qflx_evap_tot doc
+ M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 -------- Fix qflx_evap_tot doc
+ M models/lnd/clm/src/biogeophys/BiogeophysicsLakeMod.F90 - Fix qflx_evap_tot doc
+ M models/lnd/clm/src/biogeophys/UrbanMod.F90 ------------- Fix qflx_evap_tot doc
+ M models/lnd/clm/src/biogeophys/HydrologyLakeMod.F90 ----- Fix qflx_evap_tot doc
+ M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90 -- Fix qflx_evap_tot doc
+
+>>>>>>>>> Add component meta-data for ESMF
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90 - Add meta-data description
+ of CLM
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_mct.F90 -- Add more arguments to
+ lnd_register method
+
+Summary of testing:
+
+ bluefire: All PASS except...
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 4
+037 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+059 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+060 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+061 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+062 blLI1 TBL.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 4
+ bluefire interactive testing: All PASS
+ bluefire/CESM testing: All PASS
+ bluefire/PTCLM testing: All PASS
+ jaguar interactive testing: All PASS except (up to 017 brJ74)...
+011 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic
+.............FAIL! rc= 10
+012 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic
+...........FAIL! rc= 5
+013 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+ up to....
+
+017 brJ74 TBR.sh 4p_nrcasasc_ds clm_std^nl_urb_br 10001230:3600 1x1_tropicAtl test -3+-3 arb_ic .PASS
+ edinburgh/lf95 interactive testing: All PASS except...
+005 smAL4 TSM.sh _sc_ds clm_std^nl_ptsmode 20030101:1800 10x15 USGS -10 cold ....................FAIL! rc= 10
+ edinburgh/PTCLM testing: All PASS up to ...
+myPTCLMtests_US-Ha1_I_1850.PTCLM FAIL 0
+ mirage,storm/ifort interactive testing: All PASS except...
+017 erR53 TER.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:1800 10x15 USGS@1850 10+38 cold .........FAIL! rc= 13
+018 brR53 TBR.sh 17p_cnc13sc_do clm_std^nl_urb_br 20020115:1800 10x15 USGS@1850 72+72 cold ......FAIL! rc= 11
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+021 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 10+38 arb_ic ...........FAIL! rc= 5
+022 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+025 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 10+38 cold FAIL! rc= 13
+026 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 72+72 cold FAIL! rc= 11
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_19
+
+Changes answers relative to baseline: bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_19
+Originator(s): erik (Erik Kluzek)
+Date: Wed Dec 8 22:20:30 MST 2010
+One-line Summary: Bring irrigation branch to the trunk
+
+Purpose of changes:
+
+Add option for simple code to redirect some riverflow to irrigate generic crops.
+Irrigation is turned on at 6AM, runs for 4 hours and keeps soil moisture to 0.7.
+Change corn and wheat indices to c3crop and irrigated generic c3 crop.
+Add QIRRIG as a history file output. Change pft-physiology and RTM flow files from
+ASCII to NetCDF. Single pft-physiology file can handle all cases (has extra FCUR value
+for CNDV), also has four new fields for crops that will come in later: corn, spring-wheat,
+winter-wheat and soybean. Add finidat and fsurdat files for irrigation (f09, f19, f10, finidat
+only for f19). Split RTM run method into three and move subroutines around to where makes
+more sense. Fix a mksurfdata PFT override bug. Synchronize the Makefiles for the tools
+and add build for Darwin intel and PGI and remove Darwin XLF. Remove concurrent
+directives and UNICOSMP, CPP_VECTOR, NEC_SX CPP #ifdefs. Remove some #include
+misc.h/preproc.h statements. Switch pio_close for ncd_close calls. Replace some constants
+with parameters. Remove clm_comp layer and call clm_initialize and clm_driver directly.
+Change mk*.F90 subroutines in mksurfdata into modules, so that argument checking will
+happen at compile-time.
+
+Bugs fixed (include bugzilla ID):
+ 964 (Remove UNICOS #ifdef logic in clm)
+ 1238 (PST test fails)
+ 1249 (problem in mksurfdata for PFT override mode)
+ 1253 (mkglacier in mksurfdata has arguments in wrong order)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1224 (Fix -aerdepgrid/ndepgrid options in PTCLM.py)
+ 1248 (PTCLM can only go to 2005)
+ 1251 (PTCLM testcases aborts in I_QIAN case)
+ 1258 (runinit_ibm.csh needs to be updated)
+ 1264 (Incorrect doc of qflx_evap_tot in the code)
+ 1265 (Fix ndep/aerdepregrid.ncl)
+ 1266 (Threading problem with CNDV)
+ 1298 (Can NOT turn RTM off in CLM_CONFIG_OPTS)
+ 1299 (interpinic does NOT work going from non glc_mec)
+ 1304 (bug in interpinic *_var)
+ 1306 (mksoifmaxInit is NOT called)
+ 1305 (dvolrdt is documented with the wrong units)
+ 1318 (interpinic has trouble with new restart files)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: std-test
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist:
+ Add -irrig and -rtm_res options to build-namelist
+
++ -irrig Seek surface datasets with irrigation turned on.
++ -rtm_res "resolution" Specify river transport model resolution.
+ (Still only have half-degree files in the XML database)
+
+ Add new history field:
+
+ QIRRIG water added through irrigation (mm/s)
+
+List any changes to the defaults for the boundary datasets: NetCDF pft-phys/RTM files
+ NetCDF pft-physiology file: pft-physiology.c101006.nc
+ finidat and surfdata files for irrigation (for 1.9x2.5@2000)
+ surfdata files for irrigation (for f09 and f10)
+ NetCDF River-direction file: clmi.IQirrcr_2000-01-01_1.9x2.5_gx1v6_c101115.nc
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, slevis, dlawren, wsacks, mvertens, swensosc
+ slevis/wsacks -- irrigation changes
+ dlawren -------- convert pft-physiology file to NetCDF
+ mvertens ------- high level restructuring
+ swensosc ------- convert RTM flow file to NetCDF
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts and csm_share
+
+ scripts to scripts4_101206
+ csm_share to csm_share3_101122
+
+List all files eliminated:
+
+D models/lnd/clm/src/main/inicFileMod.F90 --------- Move to inicPerpMod
+D models/lnd/clm/src/main/clm_comp.F90 ------------ Move to clm_initialize and
+ clm_driver
+D models/lnd/clm/src/main/scam_setlatlonidx.F90 --- Use shr_scam version
+D models/lnd/clm/src/main/snowdp2lev.F90 ---------- Move to mkarbinitMod
+D models/lnd/clm/src/main/areaMod.F90 ------------- Split out into relevant modules:
+ celledge -> RtmMapMod
+ map_setmapsAr -> RtmMapMod
+ cellarea -> clm_initialize
+ map_setgatm -> downscaleMod
+D models/lnd/clm/test/system/tests_posttag_spot1 -- rename to yong
+
+D models/lnd/clm/tools/mksurfdata/mkfmax.F90 ---- rename to mkfmaxMod.F90
+D models/lnd/clm/tools/mksurfdata/mkvocef.F90 --- rename to mkvocefMod.F90
+D models/lnd/clm/tools/mksurfdata/mkglacier.F90 - put in mkglcmecMod.F90
+D models/lnd/clm/tools/mksurfdata/mklanwat.F90 -- rename to mklanwatMod.F90
+D models/lnd/clm/tools/mksurfdata/mkelev.F90 ---- put in mkurbanparMod.F90
+D models/lnd/clm/tools/mksurfdata/mkurban.F90 --- put in mkurbanparMod.F90
+D models/lnd/clm/tools/mksurfdata/mksoitexMod.F90 rename to mksoilMod.F90
+
+List all files added and what they do:
+
+A + models/lnd/clm/test/system/nl_files/clm_irrig -- New irrigation test
+
+>>>>>>>>>>> Some high level restructuring/renames
+A + models/lnd/clm/src/main/inicPerpMod.F90 -------- From inicFileMod
+A + models/lnd/clm/src/riverroute/RtmMapMod.F90 ---- From areaMod.F90
+A models/lnd/clm/test/system/tests_posttag_yong -- rename add more tests
+
+>>>>>>>>>>> Rename mksurfdata subroutines into modules
+A models/lnd/clm/tools/mksurfdata/mkfmaxMod.F90
+A models/lnd/clm/tools/mksurfdata/mksoilMod.F90
+A models/lnd/clm/tools/mksurfdata/mkvocefMod.F90
+A models/lnd/clm/tools/mksurfdata/mklanwatMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>> Add irrigation "AZ" tests at 10x15 with irrigation on
+M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+M models/lnd/clm/test/system/tests_pretag_bluefire
+M models/lnd/clm/test/system/tests_pretag_edinburgh
+M models/lnd/clm/test/system/tests_pretag_jaguar_nompi
+M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+M models/lnd/clm/test/system/tests_posttag_purempi_regression
+M models/lnd/clm/test/system/tests_posttag_intrepid_nompi
+M models/lnd/clm/test/system/tests_posttag_nompi_regression
+M models/lnd/clm/test/system/tests_pretag_bluefire_nompi -- Remove repeated test
+
+M models/lnd/clm/test/system/README.testnames --- Add Z res (10x15 with irrig)
+M models/lnd/clm/test/system/test_driver.sh ----- Changes for lynx and yong
+
+>>>>>>>>>>> Fix bug 1249 for PFT overrides, correct irrigation sample namelist
+>>>>>>>>>>> Change subroutines into modules for mk*.F90 files (allows compiler to check args)
+>>>>>>>>>>> Fix bug 1253 putting mksoitex call after mkglacier
+M models/lnd/clm/tools/mksurfdata/mksurfdata.globalirrig - Correct name of irrigation dataset
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 nullify pctpft_i
+M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 Set nlat_i/nlon_i to 1 if PFT override
+M models/lnd/clm/tools/mksurfdata/mkglcmecMod.F90 -- Add mkglacier subroutine
+M models/lnd/clm/tools/mksurfdata/mkurbanparMod.F90 Add mkurban and mkelev subroutines
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ----- Change subroutines into modules
+ nullify pctpft_i, put mksoitex call after mkglacier
+M models/lnd/clm/tools/mksurfdata/Srcfiles --------- Change names of files
+M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 ----- Add mkirrig subroutine
+
+
+>>>>>>>>>>> Sync up the tools Makefiles and add darwin intel and pgi build (remove darwin xlf)
+M models/lnd/clm/tools/mksurfdata/Makefile ---- Sync up makefiles, add darwin build
+M models/lnd/clm/tools/interpinic/Makefile ---- Sync up makefiles, add darwin build
+M models/lnd/clm/tools/mkgriddata/Makefile ---- Sync up makefiles, add darwin build
+M models/lnd/clm/tools/mkdatadomain/Makefile -- Sync up makefiles, add darwin build
+
+>>>>>>>>>>> Add -irrig and -rtm_res options, update files to new NetCDF versions,
+>>>>>>>>>>> add in finidat/fsurdat files for irrigation (f19,f10, f09)
+M models/lnd/clm/bld/build-namelist ----- Add -irrig, -rtm_res options
+ set do_budgets, and budget_inst in drv_namelist, finidat/fsurdat depend on irrig
+ set create_crop_landunit by irrig
+M models/lnd/clm/bld/clm.cpl7.template -- Set CLM_RTM_RES to half-degree and pass
+ to build-namelist
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml ------ add irrig and rtm_res
+M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml default for irrig and rtm_res
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ---- New NetCDF
+ pft-physiology file (for all configs), finidat and fsurdat files check
+ irrig, f19, f10, and f09 surfdata files for irrigation (and f19 finidat)
+ defaults for create_croplandunit, new NetCDF RTM direction file
+M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml ---- Set do_budgets
+ to .true. and budget_inst to 1.
+
+>>>>>>>>>>> Remove concurrent directives and misc.h/preproc.h #includes
+>>>>>>>>>>> Remove scam_setlatlonidx and use shr_scam_.
+>>>>>>>>>>> Switch ncorn for nc3crop and nwheat for nirrig change pio_close for ncd_close
+M models/lnd/clm/src/biogeochem/CNMRespMod.F90 -------- Remove concurrent directives
+ remove misc.h/preproc.h #includes
+M models/lnd/clm/src/biogeochem/CASAMod.F90 ----------- Change pio_closefile to
+ ncd_pioclosefile
+M models/lnd/clm/src/biogeochem/CNWoodProductsMod.F90 - Remove directives
+M models/lnd/clm/src/biogeochem/DUSTMod.F90 ----------- Remove directives
+M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 ncorn->nc3crop,
+ use shr_scam_getCloseLatLon in place of scam_setlatlonidx,
+ Remove directives, switch pio_close with ncd_pio_closefile
+M models/lnd/clm/src/biogeochem/CNDecompMod.F90 ------- Remove directives and #includes
+M models/lnd/clm/src/biogeochem/CNDVMod.F90 ----------- Switch pio_closefile with ncd_close
+M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 ---- ncorn->nc3crop
+M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 ---- ncorn->nc3crop, nwheat->nirrig
+ and remove #includes
+M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90 ncorn->nc3crop, nwheat->nirrig
+ and remove #includes
+
+>>>>>>>>>>> Remove concurrent directives and misc.h/preproc.h #includes
+>>>>>>>>>>> Remove scam_setlatlonidx and use shr_scam_.
+>>>>>>>>>>> Switch ncorn for nc3crop and nwheat for nirrig change pio_close for ncd_close
+M models/lnd/clm/src/main/clm_varcon.F90 ------- Add degpsec, isecspday,
+ and remove #includes
+M models/lnd/clm/src/main/clm_varpar.F90 ------- Remove #includes, add ivis/inir
+ indices, and make rtmlat/rtmlon variables not parameters
+M models/lnd/clm/src/main/CNiniTimeVar.F90 ----- Remove directives, and #includes,
+ add qflx_irrig
+M models/lnd/clm/src/main/abortutils.F90 ------- Remove directives, and #includes
+ and NEC_SX, and UNICOSMP CPP defines
+M models/lnd/clm/src/main/accumulMod.F90 ------- Remove directives
+M models/lnd/clm/src/main/decompInitMod.F90 ---- Remove UNICOSMP CPP defines
+M models/lnd/clm/src/main/clm_initializeMod.F90 Move cellarea from areaMod to here
+ work with downscale a bit, add stuff from clm_comp init to here
+M models/lnd/clm/src/main/clmtypeInitMod.F90 --- Add irrig_rate and n_irrig_steps_left
+M models/lnd/clm/src/main/iniTimeConst.F90 ----- Switch pio_close with ncd_close,
+ add single-column read for PCT_CLAY, switch 86400 for secspday
+M models/lnd/clm/src/main/histFileMod.F90 ------ Remove UNICOSMP, switch pio_close
+ with ncd_close
+M models/lnd/clm/src/main/restFileMod.F90 ------ Switch pio_close with ncd_close
+There's also a new driver namelist setting that will update the orbit each year
+(setting orb_mode and orb_iyear_align).
+
+Bugs fixed (include bugzilla ID):
+ 1225 (abort if both trigrid and finemesh on)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 935 (RTM warning NOT an error)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on mirage...)
+ 1165 (Restart trouble for scaled harvest test on mirage)
+ 1224 (Fix -aerdepgrid/ndepgrid options in PTCLM.py)
+ 1238 (PST test fails)
+ 1239 (ESMF build fails)
+ 1240 (lynx_pgi build fails)
+ 1249 (problem in mksurfdata for PFT override mode)
+ 1258 (runinit_ibm.csh needs to be updated)
+ 1264 (Incorrect doc of qflx_evap_tot in the code)
+ 1265 (Fix ndep/aerdepregrid.ncl)
+ 1266 (Threading problem with CNDV)
+ 1318 (interpinic has trouble with new restart files)
+ 1372 (pio problem writing out CLM history files for CRU)
+ 1381 (Can't change monthly average files to NOT be one per month)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system: Add in darwin_intel build
+
+Describe any changes made to the namelist:
+
+List any changes to the defaults for the boundary datasets: Add in T341 datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm,
+ csm_share, pio, mct, cprnc
+
+ scripts to mpiserial07_scripts4_101117
+ drv to drv3_1_45
+ datm to datm8_101105
+ csm_share to share3_101118
+ pio to pio1_2_6
+ mct to MCT2_7_0_100228-mpiserial101109_tag02
+ cprnc to cprnc_101119
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/tests_pretag_jaguar - remove non-existent tests
+
+>>>>>>>>>> Remove clmi frequency setting, add darwin build/run
+ M models/lnd/clm/test/system/input_tests_master
+ M models/lnd/clm/test/system/TBR.sh
+ M models/lnd/clm/test/system/test_driver.sh - Add darwin builds
+ M models/lnd/clm/test/system/mknamelist
+
+>>>>>>>>>> Change orb_iyear for orb_iyear_ad
+ M models/lnd/clm/test/system/nl_files/nl_urb
+ M models/lnd/clm/test/system/nl_files/nl_noicertm_sclharv
+ M models/lnd/clm/test/system/nl_files/clm_ndepdyn -- remove ndepsrc stream setting
+ Can now replace usage of this file with clm_std
+ M models/lnd/clm/test/system/nl_files/nl_cn_conly
+ M models/lnd/clm/test/system/nl_files/nl_urb_br
+
+>>>>>>>>>> Get build working with darwin_intel
+ M models/lnd/clm/bld/configure ------ get it working with darwin_intel
+ M models/lnd/clm/bld/clm.cpl7.template - Use $GMAKE, set to gmake if not set
+
+>>>>>>>>>> Remove non-existent resolution: 2.5x3.33
+ M models/lnd/clm/bld/namelist_files/checklatsfiles.ncl
+ M models/lnd/clm/bld/namelist_files/checkdatmfiles.ncl
+
+>>>>>>>>>> Add orb_mode, set orbit based on it, also add orb_iyear_align
+>>>>>>>>>> Add run_barriers, pio_inparm namelist, add T341 resolution (512x1024)
+ M models/lnd/clm/bld/build-namelist - Set orbit based on orb_mode, set pio namelist
+ for stand-alone testing,
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ---- Remove pio namelist
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- Add T341: griddata,
+ fracdata, surfdata, topodata (fracdata for USGS and tx0.1 masks)
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml
+
+>>>>>>>>>> Switch orb_iyear for orb_iyear_ad
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+
+>>>>>>>>>> Switch ncd_x for pio_x/nf_x
+>>>>>>>>>> Fix a couple memory leaks that Jim Edwards found
+>>>>>>>>>> Make ncd_pio private, add documentation add attributes for restart history
+>>>>>>>>>> files, add 2D character read (needed for NetCDF pft-physiology file read on
+>>>>>>>>>> irrigation branch)
+ M models/lnd/clm/src/biogeochem/CASAMod.F90 ------------ Replace pio_x
+ M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 - use masterproc at top of module
+ M models/lnd/clm/src/biogeochem/CNDVMod.F90 ------------ Replace pio_x
+ M models/lnd/clm/src/biogeochem/CNrestMod.F90 ---------- Replace pio_x
+ M models/lnd/clm/src/main/inicFileMod.F90 -------------- Add use MPI_LOGICAL
+ M models/lnd/clm/src/main/accumulMod.F90 --------------- Replace pio_x
+ M models/lnd/clm/src/main/clm_initializeMod.F90 -------- Remove samegrids use downscale,
+ don't pass namelist to ncd_pio initialization
+ M models/lnd/clm/src/main/subgridRestMod.F90 ----------- Add use endrun, switch ncd_x for nf_x
+ M models/lnd/clm/src/main/ndepStreamMod.F90 ------------ PIO initialization uses
+ driver settings, pass get_calendar to initialization
+ M models/lnd/clm/src/main/histFileMod.F90 -------------- Add attributes to history restart files
+ M models/lnd/clm/src/main/restFileMod.F90 -------------- Replace pio_x
+ M models/lnd/clm/src/main/clm_time_manager.F90 --------- Replace nf_x with ncd_x add
+ get_calendar
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 ----- Abort if trigrid and downscale
+ Update orbit params in run-phase
+ M models/lnd/clm/src/main/clm_varctl.F90 --------------- Remove samegrids
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90 --- Abort if trigrid and downscale
+ Update orbit params in run-phase
+ M models/lnd/clm/src/main/ncdio_pio.F90 ---------------- Update documentation, make private
+ add ncd_pio_closefile wrapper, make some pio interfaces public from here,
+ add ncd_io_char_var2_nf for NetCDF pft-physiology file, remove pio namelist
+ remove a second allocation that Jim Edwards found
+ M models/lnd/clm/src/main/surfrdMod.F90 ---------------- Fix memory leak from Jim Edwards
+ M models/lnd/clm/src/riverroute/RtmMod.F90 ------------- Switch nf_x with ncd_x
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 -- Add use for spval and iulog,
+ change nf_x for ncd_x
+
+Summary of testing:
+
+All TBL tests fail... (although you can use clm4_0_16 with updated externals and show b4b)
+
+ bluefire: All PASS except...
+029 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+030 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+031 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+033 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+055 smLI1 TSM.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+056 erLI1 TER.sh _sc_dh clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+057 brLI1 TBR.sh _sc_dh clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+ bluefire interactive testing: All PASS
+ bluefire/CESM testing:
+FAIL SMS_RLA.f45_f45.I.bluefire.compare_hist.clm4_0_16
+FAIL SMS_RLA.f45_f45.I.bluefire.compare.clm4_0_16
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm4_0_16
+FAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_16
+FAIL SMS_ROA.f45_f45.I.bluefire.compare.clm4_0_16
+FAIL ERS_D.f45_g37.I.bluefire.compare_hist.clm4_0_16
+FAIL ERS_D.f45_g37.I.bluefire.compare.clm4_0_16
+FAIL PST.f45_g37.I1850.bluefire <<<<< Didn't create scripts problem, bug 1238
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm4_0_16
+FAIL ERS_E.f19_g16.I1850.bluefire <<<< Scripts build issue, bug 1239
+BFAIL ERS_E.f19_g16.I1850.bluefire.generate.clm4_0_17
+FAIL ERS_E.f19_g16.I1850.bluefire.compare_hist.clm4_0_16
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm4_0_16
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm4_0_16
+FAIL PST.f10_f10.I8520CN.bluefire <<<<< Didn't create scripts problem, bug 1238
+FAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm4_0_16
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_16
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_16
+ bluefire/PTCLM testing: All PASS
+ jaguar: All PASS except...
+007 brB91 TBR.sh _scsnf_dh clm_std^nl_urb 20021230:1800 4x5 gx3v7 72+72 arb_ic ..................FAIL! rc= 13
+022 erH92 TER.sh 17p_cnsc_dm clm_ndepdyn^nl_cn_conly 20020101:1800 4x5 gx3v7@1850-2000 10+38 cold FAIL! rc= 13
+023 brH92 TBR.sh 17p_cnsc_dm clm_ndepdyn^nl_cn_conly 20020101:1800 4x5 gx3v7@1850-2000 72+72 cold FAIL! rc= 11
+038 smLI2 TSM.sh _sc_dm clm_std 20020101:1800 94x192 T62 -10 arb_ic .............................FAIL! rc= 10
+039 erLI2 TER.sh _sc_dm clm_std 20020101:1800 94x192 T62 -5+-5 arb_ic ...........................FAIL! rc= 5
+040 brLI2 TBR.sh _sc_dm clm_std 20020101:1800 94x192 T62 -10+-10 arb_ic .........................FAIL! rc= 5
+042 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:1800 10x15 USGS 96 arb_ic ....................FAIL! rc= 10
+043 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:1800 10x15 USGS 72+72 arb_ic .................FAIL! rc= 5
+044 brL58 TBR.sh _sc_dh clm_std^nl_crcrop 20020115:1800 10x15 USGS 72+72 arb_ic .................FAIL! rc= 5
+ jaguar interactive testing: All PASS except...
+007 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+008 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+009 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+027 sm974 TSMscript_tools.sh mksurfdata mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds ....FAIL! rc= 6
+ edinburgh/lf95 interactive testing: All PASS
+ edinburgh/lf95 testing: All PASS except...
+005 smD91 TSM.sh _persc_dh clm_per^nl_urb 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 10
+006 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:1200 4x5 gx3v7 72+72 cold ....................FAIL! rc= 5
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+012 smG56 TSM.sh 17p_sc_m clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic ..............FAIL! rc= 10
+013 smE91 TSM.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:1800 4x5 gx3v7 96 arb_ic .................FAIL! rc= 10
+014 erE91 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:1800 4x5 gx3v7 10+38 arb_ic ..............FAIL! rc= 5
+015 brE91 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021230:1800 4x5 gx3v7 72+72 arb_ic ...........FAIL! rc= 5
+018 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:1800 10x15 USGS@2000 10+38 cold ............FAIL! rc= 13
+019 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:1800 10x15 USGS@2000 72+72 cold .........FAIL! rc= 11
+025 smL51 TSM.sh _sc_dh clm_std^nl_urb 20020115:1800 10x15 USGS 96 arb_ic .......................FAIL! rc= 10
+026 erL51 TER.sh _sc_dh clm_std^nl_urb 20020115:1800 10x15 USGS 10+38 arb_ic ....................FAIL! rc= 5
+027 brL51 TBR.sh _sc_dh clm_std^nl_urb_br 20020115:1800 10x15 USGS 72+72 arb_ic .................FAIL! rc= 5
+029 smH41 TSM.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 96 cold ..FAIL! rc= 10
+030 erH41 TER.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 10+38 cold FAIL! rc= 5
+031 brH41 TBR.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 72+72 cold FAIL! rc= 5
+033 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:1800 10x15 USGS 96 arb_ic ....................FAIL! rc= 10
+ edinburgh/PTCLM testing: All PASS up to...
+myPTCLMtests_US-Ha1_I_1850.PTCLM FAIL 0
+ mirage,storm/ifort interactive testing: All PASS except...
+017 erR53 TER.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:1800 10x15 USGS@1850 10+38 cold .........FAIL! rc= 13
+018 brR53 TBR.sh 17p_cnc13sc_do clm_std^nl_urb_br 20020115:1800 10x15 USGS@1850 72+72 cold ......FAIL! rc= 11
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 144 arb_ic .............FAIL! rc= 10
+021 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 10+38 arb_ic ...........FAIL! rc= 5
+022 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:1800 10x15 USGS@1000-1002 72+72 arb_ic ...........FAIL! rc= 5
+025 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 10+38 cold FAIL! rc= 13
+026 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:1800 10x15 USGS@2000 72+72 cold FAIL! rc= 11
+ lynx/pgi testing: All FAIL scripts build issue <<<< bug 1240
+ yong/darwin_intel testing: All PASS up to ...
+005 smD94 TSM.sh _persc_ds clm_per^nl_urb 20021231:1200 4x5 gx3v7 144 cold ......................FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_16
+
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All with datm
+ - what platforms/compilers: All
+ - nature of change: roundoff
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ PERGRO test on bluefire
+
+===============================================================
+===============================================================
+Tag name: clm4_0_16
+Originator(s): erik/mvertens (Kluzek Erik 1326 CGD) (Vertenstein Mariana 1349 CGD)
+Date: Wed Oct 27 13:33:21 MDT 2010
+One-line Summary: Fix downscaling roundoff difference for same-grids by copying scale factor when needed
+
+Purpose of changes:
+
+Fix bug 1230, that caused problems with runoff to the ocean when running fully coupled. The global integrals of runoff fields
+was the same in the coupler -- but the values where roundoff different. This caused problems both in testing for bit-for-bit with
+the previous version and with restarts. The problem was that in the downscaling changes made in clm4_0_15 the areal scaling factor
+asca needed to be copied from adomain into ldomain if no downscaling is taking place.
+
+Bugs fixed (include bugzilla ID):
+===============================================================
+Tag name: clm4_0_14
+Originator(s): erik (Erik Kluzek)
+Date: Tue Oct 19 13:12:36 MDT 2010
+One-line Summary: Fix finidat file for T31 sim_year=2000 cases
+
+Purpose of changes:
+
+Remove the 1850 T31 finidat file for sim_year=2000 and use the previous sim_year=2000
+files (created using interpinic). Update scripts and datm.
+
+Bugs fixed (include bugzilla ID): Correct finidat file for T31 sim_year=2000
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on gust...)
+ 1165 (Restart trouble for scaled harvest test on gust)
+ 1224 (Fix -aerdepgrid/ndepgrid options in PTCLM.py)
+ 1249 (problem in mksurfdata for PFT override mode)
+ 1258 (runinit_ibm.csh needs to be updated)
+ 1264 (Incorrect doc of qflx_evap_tot in the code)
+ 1265 (Fix ndep/aerdepregrid.ncl)
+ 1266 (Threading problem with CNDV)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: Go back to T31,sim_year=2000
+finidat file
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts and datm
+
+ scripts to mpiserial05_scripts4_101018
+ datm to datm8_101008
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- Return to old
+ finidat file for T31 sim_year=2000
+M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml - change
+ fearedep to datm_file_aero
+
+Summary of testing:
+
+ bluefire/CESM testing:
+PASS SMS_D.T31_g37.I1850CN.bluefire
+PASS SMS_D.T31_g37.ICN.bluefire
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_13
+
+Changes answers relative to baseline: T31 2000 cases
+
+===============================================================
+===============================================================
+Tag name: clm4_0_13
+Originator(s): erik (Erik Kluzek)
+Date: Sat Oct 16 09:14:08 MDT 2010
+One-line Summary: Bring in PTCLM branch, add in T31 finidat file and turn off ice_runoff for T31
+
+Purpose of changes:
+
+Bring in PTCLM work. Update externals for scripts, datm, drv. Get mksurfdata to have options to override soil/PFT with user input values. Fix some issues with getregional_datasets.pl. Remove old stand-alone CLM Makefile (always use CESM Macro's files and Makefile). More removal of ndepsrc in build-namelist. Turn off ice_runoff for T31. Add in T31 finidat file.
+
+Bugs fixed (include bugzilla ID):
+ 1189 (Create ability to change soil color/texture in mksurfdata)
+ 1188 (Add ability to handle control transient land-cover change)
+ 1206 (Problem looping over a single year of CPLHIST forcing)
+ 1211 (Small memory leak in CLM4 initialization)
+ 1223 (ESMF problem)
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on gust...)
+ 1165 (Restart trouble for scaled harvest test on gust)
+ 1224 (Fix -aerdepgrid/ndepgrid options in PTCLM.py)
+ 1249 (problem in mksurfdata for PFT override mode)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1017 (SCM mode can NOT restart)
+ 452 (Problem with support of soil-colors != 8 or 20)
+
+Type of tag: standard
+
+Describe any changes made to build system:
+
+ Remove custom options to stand-alone build/test, require using cesm make files
+
+Describe any changes made to the namelist: None, although many new options to mksurfdata namelist
+
+List any changes to the defaults for the boundary datasets: New T31 finidat files
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm, csm_share
+
+ drv to drvseq3_1_37
+ datm to datm8_100921
+ csm_share to share3_101001
+ scripts to PTCLM03_scripts4_101005
+
+List all files eliminated:
+
+ R models/lnd/clm/test/system/tests_posttag_breeze >>> rename to mirage
+>>>>>>>>>>> Rename to module
+ R models/lnd/clm/tools/mksurfdata/mkglcmec.F90
+ R models/lnd/clm/tools/mksurfdata/mksoicol.F90
+ R models/lnd/clm/tools/mksurfdata/mksoitex.F90
+
+ R models/lnd/clm/bld/config_files/Makefile.in --- Remove always use CESM make
+
+List all files added and what they do:
+
+>>>>>>>>>>> Renames
+ A models/lnd/clm/test/system/tests_posttag_mirage
+ A models/lnd/clm/tools/mksurfdata/mkglcmecMod.F90
+ A models/lnd/clm/tools/mksurfdata/mksoitexMod.F90
+ A models/lnd/clm/tools/mksurfdata/mksoicolMod.F90
+
+>>>>>>>>>>> Namelist settings for standard urban single-point
+ A models/lnd/clm/bld/namelist_files/use_cases/stdurbpt.xml
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>> Remove PTS_MODE restart/branch tests
+ M models/lnd/clm/test/system/tests_posttag_lynx_nompi
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_posttag_spot1 --------- remove hybrid test
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_pretag_jaguar_nompi
+ M models/lnd/clm/test/system/tests_posttag_intrepid_nompi
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+
+>>>>>>>>>>> Change breeze+ for mirage changes for new build that requires CESM build files
+ M models/lnd/clm/test/system/TCB.sh -------------------- Set nc_path
+ M models/lnd/clm/test/system/config_files/_nrmexsc_ds -- Use -sitespf_pt
+ M models/lnd/clm/test/system/config_files/_nrvansc_ds -- Use -sitespf_pt
+ M models/lnd/clm/test/system/test_driver.sh ------------ Swap out mirage/storm for breeze+, add GEN machine options, remove CLM_CESMBLD
+ M models/lnd/clm/test/system/mknamelist ---------------- Use config_file variable
+ M models/lnd/clm/test/system/TSMscript_tools.sh -------- Add exedir
+ M models/lnd/clm/test/system/CLM_runcmnd.sh ------------ Add more options for yong, change breeze+ to mirage/storm
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850 ------ Add exedir
+ M models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850-2000 - Add exedir
+
+>>>>>>>>>>> Add new options to override dataset settings with your own values for: soil color/texture, and PFT
+ M models/lnd/clm/tools/mksurfdata/mkvarctl.F90 ----- Make private
+ M models/lnd/clm/tools/mksurfdata/mkglacier.F90 ---- Add option to zero out glacier
+ M models/lnd/clm/tools/mksurfdata/mkharvestMod.F90 - Add mkharvest_parse_oride to override harvesting
+ M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 ---- Move nglcec here
+ M models/lnd/clm/tools/mksurfdata/mklanwat.F90 ----- Add option to zero out lake
+ M models/lnd/clm/tools/mksurfdata/mkurban.F90 ------ Add option to zero out urban
+ M models/lnd/clm/tools/mksurfdata/mkvarsur.F90 ----- Make private
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ----- Add new namelist options: soil_color, soil_sand, soil_clay, pft_idx, pft_frc
+ M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 ----- Add init and override methods: mkpftInit, mkpft, and mkpft_parse_oride
+ M models/lnd/clm/tools/mksurfdata/Filepath --------- Add esmf_wrf_timemgr to directory list
+ M models/lnd/clm/tools/mksurfdata/Srcfiles --------- Change names, add shr_cal_mod, shr_string_mod, and ESMF files
+
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl ---- Add new options to override your own values, and error check the input:
++ -dynpft "filename" Dynamic PFT/harvesting file to use
++ (rather than create it on the fly)
++ (must be consistent with first year)
++ -exedir "directory" Directory where mksurfdata program is
++ (by default assume it's in the current directory)
++OPTIONS to override the mapping of the input gridded data with hardcoded input
++
++ -pft_frc "list of fractions" Comma delimited list of percentages for veg types
++ -pft_idx "list of veg index" Comma delimited veg index for each fraction
++ -soil_cly "% of clay" % of soil that is clay
++ -soil_snd "% of sand" % of soil that is sand
+
+>>>>>>>>>>> Start fixing some issues with getregional scripts
+M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl
+M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl
+
+>>>>>>>>>>> Add USER_FCTYP
+ M models/lnd/clm/tools/mksurfdata/Makefile
+ M models/lnd/clm/tools/interpinic/Makefile
+ M models/lnd/clm/tools/mkgriddata/Makefile
+ M models/lnd/clm/tools/mkdatadomain/Makefile
+
+>>>>>>>>>>> Remove stand-alone user options and require cesm_bld, add ice_runoff run_stopdate, and new finidat files for T31, more ndepmapalgo defaults
+>>>>>>>>>>> Change stop_n values for urban single-point so will run to completion
+ M models/lnd/clm/bld/configure --------------- Remove options: test, cc, cflags, fc, fflags, fopt, gmake, ldflags, linker, mpi/nc_inc/_lib
+ add nc_path and mpi_path options, require cesm_bld, and remove logic for doing
+ clm-stand-alone build
+ M models/lnd/clm/bld/queryDefaultNamelist.pl - Check for valid values, add list options,
+ M models/lnd/clm/bld/queryDefaultXML.pm ------ Change a comment
+ M models/lnd/clm/bld/build-namelist ---------- Add setting of ice_runoff
+ M models/lnd/clm/bld/config_files/config_sys_defaults.xml --- Set more default machine names
+ M models/lnd/clm/bld/config_files/config_definition.xml ----- Remove compiler options above and add nc_path/mpi_path
+
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Add run_stopdate and work on comments
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Add ice_runoff defaults, new finidat files for T31, add more ndepmapalgo defaults
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml - Change stop_n for urban single-point, add stop_date
+
+ M models/lnd/clm/doc/UsersGuide/preface.xml - Change comment
+
+>>>>>>>>>>> Fix two code bugs (1211 and 1223)
+ M models/lnd/clm/src/main/iniTimeConst.F90 ------------ make sure to deallocate memory
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90 -- add use statement needed for endrun
+
+Summary of testing:
+
+ bluefire testing: All PASS except.. (up to 054 smI59)
+029 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+030 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+031 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+032 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+033 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+ bluefire interactive testing: All PASS except..
+061 sm9S4 TSMscript_tools.sh ncl_scripts getregional_datasets.pl getregional ....................FAIL! rc= 6
+ bluefire/CESM testing: All PASS except
+BFAIL SMS_RLA.f45_f45.I.bluefire.compare.clm4_0_11
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_11
+ jaguar interactive testing: All PASS except...
+005 smAK4 TSM.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold ............FAIL! rc= 10
+007 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+008 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+009 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+ edinburgh/lf95 interactive testing: All PASS
+ mirage/storm.intel interactive testing: ALL PASS up to...
+017 erR53 TER.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@1850 10+38 cold ....FAIL! rc= 13
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_12
+
+Changes answers relative to baseline: No bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_12
+Originator(s): erik (Erik Kluzek)
+Date: Fri Sep 10 13:07:03 MDT 2010
+One-line Summary: Add U10 to history, cesm1_0_rel06 updates, PTCLM02 updates (except
+ mksurfdata), remove ndepdat/dyn/faerdep
+
+Purpose of changes:
+
+Update to latest cesm1 release branch. Change SPMD from spmd to use_mpiserial in
+configure. Remove old aerdep and ndepdat/dyn files from code and scripts. Change ccsm in
+scripts to cesm. Add in new U10 field to history files, change old name to U10_DUST. Some
+updates from PTCLM branch for XML database. Also perturb initial conditions read in from
+initial file by pertlim. Work with the PERGRO CPP #ifdef a bit. Start adding in testing on
+lynx.
+
+Bugs fixed (include bugzilla ID):
+ 1199 (Add trusted machine history file for PERGRO analysis)
+ 1196 (Add urban option to configure, delete GRANDVIEW ifdefs)
+ 1191 (UG documentation for single-point needs to change que to shared-que)
+ 1167 (Add note about reducing PE's for single-point mode)
+ 1115 (Make config_definition names the same as configure options)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on gust...)
+ 1165 (Restart trouble for scaled harvest test on gust)
+ 1206 (Problem looping over a single year of CPLHIST forcing)
+ http://bugs.cgd.ucar.edu/
+
+Known Limitations:
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1147 (mkgriddata can't straddle over Greenwich)
+
+Type of tag: std-test
+
+Describe any changes made to build system:
+ Names of configure modes changed: seq_ccsm to clm_stndln, and ext_ccsm_seq to ext_cesm
+ Add sitespf_pt option which will set either MEXICOCITY or VANCOUVER cpp ifdefs
+ Names of some configure options changed to make consistent with config_definition file.
+
+Describe any changes made to the namelist: Remove use_ndepstream/fndepdat/fndepdyn/faerdep
+
+List any changes to the defaults for the boundary datasets:
+
+ New 10x15 rcp6 transient 1850-2100 pftdyn dataset
+ Add navy oro file to clm_tools XML file
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, U10 code from Keith Oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts and datm
+ scripts to scripts4_100901a
+ datm to datm8_100830
+
+List all files eliminated:
+
+ D models/lnd/clm/src/main/aerdepMod.F90
+ D models/lnd/clm/src/main/ndepFileMod.F90
+ D models/lnd/clm/tools/ncl_scripts/convertUrbanOffline2Seq.ncl
+ D models/lnd/clm/tools/ncl_scripts/getndepdatFrom20thCentury.ncl
+ D models/lnd/clm/tools/mkgriddata/mkgriddata.ccsm_dom -- Rename to .cesm_dom
+ D models/lnd/clm/doc/UsersGuide/fixvan_datm.buildnml.diff
+
+List all files added and what they do:
+
+>>>>>>>>>> Transient test files for rcp2.6 and rcp4.5, start adding lynx testing
+ A models/lnd/clm/test/system/nl_files/clm_transient_rcp2.6
+ A models/lnd/clm/test/system/nl_files/clm_transient_rcp4.5
+ A models/lnd/clm/test/system/tests_posttag_lynx
+ A models/lnd/clm/test/system/tests_posttag_lynx_nompi
+
+>>>>>>>>>> Sample perturbation growth data for jaguar, intel and lahey
+ A models/lnd/clm/tools/ncl_scripts/RMSjaguar.dat
+ A models/lnd/clm/tools/ncl_scripts/RMSintel.dat
+ A models/lnd/clm/tools/ncl_scripts/RMSlahey.dat
+
+ A models/lnd/clm/tools/mkgriddata/mkgriddata.cesm_dom - rename from .ccsm_dom
+
+>>>>>>>>>> Plot of sample bad perturbation error growth
+ A models/lnd/clm/doc/UsersGuide/badpergro.jpg
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>> Change ccsm_seq=>clm_stndln, spmd=>nouse_mpiserial
+ M models/lnd/clm/test/system/config_files/_nrsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dh
+ M models/lnd/clm/test/system/config_files/17p_nrsc_ds
+ M models/lnd/clm/test/system/config_files/17p_scnv_do
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_m
+ M models/lnd/clm/test/system/config_files/_scnv_dh
+ M models/lnd/clm/test/system/config_files/_nrsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_o
+ M models/lnd/clm/test/system/config_files/4p_nrcasasc_ds
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dm
+ M models/lnd/clm/test/system/config_files/_nrsc_do
+ M models/lnd/clm/test/system/config_files/_persc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_do
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_s
+ M models/lnd/clm/test/system/config_files/_nrsc_ds
+ M models/lnd/clm/test/system/config_files/_scnv_do
+ M models/lnd/clm/test/system/config_files/_persc_dm
+ M models/lnd/clm/test/system/config_files/_persc_do
+ M models/lnd/clm/test/system/config_files/4p_vodsrsc_dh
+ M models/lnd/clm/test/system/config_files/_persc_ds
+ M models/lnd/clm/test/system/config_files/_nrmexsc_ds
+ M models/lnd/clm/test/system/config_files/_mec10sc_dh
+ M models/lnd/clm/test/system/config_files/4p_vodsrsc_dm
+ M models/lnd/clm/test/system/config_files/_nrcnsc_do
+ M models/lnd/clm/test/system/config_files/17p_sc_dh
+ M models/lnd/clm/test/system/config_files/4p_vodsrsc_do
+ M models/lnd/clm/test/system/config_files/_mec10sc_dm
+ M models/lnd/clm/test/system/config_files/_nrcnsc_ds
+ M models/lnd/clm/test/system/config_files/4p_casasc_dh
+ M models/lnd/clm/test/system/config_files/4p_vodsrsc_ds
+ M models/lnd/clm/test/system/config_files/17p_sc_dm
+ M models/lnd/clm/test/system/config_files/_mec10sc_do
+ M models/lnd/clm/test/system/config_files/17p_sc_do
+ M models/lnd/clm/test/system/config_files/_sc_dh
+ M models/lnd/clm/test/system/config_files/_mec10sc_ds
+ M models/lnd/clm/test/system/config_files/4p_casasc_dm
+ M models/lnd/clm/test/system/config_files/4p_casasc_do
+ M models/lnd/clm/test/system/config_files/17p_sc_ds
+ M models/lnd/clm/test/system/config_files/_sc_dm
+ M models/lnd/clm/test/system/config_files/4p_casasc_ds
+ M models/lnd/clm/test/system/config_files/_nrsc_s
+ M models/lnd/clm/test/system/config_files/_sc_do
+ M models/lnd/clm/test/system/config_files/17p_cnsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnsc_m
+ M models/lnd/clm/test/system/config_files/_sc_ds
+ M models/lnd/clm/test/system/config_files/17p_cnsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnsc_do
+ M models/lnd/clm/test/system/config_files/_nrvansc_ds
+ M models/lnd/clm/test/system/config_files/17p_sc_h
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_do
+ M models/lnd/clm/test/system/config_files/4p_casasc_h
+ M models/lnd/clm/test/system/config_files/17p_sc_m
+ M models/lnd/clm/test/system/config_files/17p_sc_o
+ M models/lnd/clm/test/system/config_files/_sc_h
+ M models/lnd/clm/test/system/config_files/4p_casasc_m
+ M models/lnd/clm/test/system/config_files/4p_casasc_o
+ M models/lnd/clm/test/system/config_files/_sc_m
+ M models/lnd/clm/test/system/config_files/17p_vodsrsc_h
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_dh
+ M models/lnd/clm/test/system/config_files/_sc_o
+ M models/lnd/clm/test/system/config_files/17p_vodsrsc_m
+ M models/lnd/clm/test/system/config_files/_sc_s
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_dm
+ M models/lnd/clm/test/system/config_files/17p_vodsrsc_o
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_do
+ M models/lnd/clm/test/system/config_files/17p_vodsrsc_dh
+ M models/lnd/clm/test/system/config_files/_scsnf_dh
+ M models/lnd/clm/test/system/config_files/4p_vodsnrsc_ds
+ M models/lnd/clm/test/system/config_files/17p_vodsrsc_dm
+ M models/lnd/clm/test/system/config_files/_scsnf_dm
+ M models/lnd/clm/test/system/config_files/17p_vodsrsc_do
+ M models/lnd/clm/test/system/config_files/4p_vodsrsc_h
+ M models/lnd/clm/test/system/config_files/_scsnf_do
+ M models/lnd/clm/test/system/config_files/17p_vodsrsc_ds
+ M models/lnd/clm/test/system/config_files/_mec10sc_h
+ M models/lnd/clm/test/system/config_files/4p_vodsrsc_o
+ M models/lnd/clm/test/system/config_files/_mec10sc_m
+ M models/lnd/clm/test/system/config_files/_mec10sc_o
+ M models/lnd/clm/test/system/config_files/17p_scnv_dh
+ M models/lnd/clm/test/system/config_files/17p_cndvsc_h
+ M models/lnd/clm/test/system/config_files/README --- seq-ccsm=>standalone clm
+
+>>>>>>>>>> Change comments of CCSM to CESM, start adding in test support of lynx, convert
+>>>>>>>>>> SPMD to NOUSE_MPISERIAL, change mode name of seq_ccsm to clm_stndln, add tests
+>>>>>>>>>> for more rcp's.
+ M models/lnd/clm/test/system/TCB.sh ------------- CCSM_MACH=>CESM_MACH, ccsm_bld=>cesm_bld
+ M models/lnd/clm/test/system/README.testnames --- Change ccsm=>cesm, seq_ccsm=>clm_stndln
+ M models/lnd/clm/test/system/test_driver.sh ----- Change CLM_CCSMBLD=>CLM_CESMBLD,
+ Start adding lynx.
+ M models/lnd/clm/test/system/input_tests_master - Add HX and HY tests, ccsm=>cesm
+ M models/lnd/clm/test/system/README ------------- CLM_CCSMBLD=>CLM_CESMBLD
+ M models/lnd/clm/test/system/CLM_runcmnd.sh ----- NOSPMD=>NOUSE_MPISERIAL
+
+>>>>>>>>>> Change comments from ccsm to cesm, change namelist to get faerdep file from,
+>>>>>>>>>> work on pergro plot so can plot more graphs.
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl --- ccsm=>cesm,
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl -- ccsm=>cesm,
+ get aerdep file from clmexp clm_tool namelist
+ M models/lnd/clm/tools/ncl_scripts/RMSintrepid.dat ---- New data
+ M models/lnd/clm/tools/ncl_scripts/RMSbluefire.dat ---- New data
+ M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl --- Get from clmexp namelist
+ M models/lnd/clm/tools/ncl_scripts/pergroPlot.ncl ----- Add ability to plot up to
+ five files, make sure lines are different, add success line to end.
+ M models/lnd/clm/tools/ncl_scripts/ndeplintInterp.ncl - Do some operations with
+ out meta-data to save time and remove warnings
+ M models/lnd/clm/tools/ncl_scripts/README --------- Change ccsm=>cesm and improve
+ M models/lnd/clm/tools/interpinic/runinit_ibm.csh - Change ccsm=>cesm
+ M models/lnd/clm/tools/mkgriddata/mkvarctl.F90 ---- Correct documentation, ccsm=>cesm
+ M models/lnd/clm/tools/mkgriddata/mkgriddata.F90 -- ccsm=>cesm
+ M models/lnd/clm/tools/mkgriddata/README ---------- ccsm=>cesm
+ M models/lnd/clm/tools/mkdatadomain/addglobal.F90 - ccsm=>cesm
+
+>>>>>>>>>> Changes comments of ccsm to cesm, add sitespf_pt config option, remove
+>>>>>>>>>> -ndepsrc, add ndepmapalgo, switch prog_seasalt for progsslt, spmd for
+>>>>>>>>>> nouse_mpiserial, change names of modes
+ M models/lnd/clm/bld/configure ---------------- ccsm=>cesm, +sitespf_pt,
+ prog_seasalt=>progsslt, spmd=>nouse_mpiserial, modes changed to
+ ext_cesm, and clm_stndln, remove setting of SPMD cppdef
+ M models/lnd/clm/bld/queryDefaultNamelist.pl -- ccsm=>cesm
+ M models/lnd/clm/bld/queryDefaultXML.pm ------- Remove ability to use cam config
+ file, spmd=>nouse_mpiserial
+ M models/lnd/clm/bld/build-namelist ----------- ccsm=>cesm, remove -ndepsrc,
+ add rcp to some settings, ccsm_seq=>clm_stndln, set start_ymd from
+ run_startdate, add settings of ndepmapalgo, remove fndepdat/dyn/faerdep
+ M models/lnd/clm/bld/clm.cpl7.template -------- Remove -spmd, mode now ext_cesm,
+ ccsm=>cesm
+ M models/lnd/clm/bld/README ------------------- CCSM=>CESM
+ M models/lnd/clm/bld/config_files/Makefile.in - SPMD=>NOUSE_MPISERIAL, ccsm=>cesm
+ M models/lnd/clm/bld/config_files/config_definition.xsl --- Titles to caption,
+ put valid_values under description
+ M models/lnd/clm/bld/config_files/config_sys_defaults.xml - spmd=>nouse_mpiserial
+ M models/lnd/clm/bld/config_files/config_definition.xml --- +sitespf_pt,
+ comp_interface=>comp_intf, ccsm=>cesm, spmd=>nouse_mpiserial, mode
+ valid values are: ext_cesm, clm_stndln
+
+>>>>>>>>>> Remove ndepsrc/use_ndepstream/fndepdat/fndepdyn/faerdep add mksrf_navyoro
+>>>>>>>>>> Change comments from ccsm to cesm, exchange run_startdate for start_ymd, add
+>>>>>>>>>> ndepmapalgo, add stop_option/stop_n settings for spinup modes
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --------- Remove
+ ndepsrc/use_ndepstream/fndepdat/dyn/faerdep, ccsm=>cesm, add mksrf_navyoro,
+ run_startdate, faerdep and fndepdat for aerdepregrid/ndepregrid tools,
+ add 0.33x0.33 resolution for navyoro file
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml --- Remove
+ ndepsrc add defaults when sitespf_pt is set
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml ------ rcp6 datm_presaero
+ M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl ----------- Add sitespf_pt
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl --------- Headers to captions
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist ------------- ccsm=>cesm
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ------- ccsm=>cesm,
+ remove use_ndepstream, faerdep, fndepdat, fndepdyn
+ add ndepmapalgo
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml - Add navy oro
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml ------- Add
+ stop_option/stop_n for spinup modes, change start_ymd for run_startdate
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml - Add ndepmapalgo
+
+>>>>>>>>>> Change config mode names (from ccsm_seq to clm_stndln), remove ndepsrc,
+>>>>>>>>>> remove start_ymd, clm_demand just sets fpftdyn (fndepdat/dyn removed)
+ M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml ---- mode
+ changes to clm_stndln
+ M models/lnd/clm/bld/namelist_files/use_cases/glacier_mec.xml ----- mode
+ changes to clm_stndln
+ M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml ---- mode
+ changes to clm_stndln
+ M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml - mode
+ changes to clm_stndln, remove start_ymd, clm_demand just sets fpftdyn
+ remove ndepsrc
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml - mode
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml - mode
+ changes to clm_stndln, remove start_ymd, clm_demand just sets fpftdyn
+ remove ndepsrc
+ M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml - mode
+ changes to clm_stndln, clm_demand just sets fpftdyn remove ndepsrc
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml --- mode
+ changes to clm_stndln, remove start_ymd, clm_demand just sets fpftdyn
+ remove ndepsrc
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml - mode
+ changes to clm_stndln, remove start_ymd, clm_demand just sets fpftdyn
+ remove ndepsrc
+ M models/lnd/clm/bld/namelist_files/use_cases/pergro.xml -- Also output TSA
+ M models/lnd/clm/bld/namelist_files/use_cases/pergro0.xml - Also output TSA
+
+>>>>>>>>>> Change so any scripts calls start with "./", ccsm=>cesm, remove ndepsrc
+>>>>>>>>>> Use macro for PTS_MODE, use .submit rather than .run scripts.
+>>>>>>>>>> Add bit about managing your data with link_dirtree, add more notes and
+>>>>>>>>>> examples for PERGRO testing, add more notes about using batch for single-pt
+>>>>>>>>>> mode
+ M models/lnd/clm/doc/UsersGuide/trouble_shooting.xml -- Add more about ccsm log file
+ M models/lnd/clm/doc/UsersGuide/config_cache.xml ------ Update from configure
+ M models/lnd/clm/doc/UsersGuide/single_point.xml ------ Add section on which mode
+ Add warning about single-point on batch machines, remove warning about
+ error that was fixed, remove notes about setting ndepsrc
+ M models/lnd/clm/doc/UsersGuide/special_cases.xml ----- Use .submit, add notes
+ about using provided history files from bluefire for PERGRO testing, and
+ add bit about TSA as well as TSOI, add more machines and examples of
+ bad pergro for PERGRO examples.
+ M models/lnd/clm/doc/UsersGuide/tools.xml ------------- Remove ndepsrc stuff
+ M models/lnd/clm/doc/UsersGuide/preface.xml ----------- Talk about very latest updates.
+ remove bit about PERGRO not validated
+ M models/lnd/clm/doc/UsersGuide/clm_ug.xml ------------ Update version, remove bug fix
+ M models/lnd/clm/doc/UsersGuide/appendix.xml ----------
+ M models/lnd/clm/doc/UsersGuide/adding_files.xml ------ Add bit about managing your
+ data when you use link_dirtree, update table, remove ndepsrc
+ M models/lnd/clm/doc/UsersGuide/custom.xml ------------ Remove bit about rcp experimental
+ comment out tables that cause docbook to fail with a seg fault.
+ M models/lnd/clm/doc/UsersGuide/pergro.jpg ------------ New data
+ M models/lnd/clm/doc/UsersGuide/Makefile -------------- Remove vandif bug fix
+ M models/lnd/clm/doc/Quickstart.userdatasets ---------- Shorten lines remove faerdep
+ correct procedure
+ M models/lnd/clm/doc/Quickstart.GUIDE ----------------- Use .submit script
+ M models/lnd/clm/doc/UsersGuide/stylesheethtml2docbook.xsl - Change tables from
+ informal to formal, using captions for titles, add template for bold.
+
+>>>>>>>>>> Changes comments for CCSM to CESM, remove misc.h and preproc.h #includes
+>>>>>>>>>> Remove use_ndepstream/fndepdat/fndepdyn/faerdep/set_*dep_from_file
+>>>>>>>>>> Add u10_clm and va, add ability to perturb IC from startup finidat file
+ M models/lnd/clm/src/biogeochem/DUSTMod.F90 ----- CCSM=>CESM, remove misc/preproc.h
+ M models/lnd/clm/src/main/clm_comp.F90 ---------- CCSM=>CESM
+ M models/lnd/clm/src/main/clm_initializeMod.F90 - Remove use_ndepstream logic
+ hardwire it to on
+ M models/lnd/clm/src/main/clm_glclnd.F90 -------- CCSM=>CESM
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 ---- Add u10 and va
+ M models/lnd/clm/src/main/clm_atmlnd.F90 -------- Rm set_caerdep_from_file/dustdep
+ M models/lnd/clm/src/main/controlMod.F90 -------- Rm fndepdat, fndepdyn,
+ use_ndepstream, faerdep, ccsm=>cesm
+ M models/lnd/clm/src/main/clm_time_manager.F90 -- ccsm=>cesm, remove misc/preproc.h
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 --- lnd_chkAerDep_mct just
+ aborts if aerosols NOT sent from atm.
+ M models/lnd/clm/src/main/clm_driver.F90 ------------- Rm aerdep and old ndep interpolation
+ M models/lnd/clm/src/main/clm_varctl.F90 ------------- Rm set_caerdep_from_file/dustdep,
+ faerdep, fndepdat, fndepdyn, use_ndepstream
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90 - lnd_chkAerDep_mct just
+ aborts if aerosols NOT sent from atm.
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_mct.F90 -- ccsm=>cesm
+ M models/lnd/clm/src/main/surfrdMod.F90 ---------- ccsm=>cesm, remove misc/preproc.h
+ M models/lnd/clm/src/main/domainMod.F90 ---------- Rm misc/preproc.h, ccsm=>cesm
+ M models/lnd/clm/src/main/clmtype.F90 ------------ Add u10_clm, and va
+ M models/lnd/clm/src/main/histFldsMod.F90 -------- Add U10, and VA, and mv old U10 to U10_DUST
+ M models/lnd/clm/src/main/mkarbinitMod.F90 ------- Make into module, remove
+ misc/preproc.h, add separate subroutine to perturb initial conditions
+ M models/lnd/clm/src/biogeophys/UrbanInitMod.F90 - Remove misc/preproc.h,
+ remove GRANDVIEW #ifdefs
+ M models/lnd/clm/src/biogeophys/FrictionVelocityMod.F90 - Add u10/va,
+ remove misc/preproc.h and concurrent loops
+ M models/lnd/clm/src/biogeophys/BiogeophysicsLakeMod.F90 Add some PERGRO #ifdef
+ remove misc/preproc.h
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 --- Add perturbIC call
+ remove misc/preproc.h and concurrent loops
+ M models/lnd/clm/src/biogeophys/SoilHydrologyMod.F90 ---- Remove misc/preproc.h,
+ KO comments and concurrent loops
+ M models/lnd/clm/src/biogeophys/UrbanMod.F90 ------------ Remove misc/preproc.h,
+ and GRANDVIEW #ifdefs
+
+ M README - Start with ./, and correct .build script name, and use .submit in exp
+
+Summary of testing:
+
+ bluefire interactive testing: All PASS except...
+025 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ jaguar: All PASS except...
+007 brB91 TBR.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 72+72 arb_ic .............FAIL! rc= 13
+ jaguar interactive testing:
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ edinburgh/lf95 interactive testing: All PASS except...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+ edinburgh/lf95: All PASS except...
+005 smD91 TSM.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 10
+006 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 72+72 cold ...............FAIL! rc= 5
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 smG56 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+013 smE91 TSM.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 96 arb_ic ............FAIL! rc= 10
+014 erE91 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 10+38 arb_ic .........FAIL! rc= 5
+015 brE91 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v7 72+72 arb_ic ......FAIL! rc= 5
+018 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 10+38 cold .......FAIL! rc= 13
+019 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS@2000 72+72 cold ....FAIL! rc= 11
+025 smL51 TSM.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+026 erL51 TER.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+027 brL51 TBR.sh _sc_dh clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+029 smH41 TSM.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 96 cold FAIL! rc= 10
+030 erH41 TER.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 10+38 coFAIL! rc= 5
+031 brH41 TBR.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 72+72 coFAIL! rc= 5
+033 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 96 arb_ic ...............FAIL! rc= 10
+ breeze,gale,hail,gust/ifort interactive testing: All PASS except...
+017 erR53 TER.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@1850 10+38 cold ....FAIL! rc= 13
+018 brR53 TBR.sh 17p_cnc13sc_do clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS@1850 72+72 cold .FAIL! rc= 11
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+023 blG53 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+025 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 10+38 coFAIL! rc= 13
+026 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 72+72 coFAIL! rc= 11
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_11
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_11
+Originator(s): erik (Erik Kluzek)
+Date: Fri Aug 27 14:14:37 MDT 2010
+One-line Summary: New files for rcp6, fix MPI bug, update externals
+
+Purpose of changes:
+
+Add in new pftdyn and stream_ndep files for rcp=6.0. Fix MPI bug where send array was the same as receive array.
+Fix problem with datm template on gust, and syntax errors for pt1_pt1 mode. Add start_tod to drv/scripts.
+
+Bugs fixed (include bugzilla ID):
+ 1197 (MPI problem sending and receiving data in same array)
+ 1207 (Problem with datm template on gust)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1206 (Problem looping over a single year of CPLHIST forcing)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: standard
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist:
+ Add ndepmapalgo
+ Move datasets just for clm tools to clm_tools namelist_defaults XML file
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm,
+csm_share
+ scripts to scripts4_100730
+ drv to drvseq3_1_33
+ datm to datm8_100728
+ csm_share to share3_100802
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>>>> Add configure test file for serial
+ A models/lnd/clm/test/system/config_files/4p_vodsnrsc_ds
+
+>>>>>>>>>>>> Add some files to test mksurfdata.pl script
+ A models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850
+ A models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850-2000
+
+>>>>>>>>>>>> Put all files for clm-tools in separate file
+ A models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml
+
+>>>>>>>>>>>> Add new chapter on trouble shooting, add a script to limit
+>>>>>>>>>>>> the line lengths, add style sheet to convert HTML XSL table
+>>>>>>>>>>>> to docbook. Add file to fix vancouver problem.
+ A models/lnd/clm/doc/UsersGuide/trouble_shooting.xml
+ A models/lnd/clm/doc/UsersGuide/limitLineLen.pl
+ A models/lnd/clm/doc/UsersGuide/addxhtmlhead.pl
+ A models/lnd/clm/doc/UsersGuide/stylesheethtml2docbook.xsl
+ A models/lnd/clm/doc/UsersGuide/fixvan_datm.buildnml.diff
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>> Change some of the tests around
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_pretag_jaguar
+ M models/lnd/clm/test/system/tests_pretag_jaguar_nompi
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+
+>>>>>>>>>>>> Work with testing a bit, add mksurfdata.pl and drydep tests
+ M models/lnd/clm/test/system/README.testnames ---- Add "V" drydep test
+ M models/lnd/clm/test/system/test_driver.sh ------ Add pftdata, change
+multi-processing a bit
+ M models/lnd/clm/test/system/TSMscript_tools.sh -- Fix some glitches
+ M models/lnd/clm/test/system/gen_test_table.sh --- Convert to xhtml
+ M models/lnd/clm/test/system/nl_files/clm_usrdat - Remove non-streams mode for ndep
+and aerdep
+ M models/lnd/clm/test/system/input_tests_master -- Add mksurfdata.pl and drydep tests
+ make scsnf 4x5 rather than 10x15
+
+>>>>>>>>>>>> Add -nomv, usrname, and pftdyn options, add ability to run in
+>>>>>>>>>>>> a different directory, check for vegtyp files before running.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl
+
+>>>>>>>>>>>> Handle rcp's correctly, and handle datm streams for presaero files
+>>>>>>>>>>>> and ndep streams files
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl -- default rcp=hist, set
+ RCP to ncl script
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl - add ability to handle
+ usrname files, and handle ndep and aerdep streams files correctly
+
+>>>>>>>>>>>> Move tools files into clm_tools default file, add ndepmapalgo
+>>>>>>>>>>>> Work on the formatting of the files, do better with clm_usrdat_name
+ M models/lnd/clm/bld/queryDefaultNamelist.pl - Add clm_tools default file.
+ Don't limit list to -var, as now done in .pm file below.
+ M models/lnd/clm/bld/queryDefaultXML.pm ------ If -var set, don't process variables
+ that don't match
+ M models/lnd/clm/bld/config_files/config_definition.xsl - Change to lowercase
+ for xhtml standard, remove glacier list
+ M models/lnd/clm/bld/config_files/config_definition.xml - Put glc_nec in physics list
+ M models/lnd/clm/bld/build-namelist --------------------- Fix minor doc issues
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Add ndepmapalgo,
+ change formatting for GPTL options
+ M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl ----- Improve formatting,
+ put note in table if All: res, masks, yrs, or sim_yr_rng
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl --- Improve formatting
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Remove mksrf_fvegtyp
+ files and fndepdat files for single-years only used for processing
+tools
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml - Add in handling
+ of rcp's, and stream_fldfilename_ndep, remove fndepdat/dyn
+
+>>>>>>>>>>>> Update to documentation from update to cesm1_0_rel_05
+ M models/lnd/clm/doc/UsersGuide/single_point.xml
+ M models/lnd/clm/doc/UsersGuide/get_Icaselist.pl
+ M models/lnd/clm/doc/UsersGuide/special_cases.xml
+ M models/lnd/clm/doc/UsersGuide/co2_streams.txt
+ M models/lnd/clm/doc/UsersGuide/tools.xml
+ M models/lnd/clm/doc/UsersGuide/preface.xml
+ M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+ M models/lnd/clm/doc/UsersGuide/appendix.xml
+ M models/lnd/clm/doc/UsersGuide/adding_files.xml
+ M models/lnd/clm/doc/UsersGuide/custom.xml
+ M models/lnd/clm/doc/UsersGuide/addco2_datm.buildnml.diff
+ M models/lnd/clm/doc/UsersGuide/Makefile
+ M models/lnd/clm/doc/Quickstart.userdatasets
+ M models/lnd/clm/doc/KnownBugs
+ M models/lnd/clm/doc/README
+ M models/lnd/clm/src/main/ndepStreamMod.F90
+ M models/lnd/clm/src/main/surfrdMod.F90
+
+>>>>>>>>>>>> Update to documentation from update to cesm1_0_rel_05
+ M Copyright
+ M README
+
+Summary of testing:
+
+ bluefire: All PASS except...
+029 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+030 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+031 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+032 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+033 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+038 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 10
+039 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -3+-7 arb_ic .......FAIL! rc= 5
+040 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -5+-5 arb_ic .......FAIL! rc= 5
+041 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+006 smHS3 TSM.sh _nrcnsc_do clm_usrdat 20030101:NONE:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic .FAIL! rc= 8
+007 erHS3 TER.sh _nrcnsc_do clm_usrdat 20030101:NONE:1800 13x12pt_f19_alaskaUSA gx1v6 -3+-3 arb_ic FAIL! rc= 5
+008 brHS3 TBR.sh _nrcnsc_do clm_usrdat 20030101:NONE:1800 13x12pt_f19_alaskaUSA gx1v6 -3+-3 arb_ic FAIL! rc= 5
+009 blHS3 TBL.sh _nrcnsc_do clm_usrdat 20030101:NONE:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic .FAIL! rc= 4
+025 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+026 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+030 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+065 sm9S4 TSMscript_tools.sh ncl_scripts getregional_datasets.pl getregional ....................FAIL! rc= 6
+066 sm974 TSMscript_tools.sh mksurfdata mksurfdata.pl mksrfdt_1x1_brazil_1850-2000^tools__ds ....FAIL! rc= 6
+ bluefire/CESM testing:
+FAIL SMS_RLA.f45_f45.I.bluefire
+BFAIL SMS_RLA.f45_f45.I.bluefire.generate.clm4_0_11
+FAIL SMS_RLA.f45_f45.I.bluefire.compare_hist.clm4_0_10
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm4_0_10
+FAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_10
+FAIL ERS_D.f45_g37.I.bluefire.compare_hist.clm4_0_10
+FAIL ERS_D.f45_g37.I.bluefire.compare.clm4_0_10
+FAIL ERI.f19_g16.IG.bluefire.compare.clm4_0_10
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm4_0_10
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_11
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_10
+FAIL PST.f10_f10.I8520CN.bluefire.compare.clm4_0_10
+FAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm4_0_10
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_10
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_10
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_10
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_10
+Originator(s): erik (Erik Kluzek)
+Date: Wed Aug 4 14:37:59 MDT 2010
+One-line Summary: Update doc to cesm_rel05, bug-fixes, fix issues for single-point, mksurfdata/getregional scripts
+
+Purpose of changes:
+
+Use nn instead of copy for CO2 patch file. Update documentation to latest cesm version
+05. Update externals. Some changes to build-namelist for generic single-point
+simulations. Move tools XML files to clm_tools namelist_default file. Add 4x5 drydep
+test, work with testing a bit. Add tests for getregional.pl and mksurfdata.pl scripts.
+Add: usrname, nomv and pftdata options to mksurfdata.pl. Get RCP's working in getregional
+script. Update getregional to handle ndep and aerdep streams, also get it to run in a
+different directory. XML query won't test variables that don't match when -var option is
+specified. Convert test table to xhtml. Move glc_nec to physics. Add option for
+ndepmapalgo. Get faerdep and fndep streams files working right in
+namelist_defaults_usrdat.xml file.
+
+Bugs fixed (include bugzilla ID):
+ 1166 (get_regional script needs to be updated)
+ 1190 (add ndepmapalgo to ndep streams)
+ 1192 (Y1K problem for mksurfdata.pl)
+ 1193 (bug in reading GLCMASK)
+ 1207 (Problem with datm template on gust)
+
+Update of datm also fixes several issues with datm for single pt simulations: 1173, 1175, 1176, 1181
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on gust...)
+ 1165 (Restart trouble for scaled harvest test on gust)
+ 1197 (MPI problem sending and receiving data in same array)
+ 1206 (Problem looping over a single year of forcing)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: standard
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist:
+ Add ndepmapalgo
+ Move datasets just for clm tools to clm_tools namelist_defaults XML file
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm, csm_share
+ scripts to scripts4_100730
+ drv to drvseq3_1_33
+ datm to datm8_100728
+ csm_share to share3_100802
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>>>> Add configure test file for serial
+ A models/lnd/clm/test/system/config_files/4p_vodsnrsc_ds
+
+>>>>>>>>>>>> Add some files to test mksurfdata.pl script
+ A models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850
+ A models/lnd/clm/test/system/nl_files/mksrfdt_1x1_brazil_1850-2000
+
+>>>>>>>>>>>> Put all files for clm-tools in separate file
+ A models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml
+
+>>>>>>>>>>>> Add new chapter on trouble shooting, add a script to limit
+>>>>>>>>>>>> the line lengths, add style sheet to convert HTML XSL table
+>>>>>>>>>>>> to docbook. Add file to fix vancouver problem.
+ A models/lnd/clm/doc/UsersGuide/trouble_shooting.xml
+ A models/lnd/clm/doc/UsersGuide/limitLineLen.pl
+ A models/lnd/clm/doc/UsersGuide/addxhtmlhead.pl
+ A models/lnd/clm/doc/UsersGuide/stylesheethtml2docbook.xsl
+ A models/lnd/clm/doc/UsersGuide/fixvan_datm.buildnml.diff
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>> Change some of the tests around
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_pretag_jaguar
+ M models/lnd/clm/test/system/tests_pretag_jaguar_nompi
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+
+>>>>>>>>>>>> Work with testing a bit, add mksurfdata.pl and drydep tests
+ M models/lnd/clm/test/system/README.testnames ---- Add "V" drydep test
+ M models/lnd/clm/test/system/test_driver.sh ------ Add pftdata, change multi-processing a bit
+ M models/lnd/clm/test/system/TSMscript_tools.sh -- Fix some glitches
+ M models/lnd/clm/test/system/gen_test_table.sh --- Convert to xhtml
+ M models/lnd/clm/test/system/nl_files/clm_usrdat - Remove non-streams mode for ndep and aerdep
+ M models/lnd/clm/test/system/input_tests_master -- Add mksurfdata.pl and drydep tests
+ make scsnf 4x5 rather than 10x15
+
+>>>>>>>>>>>> Add -nomv, usrname, and pftdyn options, add ability to run in
+>>>>>>>>>>>> a different directory, check for vegtyp files before running.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl
+
+>>>>>>>>>>>> Handle rcp's correctly, and handle datm streams for presaero files
+>>>>>>>>>>>> and ndep streams files
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl -- default rcp=hist, set
+ RCP to ncl script
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl - add ability to handle
+ usrname files, and handle ndep and aerdep streams files correctly
+
+>>>>>>>>>>>> Move tools files into clm_tools default file, add ndepmapalgo
+>>>>>>>>>>>> Work on the formatting of the files, do better with clm_usrdat_name
+ M models/lnd/clm/bld/queryDefaultNamelist.pl - Add clm_tools default file.
+ Don't limit list to -var, as now done in .pm file below.
+ M models/lnd/clm/bld/queryDefaultXML.pm ------ If -var set, don't process variables
+ that don't match
+ M models/lnd/clm/bld/config_files/config_definition.xsl - Change to lowercase
+ for xhtml standard, remove glacier list
+ M models/lnd/clm/bld/config_files/config_definition.xml - Put glc_nec in physics list
+ M models/lnd/clm/bld/build-namelist --------------------- Fix minor doc issues
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Add ndepmapalgo,
+ change formatting for GPTL options
+ M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl ----- Improve formatting,
+ put note in table if All: res, masks, yrs, or sim_yr_rng
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl --- Improve formatting
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Remove mksrf_fvegtyp
+ files and fndepdat files for single-years only used for processing tools
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml - Add in handling
+ of rcp's, and stream_fldfilename_ndep, remove fndepdat/dyn
+
+>>>>>>>>>>>> Update to documentation from update to cesm1_0_rel_05
+ M models/lnd/clm/doc/UsersGuide/single_point.xml
+ M models/lnd/clm/doc/UsersGuide/get_Icaselist.pl
+ M models/lnd/clm/doc/UsersGuide/special_cases.xml
+ M models/lnd/clm/doc/UsersGuide/co2_streams.txt
+ M models/lnd/clm/doc/UsersGuide/tools.xml
+ M models/lnd/clm/doc/UsersGuide/preface.xml
+ M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+ M models/lnd/clm/doc/UsersGuide/appendix.xml
+ M models/lnd/clm/doc/UsersGuide/adding_files.xml
+ M models/lnd/clm/doc/UsersGuide/custom.xml
+ M models/lnd/clm/doc/UsersGuide/addco2_datm.buildnml.diff
+ M models/lnd/clm/doc/UsersGuide/Makefile
+ M models/lnd/clm/doc/Quickstart.userdatasets
+ M models/lnd/clm/doc/KnownBugs
+ M models/lnd/clm/doc/README
+ M models/lnd/clm/src/main/ndepStreamMod.F90
+ M models/lnd/clm/src/main/surfrdMod.F90
+
+>>>>>>>>>>>> Update to documentation from update to cesm1_0_rel_05
+ M Copyright
+ M README
+
+Summary of testing:
+
+ bluefire: All PASS except...
+029 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+030 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+031 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+032 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+033 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+055 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+056 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+057 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+058 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+009 blHS3 TBL.sh _nrcnsc_do clm_usrdat 20030101:NONE:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic .FAIL! rc= 4
+025 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+026 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+030 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+ bluefire/CESM testing: All PASS except...
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_10
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_09
+ jaguar interactive testing: All PASS except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ edinburgh/lf95 interactive testing: All PASS except...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+ edinburgh/lf95: All PASS except...
+005 smD91 TSM.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 10
+006 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 72+72 cold ...............FAIL! rc= 5
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 4
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+012 smG56 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+013 smE91 TSM.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 96 arb_ic ............FAIL! rc= 10
+014 erE91 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 10+38 arb_ic .........FAIL! rc= 5
+015 brE91 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v7 72+72 arb_ic ......FAIL! rc= 5
+016 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 48 arb_ic ............FAIL! rc= 4
+018 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 10+38 cold .......FAIL! rc= 13
+019 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS@2000 72+72 cold ....FAIL! rc= 11
+025 smL51 TSM.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+026 erL51 TER.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+027 brL51 TBR.sh _sc_dh clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+028 blL51 TBL.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 48 arb_ic ..................FAIL! rc= 4
+029 smH41 TSM.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 96 cold FAIL! rc= 10
+030 erH41 TER.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 10+38 coFAIL! rc= 5
+031 brH41 TBR.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 72+72 coFAIL! rc= 5
+032 blH41 TBL.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 48 cold FAIL! rc= 4
+033 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 96 arb_ic ...............FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_09
+
+Changes answers relative to baseline: bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_09
+Originator(s): erik (erik)
+Date: Mon Jun 14 00:02:12 MDT 2010
+One-line Summary: Fix some small issues, update documentation, and externals
+
+Purpose of changes:
+
+Work on documentation for CESM1.0 release, with glcec changes, and namelist changes. Run
+testing and fix bugs. Move documentation changes from release branch to trunk. Fix
+getregional script for transient. Remove "At point 2" from lnd log files. Update
+csm_share, and scripts version so can now run testing with lahey compiler. Get CO2 patch
+file working.
+
+Bugs fixed (include bugzilla ID):
+ 1092 (Problems running on dublin with datm8 with debug)
+ 1159 (date in fco2 file is not used)
+ 1160 (Fix mksurfdata.pl script to work with 1000-1004)
+ 1167 (doc. about running single point reduce pes)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on gust...)
+ 1165 (Restart trouble for scaled harvest test on gust)
+ 1166 (get_regional script needs to be updated)
+ 1192 (Y1K problem for mksurfdata.pl)
+ 1193 (bug in reading GLCMASK)
+ 1197 (MPI problem sending and receiving data in same array)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: std-test
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm,
+cism, csm_share
+ scripts to scripts4_100612
+ drv to drvseq3_1_31
+ datm to datm8_100612
+ cism to cism1_100608
+ csm_share to share3_100607
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+>>>>>>>>>>>> Add testing configure file
+ A models/lnd/clm/test/system/config_files/_nrcnsc_do
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>> Add CLM_USRDAT_NAME and getregional.pl tests
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_edinburgh_nompi
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/README.testnames
+ M models/lnd/clm/test/system/TSMscript_tools.sh
+ M models/lnd/clm/test/system/nl_files/clm_usrdat
+ M models/lnd/clm/test/system/nl_files/getregional
+ M models/lnd/clm/test/system/input_tests_master
+ M models/lnd/clm/test/system/README
+
+>>>>>>>>>>>>
+ M models/lnd/clm/tools/ncl_scripts/getco2_historical.ncl ---- Add comment that
+ date variable is NOT used
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl -- Add path to scripts
+ so can run from a different directory
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl - Fix warnings and
+ allow some files to not be converted if not needed
+ M models/lnd/clm/tools/ncl_scripts/pftdyntest2raw.ncl ------- Re-order longitudes
+ so from -180-180 rather than 0-360
+
+>>>>>>>>>>>>
+ M models/lnd/clm/bld/queryDefaultXML.pm ----- Get working for usrdat better
+ M models/lnd/clm/bld/listDefaultNamelist.pl - Get working for usrdat files
+ M models/lnd/clm/bld/build-namelist --------- Allow lnd_res to be usrdat name
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - Set year first/last
+ for datm_presaero for clim_2000
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist -------- Don't allow
+ prognostic for datm_presaero
+
+>>>>>>>>>>>> Update documentation, add cprnc README to document
+ M models/lnd/clm/doc/UsersGuide/tools.xml
+ M models/lnd/clm/doc/UsersGuide/preface.xml
+ M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+ M models/lnd/clm/doc/UsersGuide/adding_files.xml
+ M models/lnd/clm/doc/UsersGuide/appendix.xml
+ M models/lnd/clm/doc/UsersGuide/custom.xml
+ M models/lnd/clm/doc/UsersGuide/single_point.xml
+ M models/lnd/clm/doc/UsersGuide/Makefile
+ M models/lnd/clm/doc/UsersGuide/special_cases.xml
+ M models/lnd/clm/doc/UsersGuide/addco2_datm.buildnml.diff - Update patch to
+ work with the latest datm with DATM_PRESAERO
+
+>>>>>>>>>>>> Remove "at point 2" and fix esmf duplication from fix by Mariana
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90
+
+>>>>>>>>>>>> Update README files and use CESM in place of CCSM
+ M models/lnd/clm/doc/Quickstart.userdatasets
+ M models/lnd/clm/doc/IMPORTANT_NOTES
+ M models/lnd/clm/doc/KnownBugs
+ M models/lnd/clm/doc/README
+ M models/lnd/clm/doc/index.shtml
+ M Copyright
+ M README
+
+Summary of testing:
+
+ bluefire: All PASS except...
+016 blF92 TBL.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 48 cold .............FAIL! rc= 5
+017 smEH1 TSM.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 10
+018 erEH1 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 10+38 arb_icFAIL! rc= 5
+019 brEH1 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 24+24 arbFAIL! rc= 5
+020 blEH1 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 4
+021 smHN1 TSM.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -10 colFAIL! rc= 8
+022 erHN1 TER.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -3+-7 cFAIL! rc= 3
+023 brHN1 TBR.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -5+-5 cFAIL! rc= 3
+024 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -10 colFAIL! rc= 3
+025 smHO2 TSM.sh 17p_cnsc_dm clm_drydep 20000704:NONE:1800 10x15 USGS@2000 -90 cold .............FAIL! rc= 3
+026 erHO2 TER.sh 17p_cnsc_dm clm_drydep 20000704:NONE:1800 10x15 USGS@2000 -3+-7 cold ...........FAIL! rc= 3
+027 brHO2 TBR.sh 17p_cnsc_dm clm_drydep 20000704:NONE:1800 10x15 USGS@2000 -5+-5 cold ...........FAIL! rc= 3
+028 blHO2 TBL.sh 17p_cnsc_dm clm_drydep 20000704:NONE:1800 10x15 USGS@2000 -90 cold .............FAIL! rc= 3
+029 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 3
+030 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 3
+031 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 3
+032 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 3
+033 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 3
+034 smH41 TSM.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 96 cold FAIL! rc= 3
+035 erH41 TER.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 10+38 coFAIL! rc= 3
+036 brH41 TBR.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 72+72 coFAIL! rc= 3
+037 blH41 TBL.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 48 cold FAIL! rc= 3
+038 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 3
+039 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -3+-7 arb_ic .......FAIL! rc= 3
+040 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -5+-5 arb_ic .......FAIL! rc= 3
+041 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 3
+042 smC61 TSM.sh _scnv_dh clm_std^nl_urb 20020101:NONE:1800 1.9x2.5 gx1v6 -6 startup ............FAIL! rc= 3
+043 erC61 TER.sh _scnv_dh clm_std^nl_urb 20020101:NONE:1800 1.9x2.5 gx1v6 10+38 startup .........FAIL! rc= 3
+044 brC61 TBR.sh _scnv_dh clm_std^nl_urb_br 20020101:NONE:1800 1.9x2.5 gx1v6 -3+-3 startup ......FAIL! rc= 3
+045 blC61 TBL.sh _scnv_dh clm_std^nl_urb 20020101:NONE:1800 1.9x2.5 gx1v6 48 startup ............FAIL! rc= 3
+046 smH52 TSM.sh 17p_cnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 96 cold ..........FAIL! rc= 3
+047 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 10+38 cold .......FAIL! rc= 3
+048 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS@2000 72+72 cold ....FAIL! rc= 3
+049 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 48 cold ..........FAIL! rc= 3
+050 smV61 TSM.sh _mec10sc_dh clm_glcmec 19980115:NONE:1800 1.9x2.5 gx1v6 96 arb_ic ..............FAIL! rc= 3
+051 erV61 TER.sh _mec10sc_dh clm_glcmec 19980115:NONE:1800 1.9x2.5 gx1v6 10+38 arb_ic ...........FAIL! rc= 3
+052 brV61 TBR.sh _mec10sc_dh clm_std 19980115:NONE:1800 1.9x2.5 gx1v6 72+72 arb_ic ..............FAIL! rc= 3
+053 blV61 TBL.sh _mec10sc_dh clm_glcmec 19980115:NONE:1800 1.9x2.5 gx1v6 48 arb_ic ..............FAIL! rc= 3
+054 smI59 TSMcnspinup.sh 17p_cnadspinupsc_dm 17p_cnexitspinupsc_dm 17p_cnsc_dm clm_std 20020115:NONEFAIL! rc= 3
+055 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 3
+056 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 3
+057 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 3
+058 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 3
+059 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 96 arb_ic ...............FAIL! rc= 3
+060 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 3
+061 brL58 TBR.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 3
+062 blL58 TBL.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 48 arb_ic ...............FAIL! rc= 3
+063 smJ61 TSM.sh 4p_casasc_dh clm_std^nl_urb 20021230:NONE:1800 1.9x2.5 gx1v6 96 cold ...........FAIL! rc= 3
+064 erJ61 TER.sh 4p_casasc_dh clm_std^nl_urb 20021230:NONE:1800 1.9x2.5 gx1v6 10+38 cold ........FAIL! rc= 3
+065 brJ61 TBR.sh 4p_casasc_dh clm_std^nl_urb_br 20021230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 3
+066 blJ61 TBL.sh 4p_casasc_dh clm_std^nl_urb 20021230:NONE:1800 1.9x2.5 gx1v6 48 cold ...........FAIL! rc= 3
+067 smJ05 TSM.sh 4p_casasc_h clm_std^nl_lfiles 19800101:NONE:1800 0.47x0.63 gx1v6 48 arb_ic .....FAIL! rc= 3
+ bluefire interactive testing: All PASS except...
+009 blHS3 TBL.sh _nrcnsc_do clm_usrdat 20030101:NONE:1800 13x12pt_f19_alaskaUSA gx1v6 -6 arb_ic .FAIL! rc= 5
+021 blJ74 TBL.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic FAIL! rc= 5
+025 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+026 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+030 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+062 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 3
+063 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 3
+064 smZ94 TSMtools.sh mkdatadomain tools__ds namelist ...........................................FAIL! rc= 3
+065 sm9S4 TSMscript_tools.sh ncl_scripts getregional_datasets.pl getregional ....................FAIL! rc= 3
+ bluefire/CESM testing: All PASS except...
+BFAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_08
+BFAIL ERI.f19_g16.IG.bluefire.compare.clm4_0_08
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_09
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_08
+ jaguar interactive testing: All PASS except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 smV23 TSM.sh _mec10sc_do clm_glcmec 19980115:NONE:1800 48x96 gx3v7 96 arb_ic ................FAIL! rc= 8
+026 erV23 TER.sh _mec10sc_do clm_glcmec 19980115:NONE:1800 48x96 gx3v7 10+38 arb_ic .............FAIL! rc= 5
+027 brV23 TBR.sh _mec10sc_do clm_std 19980115:NONE:1800 48x96 gx3v7 72+72 arb_ic ................FAIL! rc= 5
+ jaguar/CESM testing: All PASS
+ edinburgh/lf95 interactive testing: All PASS except...
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -10 arb_ic ..........FAIL! rc= 5
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+008 blAL4 TBL.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -10 cold ...............FAIL! rc= 5
+012 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 5
+ edinburgh/lf95: All PASS except...
+005 smD91 TSM.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 10
+006 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 72+72 cold ...............FAIL! rc= 5
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 4
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+012 smG56 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+013 smE91 TSM.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 96 arb_ic ............FAIL! rc= 10
+014 erE91 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 10+38 arb_ic .........FAIL! rc= 5
+015 brE91 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v7 72+72 arb_ic ......FAIL! rc= 5
+016 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 48 arb_ic ............FAIL! rc= 4
+018 erH52 TER.sh 17p_cnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 10+38 cold .......FAIL! rc= 13
+019 brH52 TBR.sh 17p_cnsc_dm clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS@2000 72+72 cold ....FAIL! rc= 11
+025 smL51 TSM.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+026 erL51 TER.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+027 brL51 TBR.sh _sc_dh clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+028 blL51 TBL.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 48 arb_ic ..................FAIL! rc= 4
+029 smH41 TSM.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 96 cold FAIL! rc= 10
+030 erH41 TER.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 10+38 coFAIL! rc= 5
+031 brH41 TBR.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 72+72 coFAIL! rc= 5
+032 blH41 TBL.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 48 cold FAIL! rc= 4
+033 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 96 arb_ic ...............FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_08
+
+Changes answers relative to baseline: bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_08
+Originator(s): erik (erik)
+Date: Fri Jun 4 01:25:39 MDT 2010
+One-line Summary: Snow hydrology bug fix from Keith and Dave
+
+Purpose of changes:
+
+SnowHydrology bug fix from Keith Oleson. For test-suite, make default to send aerosol
+data through datm. Update version of cism, scripts and datm. Remove some of the old aerdep
+stuff from the XML database as we now are using presaero from datm (leave 1-deg and
+2-deg).
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on gust...)
+ 1165 (Restart trouble for scaled harvest test on gust)
+ 1166 (get_regional script needs to be updated)
+ 1192 (Y1K problem for mksurfdata.pl)
+ 1193 (bug in reading GLCMASK)
+ 1197 (MPI problem sending and receiving data in same array)
+ 1206 (Problem looping over a single year of forcing)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: critical
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: Move datm_presaero to overall defaults
+
+List any changes to the defaults for the boundary datasets:
+ Remove all faerdep files except f09 and f19
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+ code changes come from Keith Oleson and Dave Lawrence
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, cism and drv
+ scripts to scripts4_100603a
+ drv to drvseq3_1_29
+ cism to cism1_100603
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+ A models/lnd/clm/test/system/config_files/17p_cnsc_m
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/nl_files/clm_ndepdyn - Switch demand for ndepdyn for
+ ndepsrc stream
+
+ M models/lnd/clm/bld/build-namelist ----- Get datm_presaero if not null
+ do NOT set faerdep
+ M models/lnd/clm/bld/clm.cpl7.template -- Set datm_presaero by DATM_PRESAERO
+ if datm or to prognostic if not (so aerosol dep require from atm)
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - Set
+ datm_presaero by resolution, sim_year, sim_year_range and rcp
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml ---- Remove datm_presaero
+ M models/lnd/clm/bld/namelist_files/datm-build-namelist ----------- Set
+ datm_presaero by resolution, sim_year, sim_year_range and rcp
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ----- Remove
+ all faerdep files except for f09 and f19
+
+ M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90 - Snow hydrology fix
+
+Summary of testing:
+
+ bluefire interactive testing: All PASS up to...
+014 smJ74 TSM.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test
+-1100 arb_ic FAIL! rc= 10
+ bluefire/CCSM testing: All PASS except..
+FAIL SMS_RLB.f45_f45.I.bluefire
+BFAIL SMS_RLB.f45_f45.I.bluefire.generate.clm4_0_08
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm4_0_06
+FAIL ERS_D.f45_g37.I.bluefire.compare_hist.clm4_0_06
+FAIL ERS_D.f45_g37.I.bluefire.compare.clm4_0_06
+FAIL PST.f45_g37.I1850.bluefire.compare.clm4_0_06
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm4_0_06
+FAIL ERS_E.f19_g16.I1850.bluefire.compare_hist.clm4_0_06
+FAIL ERS_E.f19_g16.I1850.bluefire.compare.clm4_0_06
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm4_0_06
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_08
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_06
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm4_0_06
+FAIL PST.f10_f10.I8520CN.bluefire.compare.clm4_0_06
+FAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm4_0_06
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_06
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_06
+ Extra testing:
+PASS ERI.f19_g16.IG.bluefire
+Make sure answers agree with /OLESON/csm/ccsm4_0_beta52_ndepaer other than VOC fields
+Test that F case will configure..
+create_newcase -compset F -case testF -res f19_g16 -mach bluefire -skip_rundb
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_07
+
+Changes answers relative to baseline: YES
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers:
+ - what code configurations: ALL
+ - what platforms/compilers: ALL
+ - nature of change: larger than roundoff/same climate
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+
+ - platform/compilers: IBM
+ - compset (and additional configure options): I1850CN
+ - build-namelist options (or complete namelist): -ndepsrc stream
+
+ MSS location of control simulations used to validate new climate:
+
+ /OLESON/csm/ccsm4_0_beta52_ndepaer
+ /OLESON/csm/ccsm4_0_beta52_ndepaertrans
+
+ The above is identical to this tag (other than the two VOC fields that changed)
+
+===============================================================
+===============================================================
+Tag name: clm4_0_07
+Originator(s): erik (erik)
+Date: Thu Jun 3 21:22:46 MDT 2010
+One-line Summary: Some cleanup/fix bugs, add RTM var, add albice to namelist, allow last-millenium in mksurfdata, allow setting of datm_presaero in clm test-suite
+
+Purpose of changes:
+
+Fix mksurfdata.pl, to correctly create 1000-1004 test datasets. Fix drydep for OpenMP.
+Update 1x1_tropicAtl_1000-1004 test fsurdat file. Move glc_grid from configure to
+build-namelist. Add in alb_ice to namelist. Start adding in the capability to handle
+mksurfdata from 0850-1850AD, put all mksrf_fvegtyp files in XML database (remove some of
+the sample pftdyn text files). New RTM field on history output from Sean (VOLR and
+VOLR_ICE, only VOLR output by default). Allow use of aerosol data from datm for I cases
+in the clm test suite. Split out datm-build-namelist from clm build-namelist (put in
+bld/namelist_files).
+
+Bugs fixed (include bugzilla ID):
+ 1162 (OpenMP bug with dry-deposition code in clm)
+ 883 (aerosol deposition not from atm)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1164 (Restart trouble for CN13 on gust...)
+ 1165 (Restart trouble for scaled harvest test on gust)
+ 1166 (get_regional script needs to be updated)
+ 1192 (Y1K problem for mksurfdata.pl)
+ 1193 (bug in reading GLCMASK)
+ 1197 (MPI problem sending and receiving data in same array)
+ 1206 (Problem looping over a single year of forcing)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: standard
+
+Describe any changes made to build system: Move glc_grid to build-namelist
+ remove esmf_libdir, fix ccsm_bld so will build threaded properly
+
+Describe any changes made to the namelist: Add albice to namelist
+ Add new history fields VOLR and VOLR_ICE
+
+ VOLR RTM storage: LIQ (m3)
+ VOLR_ICE RTM storage: ICE (m3)
+
+List any changes to the defaults for the boundary datasets:
+ New datasets for 1x1_tropicAtl 1000 tests
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, VOLR changes come from Sean Swenson
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, pio
+ scripts to scripts4_100601
+ drv to drvseq3_1_28
+ pio to pio1_1_1
+
+List all files eliminated:
+
+>>>>>>>>>>>>>>> Remove mksurdata pftdyn text files, let XML database create them
+ D models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr2000.txt
+ D models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr1850.txt
+ D models/lnd/clm/tools/mksurfdata/pftdyn_rcp6.0_simyr1850-2100.txt
+ D models/lnd/clm/tools/mksurfdata/pftdyn_rcp2.6_simyr1850-2100.txt
+ D models/lnd/clm/tools/mksurfdata/pftdyn_rcp4.5_simyr1850-2100.txt
+ D models/lnd/clm/tools/mksurfdata/pftdyn_rcp8.5_simyr1850-2100.txt
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>>> Split out datm part of build-namelist into its own script
+ A models/lnd/clm/bld/namelist_files/datm-build-namelist
+
+ A models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr2000-2000.txt -- Same as the
+ old file but with a 2000-2000.txt name rather than a 2000.txt name.
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>> Move glc_grid to build-namelist, remove kraken
+ M models/lnd/clm/test/system/config_files/_mec10sc_dh
+ M models/lnd/clm/test/system/config_files/_mec10sc_dm
+ M models/lnd/clm/test/system/config_files/_mec10sc_do
+ M models/lnd/clm/test/system/config_files/_mec10sc_ds
+ M models/lnd/clm/test/system/config_files/_mec10sc_h
+ M models/lnd/clm/test/system/config_files/_mec10sc_m
+ M models/lnd/clm/test/system/config_files/_mec10sc_o
+ M models/lnd/clm/test/system/test_driver.sh --------- Remove kraken, update dataroot
+ for bluefire, and temp workspace for intrepid
+ M models/lnd/clm/test/system/CLM_runcmnd.sh --------- Remove kraken
+ M models/lnd/clm/test/system/nl_files/clm_drydep ---- Change drydep to drv_drydep
+
+>>>>>>>>>>>>>>> Use XML database for pftdyn files, have mksurfdata.pl write out pftdyn files
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.globalirrig - change pftdyn file name
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.regional ---- change pftdyn file name
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pftdyn ------ start at 1850
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept ---- change pftdyn file name
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist ---- change pftdyn file name
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl ---------- Get mksrf_fvegtyp filenames
+ from XML database for all files, and write out pftdyn files with them
+ also get working for 1000-1004 test cases (specifically for 1x1_tropicAtl
+ test case)
+ M models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr1850-2005.txt - Use CSMDATA versions
+ of 1850 and 2000 files
+
+>>>>>>>>>>>>>>> Move glc_grid to namelist, fix ccsm_bld=on, add datm_presaero
+>>>>>>>>>>>>>>> Only do COLD start for startup type
+ M models/lnd/clm/bld/configure --------------- Remove glc_grid and esmf_libdir
+ add in control of CCSM_VOC, set compile_threaded for ccsm_bld on,
+ M models/lnd/clm/bld/queryDefaultNamelist.pl - Remove double reading of namelist_defaults_overall.xml
+ M models/lnd/clm/bld/queryDefaultXML.pm ------ Add csmdata to beginning of file, only
+ if it's a relative pathname (to handle instances of /cgd/tss for mksrf_vegtyp files)
+ M models/lnd/clm/bld/config_files/config_definition.xml - Remove glc_grid/esmf_libdir
+ M models/lnd/clm/bld/listDefaultNamelist.pl --- Move glc_grid to namelist vars
+ M models/lnd/clm/bld/build-namelist ----------- Add in glc_grid, and datm_presaero
+ change -drydep to -drv_drydep option, set glc_nthreads, outnc_large_files
+ and albice if glc_nec>0, move datm settings to own datm-build-namelist.
+ M models/lnd/clm/bld/clm.cpl7.template -------- Move glc_grid to build-namelist,
+ remove outnc_large_files setting (now in build-namelist), only do
+ COLD start for startup type (NOT for hybrid or branch).
+
+>>>>>>>>>>>>>>> Add albice/glc_grid/datm_presaero/outnc_large_files
+>>>>>>>>>>>>>>> New datasets for 1x1_tropicAtl 1000 tests
+>>>>>>>>>>>>>>> Add in all mksrf_fvegtyp files and include last-millenium
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ------ Add in albice,
+ and glc_* settings for drv (glc_nthreads, glc_ntasks etc.), add presaero
+ datm_presaero, datm_file_aero, datm_year_first_aero, datm_year_last_aero,
+ datm_year_align_aero, and glc_grid. Add 0.5x0.5 resolution (for mksurfdata)
+ and some premillenial years (850,1100,1350,1600) and sim-year ranges
+ (850-1100,1100-1350,1350-1600,1600-1850)
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - Set default
+ masks here, and add in glc_grid default
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml ---- Add in some
+ new domain files that are in datm template, and add in datm_presaero
+ settings needed: datm_file_aero, datm_aero_streams, datm_year_first_aero
+ datm_year_last_aero, and datm_year_align_aero
+ M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl --------- Show
+ datm_presaero setting if set.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ----- Add
+ outnc_large_files, albice, and move mask to overall, update
+ 1x1_tropicAtl files for 1000, 1000-1004, add in all mksrf_fvegtyp
+ files for all scenarios and last-millenium. Add in diri and diro.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml ----- Get URL
+ svn keyword set properly.
+
+>>>>>>>>>>>>>>> Put datm/drv settings on bottom (only for mode=ccsm_seq)
+>>>>>>>>>>>>>>> set datm_presaero and data_cycle_beg/end years
+ M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml -- default
+ datm_presaero is clim_2000
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml - default
+ datm_presaero is rcp8.5, beg/end year 1972-2004
+ M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml - default
+ datm_presaero is rcp8.5, beg/end year 1972-2004
+ M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml ------------ default
+ datm_presaero is trans_1850-2000, beg/end year 1948-1972, co2=386.9
+ M models/lnd/clm/bld/namelist_files/use_cases/glacier_mec.xml ---------------- default
+ datm_presaero is clim_2000
+ M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml --------------- default
+ datm_presaero is clim_1850, beg/end year 1948/1972
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml - default
+ datm_presaero is rcp2.6, beg/end year 1972-2004
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml --- default
+ datm_presaero is rcp6.0, beg/end year 1972-2004
+ M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml - default
+ datm_presaero is rcp4.5, beg/end year 1972-2004
+
+>>>>>>>>>>>>>>> Add in albice to namelist, add VOLR and VOLR_ICE to history files
+>>>>>>>>>>>>>>> always call interpMonthlyVeg for drydep
+ M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 - Remove interpMonthlyVeg
+ call as coming from a threaded region
+ M models/lnd/clm/src/main/clm_comp.F90 ------------- Call interpMonthlyVeg
+ for drydep even if CN is on
+ M models/lnd/clm/src/main/controlMod.F90 ----------- Add albice
+ M models/lnd/clm/src/main/clm_varcon.F90 ----------- Remove albice
+ M models/lnd/clm/src/main/clm_driver.F90 ----------- Always call interpMonthlyVeg
+ if drydep is on (even when NOT doalb)
+ M models/lnd/clm/src/main/histFldsMod.F90 ---------- Add VOLR and VOLR_ICE
+ (VOLR_ICE is an optional field)
+ M models/lnd/clm/src/riverroute/RtmMod.F90 --------- Handle volr under runoff
+ type, rather than as local variable
+ M models/lnd/clm/src/riverroute/RunoffMod.F90 ------ Add volr, volrlnd, volr_nt1/2
+ M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 Add albice as public var
+ that can be set in controlMod on namelist
+
+Summary of testing:
+
+ bluefire: All PASS except (up to test 061 nl_crcrop)
+061 brL58 TBR.sh _sc_dh clm_std^nl_crcrop
+004 blA91 TBL.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v7 -6 arb_ic ...................FAIL! rc= 7
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 7
+011 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 48 arb_ic ............FAIL! rc= 7
+020 blEH1 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 7
+024 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -10 colFAIL! rc= 7
+029 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+030 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+031 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+032 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+033 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+037 blH41 TBL.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 48 cold FAIL! rc= 7
+045 blC61 TBL.sh _scnv_dh clm_std^nl_urb 20020101:NONE:1800 1.9x2.5 gx1v6 48 startup ............FAIL! rc= 7
+ bluefire interactive testing: All PASS up to...
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ bluefire/CCSM testing: All PASS except..
+FAIL ERI.f19_g16.IG.bluefire
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_06
+
+
+ bluefire/extra CCSM testing:
+Make sure answers agree with /OLESON/csm/ccsm4_0_beta52_ndepaer other than VOC fields
+ (when snowhydrology changes are put in)
+
+ breeze,gale,hail,gust/ifort interactive testing: All PASS except...
+017 erR53 TER.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@1850 10+38 cold ....FAIL! rc= 13
+018 brR53 TBR.sh 17p_cnc13sc_do clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS@1850 72+72 cold .FAIL! rc= 11
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+023 blG53 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+025 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 10+38 coFAIL! rc= 13
+026 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 72+72 coFAIL! rc= 11
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_06
+
+Changes answers relative to baseline: no bit-for-bit (except omp active stand-alone tests)
+ The standalone tests with OpenMP on are different because the previous tag
+ wasn't building with OpenMP
+
+===============================================================
+===============================================================
+Tag name: clm4_0_06
+Originator(s): erik (erik)
+Date: Wed May 26 10:35:26 MDT 2010
+One-line Summary: Update gglc to cism
+
+Purpose of changes:
+
+Changes from jwolfe to lnd_comp* subroutines to exchange cism fields. Requires an update
+to the driver for the index of the fieldnames passed. Change paths of gglc glc_grid files
+from gglc to cism. Make stream the default for all resolutions for ndepsrc.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1162 (OpenMP bug with dry-deposition code in clm)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1197 (MPI problem sending and receiving data in same array)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: critical
+
+Describe any changes made to build system:
+ Change name of ice model from gglc to cism
+ Change list of fields exchanged with cism
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ Change pathnames for gglc fglcmask datasets to cism
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, jwolfe, lipscomb
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, cism
+ scripts to scripts4_100525
+ drv to drvseq3_1_26
+ cism to cism1_100525b
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - Change default
+ of ndepsrc for f19 and f09 to stream
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ----- Change pathnames
+ for fglcmask files to pathname with cism instead of gglc
+
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 ---------------- Pass a different
+ set of fields for sno (needed for update to cism)
+
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90 -------------- Pass a different
+ set of fields for sno (needed for update to cism)
+
+Summary of testing:
+
+ bluefire/CCSM testing:
+FAIL ERI.f19_g16.IG.bluefire
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_06
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_05
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_05
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_05
+ bluefire/CCSM extra testing:
+PASS SMS.f19_g16.IG.bluefire
+PASS ERS.f19_g16.IG.bluefire
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_05
+
+Changes answers relative to baseline: Only when glc is active
+ Or for f19 and f09 with CN as now ndepsrc streams is the default for all resolutions
+ (previously ndepsrc data was the default for f19 and f09)
+
+===============================================================
+===============================================================
+Tag name: clm4_0_05
+Originator(s): erik (erik)
+Date: Tue May 25 15:13:30 MDT 2010
+One-line Summary: Move Nitrogen deposition stream branch to trunk
+
+Purpose of changes:
+
+Move branch that treats ndepdyn files as streams to trunk. Change csm_share to have a
+simpler normalization for coszen scaling (from dlawren/kauff in datm/csm_share). Fix
+fragile code in clm_atmlnd, from Mariana. Update to datm8 that can set streams for
+aerosols. Fix template so that CLM_BLDNML_OPTS is active. Update scripts and get in
+other new finidat files, change clm test list, include _E test and IG f19 test.
+
+Bugs fixed (include bugzilla ID):
+ 1161 (New history fields added that should NOT be)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1162 (OpenMP bug with dry-deposition code in clm)
+ 1163 (finidat file has a bunch of NaN's in it)
+ 1197 (MPI problem sending and receiving data in same array)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Add -ndepsrc option to build-namelist
+
+List any changes to the defaults for the boundary datasets:
+ Add new datasets for Nitrogen deposition streams files (same as fndepdyn files)
+ Remove fndepdat/fndepdyn files for resolutions other than f09 and f19
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm, csm_share
+ scripts to scripts4_100524b
+ drv to drvseq3_1_23
+ datm to datm8_100420
+ csm_share to share3_100423
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>>>> Handle Nitrogen deposition streams
+A models/lnd/clm/src/main/ndepStreamMod.F90
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>> Add some CN drydep tests for hybrid/open-MP
+M models/lnd/clm/test/system/tests_pretag_jaguar
+M models/lnd/clm/test/system/input_tests_master
+
+>>>>>>>>>>>> Add ability to handle ndep streams namelists
+>>>>>>>>>>>> Add ability to set CLM_BLDNML_OPTS
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/clm.cpl7.template
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+>>>>>>>>>>>> Set ndepstreams variables if ndepsrc=stream, otherwise set fndep files
+M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+
+>>>>>>>>>>>> Handle new ndep streams namelist and namelist variables
+>>>>>>>>>>>> Remove misc.h and preproc.h #includes
+M models/lnd/clm/src/main/clm_comp.F90 ---------- Renumber starting at 1 not 0
+M models/lnd/clm/src/main/clm_initializeMod.F90 - Handle initialization both
+ for ndep streams and old ndep handling
+M models/lnd/clm/src/main/aerdepMod.F90 --------- Check if allocated before allocate
+M models/lnd/clm/src/main/iniTimeConst.F90 ------ Move setting of ndep out of here
+M models/lnd/clm/src/main/clm_atmlnd.F90 -------- Remove fragile code
+M models/lnd/clm/src/main/controlMod.F90 -------- Handle use_ndepstream
+M models/lnd/clm/src/main/clm_varctl.F90 -------- Add use_ndepstream
+M models/lnd/clm/src/main/clm_driver.F90 -------- Add ndep_interp if use_ndepstream
+ and first and last years are different
+M models/lnd/clm/src/main/ndepFileMod.F90 ------- Make fndepdat optional input so
+ can do this way (old way) or ndep streams (new way).
+M models/lnd/clm/src/main/clm_glclnd.F90 -------- Change order of vars from Bill Lipscomb
+
+M models/lnd/clm/src/main/areaMod.F90 - Add interfaces for MCT datatypes
+
+M models/lnd/clm/src/main/clmtypeInitMod.F90 - Remove unfilled history vars
+M models/lnd/clm/src/main/clmtype.F90 -------- Remove unfilled history vars
+M models/lnd/clm/src/main/histFldsMod.F90 ---- Remove unfilled history vars, add QTOPSOIL
+ as an optional history variable.
+M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90 - Handle fragile code mapping with MCT
+M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 --- Handle fragile code mapping with MCT
+
+Summary of testing:
+
+ bluefire: All PASS except...
+029 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+030 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+031 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+032 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+033 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+037 blH41 TBL.sh 17p_cnsc_dh clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 48 cold FAIL! rc= 7
+049 blH52 TBL.sh 17p_cnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 48 cold ..........FAIL! rc= 7
+055 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+056 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+057 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+058 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+040 blK74 TBL.sh 17p_cndvsc_s clm_std 19971231:NONE:1800 1x1_brazil navy -670 arb_ic ............FAIL! rc= 7
+048 blHQ4 TBL.sh _nrcnsc_ds clm_drydep 20000214:NONE:1800 1x1_brazil navy@2000 -150 cold ........FAIL! rc= 7
+ bluefire/CCSM testing: All PASS except...
+FAIL SMS_RLA.f45_f45.I.bluefire.compare_hist.clm4_0_04
+FAIL SMS_RLA.f45_f45.I.bluefire.compare.clm4_0_04
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm4_0_04
+FAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_04
+FAIL SMS_ROA.f45_f45.I.bluefire.compare_hist.clm4_0_04
+FAIL SMS_ROA.f45_f45.I.bluefire.compare.clm4_0_04
+FAIL ERS_D.f45_g37.I.bluefire.compare_hist.clm4_0_04
+FAIL ERS_D.f45_g37.I.bluefire.compare.clm4_0_04
+FAIL PST.f45_g37.I1850.bluefire.compare.clm4_0_04
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm4_0_04
+BFAIL ERS_E.f19_g16.I1850.bluefire.compare.clm4_0_04
+FAIL ERI.f19_g16.IG.bluefire
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm4_0_04
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_05
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_04
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm4_0_04
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm4_0_04
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm4_0_04
+FAIL PST.f10_f10.I8520CN.bluefire.compare.clm4_0_04
+FAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm4_0_04
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+017 erR53 TER.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@1850 10+38 cold ....FAIL! rc= 13
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_04
+
+Changes answers relative to baseline: Yes!
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change: same climate
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ ndepaer01_clm3_7_15 + SnowHydrology changes
+ - platform/compilers: IBM
+ - compset (and additional configure options): I1850CN
+ - build-namelist options (or complete namelist): -ndepsrc stream
+
+ MSS location of control simulations used to validate new climate:
+
+ /OLESON/csm/ccsm4_0_beta52_ndepaer
+ /OLESON/csm/ccsm4_0_beta52_ndepaertrans
+
+===============================================================
+===============================================================
+Tag name: clm4_0_04
+Originator(s): erik (erik)
+Date: Thu May 20 10:57:54 MDT 2010
+One-line Summary: New namelist items: ice_runoff, scaled_harvest, carbon_only, new
+ RTM hist vars, new finidat files, update esmf interface, turn off aerosol read quicker
+
+Purpose of changes:
+
+Redo all fndepdyn datasets for f19. Add namelist option to turn off ice-flow and send it
+to liquid runoff: ice_runoff (by default .true.). Add new coefficients for harvest from
+Johann, and add ability to trigger it on and off for backwards compatibility
+(scaled_harvest, by default .false.). Change SUPLN from CPP token to carbon_only namelist
+item. Add in new RTM variable to history files from Sean. Add in T31 1850/2000 CN/non-CN
+and 2-deg 2000 CNDV finidat files. Turn off reading of aerosol/dust at initialization
+rather than run time, so files aren't even opened if CAM is passing data to clm. Update
+lnd_comp_esmf to same as mct interface.
+
+New history fields are incorrect. This is bug 1161. Since, time-lines are critical
+and testing was completed, these changes will go in, but will be removed next week.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1161 (New history fields added that should NOT be)
+ 1197 (MPI problem sending and receiving data in same array)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: critical
+
+Describe any changes made to build system: Remove SUPLN #ifdef change to namelist option
+
+Describe any changes made to the namelist: Add namelist items
+
+ ice_runoff = If true, river runoff will be split up into liquid and ice streams,
+ otherwise ice runoff will be zero and all runoff directed to liquid stream
+ scaled_harvest = If true, harvesting will be scaled according to coefficients
+ determined by Johann Feddema, 2009
+ carbon_only = If true, and CLMCN carbon-nitrogen model is on, Nitrogen will be
+ prescribed rather than prognosed
+
+List any changes to the defaults for the boundary datasets:
+ New fndepdyn files with correct time coordinate
+ New finidat files for T31 1850/2000 and f19 2000 for CNDV
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated:
+
+>>>>>>>>>>>>>> Remove SUPLN build tests
+ D models/lnd/clm/test/system/config_files/17p_cnnsc_h
+ D models/lnd/clm/test/system/config_files/17p_cnnsc_m
+ D models/lnd/clm/test/system/config_files/17p_cnnsc_o
+ D models/lnd/clm/test/system/config_files/_cnnsc_h
+ D models/lnd/clm/test/system/config_files/_cnnsc_m
+ D models/lnd/clm/test/system/config_files/_cnnsc_o
+ D models/lnd/clm/test/system/config_files/17p_nrcnnsc_ds
+ D models/lnd/clm/test/system/config_files/17p_cnnsc_dh
+ D models/lnd/clm/test/system/config_files/17p_cnnsc_dm
+ D models/lnd/clm/test/system/config_files/17p_cnnsc_do
+ D models/lnd/clm/test/system/config_files/_cnnsc_dh
+ D models/lnd/clm/test/system/config_files/17p_cnnsc_ds
+ D models/lnd/clm/test/system/config_files/_cnnsc_dm
+ D models/lnd/clm/test/system/config_files/_cnnsc_do
+ D models/lnd/clm/test/system/config_files/_cnnsc_ds
+
+>>>>>>>>>>>>>> Remove namelist files no longer used
+ D models/lnd/clm/test/system/nl_files/scam
+ D models/lnd/clm/test/system/nl_files/ext_ccsm_seq_cam
+ D models/lnd/clm/test/system/nl_files/nl_glcsmb
+ D models/lnd/clm/test/system/nl_files/scam_prep
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>> Add ice_runoff=.false., scaled_harvest=.true., and carbon_only tests
+ A models/lnd/clm/test/system/nl_files/nl_noicertm_sclharv
+ A models/lnd/clm/test/system/nl_files/nl_cn_conly
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>> Switch SUPLN tests for carbon_only
+>>>>>>>>>>>>>> Add ice_runoff=.false., scaled_harvest=.true tests
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/config_files/README
+ M models/lnd/clm/test/system/tests_pretag_edinburgh
+ M models/lnd/clm/test/system/tests_pretag_jaguar
+ M models/lnd/clm/test/system/tests_posttag_breeze
+ M models/lnd/clm/test/system/README.testnames
+ M models/lnd/clm/test/system/tests_pretag_jaguar_nompi
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/input_tests_master
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+
+>>>>>>>>>>>>>> Remove setting of supln to off
+ M models/lnd/clm/test/system/config_files/17p_cnsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_do
+
+>>>>>>>>>>>>>> Remove SUPLN from configure
+ M models/lnd/clm/bld/configure
+ M models/lnd/clm/bld/config_files/config_definition.xml
+
+>>>>>>>>>>>>>> Add carbon_only, scaled_harvest and ice_runoff options
+>>>>>>>>>>>>>> T31 1850/2000 finidat files, f19 CNDV 2000 finidat file
+>>>>>>>>>>>>>> Add error checking, change fndepdyn files for ones with
+>>>>>>>>>>>>>> corrected time axis.
+ M models/lnd/clm/bld/build-namelist
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+
+>>>>>>>>>>>>>> Remove SUPLN #ifdef for carbon_only namelist
+ M models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90 - Correct comment
+ M models/lnd/clm/src/biogeochem/CNAllocationMod.F90 ---- Switch SUPLN for carbon_only
+ M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 ----- Remove ltype as duplicated
+
+>>>>>>>>>>>>>> Add carbon_only, scaled_harvest, and ice_runoff options
+>>>>>>>>>>>>>> Add new RTM history variables
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 ----------- Add res_sno, topo_ndx,
+ topo_slope, var_track, var_track2, frost_table, zwt_perched,
+ qflx_top_soil, qflx_snow_out, qflx_drain_perched
+ M models/lnd/clm/src/main/pftdynMod.F90 ---------------- Add CN ifdef's for harvest
+ add if for scaled_harvest or not
+ M models/lnd/clm/src/main/iniTimeConst.F90 ------------- Add CN ifdef's for ndep
+ M models/lnd/clm/src/main/histFileMod.F90 -------------- Add RTM ifdef's for frivinp_rtm
+ M models/lnd/clm/src/main/controlMod.F90 --------------- Put options in appropriate
+ RTM and CN #ifdef blocks. Add ice_runoff, scaled_harvest and carbon_only to namelist
+ M models/lnd/clm/src/main/clm_varctl.F90 --------------- Add CN/RTM #ifdefs, add
+ scaled_harvest and ice_runoff
+ M models/lnd/clm/src/main/clm_driver.F90 --------------- Add CN #ifdef for ndepdyn
+ M models/lnd/clm/src/main/ndepFileMod.F90 -------------- Add CN #ifdef
+ M models/lnd/clm/src/main/clmtype.F90 ------------------ Add res_sno, topo_ndx,
+ topo_slope, var_track, var_track2, frost_table, zwt_perched,
+ qflx_top_soil, qflx_snow_out, qflx_drain_perched
+ M models/lnd/clm/src/main/histFldsMod.F90 -------------- Add
+ FROST_TABLE, ZWT_PERCH, QDRAI_PERCH, QTOPSOIL
+
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 ----- Move lnd_chkAerDep_mct to init
+ add ice_runoff option to output rtm streams
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90 --- Move lnd_chkAerDep_mct to init,
+ add ice_runoff option to output rtm streams. And sync up with lnd_comp_mct
+
+
+Summary of testing:
+
+ bluefire interactive extra checking:
+001 smH43 TSM.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 96 cold PASS
+002 erH43 TER.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 10+38 coPASS
+003 brH43 TBR.sh 17p_cnsc_do clm_std^nl_noicertm_sclharv 20021230:NONE:1800 10x15 USGS@2000 72+72 coPASS
+005 smH93 TSM.sh 17p_cnsc_do clm_ndepdyn^nl_cn_conly 20020101:NONE:1800 4x5 gx3v7@1850-2000 96 cold PASS
+006 erH93 TER.sh 17p_cnsc_do clm_ndepdyn^nl_cn_conly 20020101:NONE:1800 4x5 gx3v7@1850-2000 10+38 coPASS
+007 brH93 TBR.sh 17p_cnsc_do clm_ndepdyn^nl_cn_conly 20020101:NONE:1800 4x5 gx3v7@1850-2000 72+72 coPASS
+ bluefire/CCSM testing: All PASS except...
+FAIL ERI.T31_g37.IG.bluefire
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_04
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_03
+ jaguar/CCSM testing: All FAIL
+FAIL ERS_D.f09_g16.I1850.jaguar
+FAIL PST.f10_f10.I8520CN.jaguar
+FAIL PET_PT.f10_f10.I8520CN.jaguar
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_03
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_03
+Originator(s): erik (erik)
+Date: Mon May 17 14:06:50 MDT 2010
+One-line Summary: Changes from Francis for VOC and drydep
+
+Purpose of changes:
+
+Changes from Francis Vitt and Jean-Francois Lamarque for VOC and drydep. Add a scaling
+factor for VOC isoprene. Get annual LAI and differences from CLMSP even when CLMCN is
+on when sending drydep to atm, as need LAI monthly differences to estimate season index.
+Get these changes to work with CN on and off and also get it to work with DEBUG mode
+on. Use clm veg indices in pftvarcon and abort drydep if don't find a wesley veg type
+index. Fix ndeplintInterp.ncl script for rcp=-999.9 historical (bug 1153). Add in quarter
+degree gx1v6 fraction dataset.
+
+Bugs fixed (include bugzilla ID):
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1158 (I_1850-2000_CN (I4804CN) inconsistent with I_1850-2000 (I4804) compset)
+ 1197 (MPI problem sending and receiving data in same array)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: qtr-degree, gx1v6 frac/domain datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, dlawren, fvitt
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+
+ scripts to scripts4_100513
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+ A models/lnd/clm/test/system/config_files/_nrcnsc_ds --- cn test without rtm or supln
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>> Add in qtr-degree fraction and domain file datasets
+>>>>>>>>>>> Make gx1v6 default mask for qtr-degree
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+
+>>>>>>>>>>> Separate out CN+SUPLN tests as H and CN only as P
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/README.testnames
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/nl_files/clm_drydep ---- correct build-nml options
+ M models/lnd/clm/test/system/input_tests_master ----- Remove 360x720 tests, add
+ drydep tests with CN and without, have start dates for drydep
+ tests span the year
+
+>>>>>>>>>>> A few small fixes to tools
+ M models/lnd/clm/tools/mksurfdata/mkvocef.F90 --------- Remove diagnostics as nonsensical
+ (also was incorrect, see bug 1157)
+ M models/lnd/clm/tools/ncl_scripts/ndeplintInterp.ncl - Fix so can work with historical case
+
+>>>>>>>>>>> Allow some CLMSP subroutines to be called even with CLMCN so that LAI can help set
+>>>>>>>>>>> the season index when dry-deposition is active (and only when dry-dep is active)
+ M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 - Change #ifdefs so that
+ some can be called from drydep even when CN is on. Don't allow
+ EcosystemDyn to be called if CN on though.
+ M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 ----- Add a scaling factor
+ for isoprene
+ M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 ----- Make winter season on
+ anytime you have snow. Use pftvarcon indices to set wesveg type from
+ clmveg type. Add landuse type and set to desert winter if not veg type.
+ (so won't abort on DEBUG mode)
+ M models/lnd/clm/src/main/clm_initializeMod.F90 -------- Also call
+ EcosystemDynini and readAnnualVegetation even if CN is on when drydep is on.
+
+Summary of testing:
+
+ bluefire: Ran 100 days of 1850CN, compared to Francis's mods and the two results were identical
+ bluefire interactive testing:
+001 smCO3 TSM.sh _sc_do clm_drydep^nl_urb 20021001:NONE:3600 10x15 USGS -10 cold ................PASS
+002 erCO3 TER.sh _sc_do clm_drydep^nl_urb 20021001:NONE:3600 10x15 USGS -3+-7 cold ..............PASS
+003 brCO3 TBR.sh _sc_do clm_drydep^nl_urb_br 20021001:NONE:3600 10x15 USGS -5+-5 cold ...........PASS
+004 blCO3 TBL.sh _sc_do clm_drydep^nl_urb 20021001:NONE:3600 10x15 USGS -30 cold ................PASS
+005 smCP3 TSM.sh _sc_do clm_drydep^nl_urb 20020317:NONE:1800 1.9x2.5 gx1v6 -15 startup ..........PASS
+006 erCP3 TER.sh _sc_do clm_drydep^nl_urb 20020317:NONE:1800 1.9x2.5 gx1v6 -3+-7 startup ........PASS
+007 brCP3 TBR.sh _sc_do clm_drydep^nl_urb_br 20020317:NONE:1800 1.9x2.5 gx1v6 -5+-5 startup .....PASS
+008 blCP3 TBL.sh _sc_do clm_drydep^nl_urb 20020317:NONE:1800 1.9x2.5 gx1v6 -15 startup ..........PASS
+001 sm654 TSMtools.sh mkgriddata tools__ds namelist .............................................PASS
+002 sm674 TSMtools.sh mkgriddata tools__ds singlept .............................................PASS
+003 sm774 TSMtools.sh mksurfdata tools__ds singlept .............................................PASS
+004 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................SKIPPED*
+005 sm853 TSMtools.sh interpinic tools__o runoptions ............................................PASS
+ bluefire/CCSM testing: All PASS except...
+BFAIL SMS_RLA.f45_f45.I.bluefire.compare.clm4_0_0+upext
+BFAIL SMS_RLB.f45_f45.I.bluefire.compare.clm4_0_0+upext
+BFAIL SMS_ROA.f45_f45.I.bluefire.compare.clm4_0_0+upext
+FAIL ERI.T31_g37.IG.bluefire
+BFAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm4_0_0+upext
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_03
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm4_0_0+upext
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_02
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_02
+Originator(s): erik (erik)
+Date: Thu May 13 00:47:40 MDT 2010
+One-line Summary: Make sure dtime is initialized, so that answers are consistently the same as clm4_0_00
+
+Purpose of changes:
+
+Make sure dtime is initialized before it is used in lnd_run_mct/lnd_run_esmf so
+that results are consistent. This bug has been around since clm3_6_36 where doalb
+logic was changed. However, until clm4_0_01 results seemed to have been consistent,
+but with clm4_0_01 results were inconsistent, and usually incorrect for nstep=1 (in
+calculating calday1 and hence doalb).
+
+Bugs fixed (include bugzilla ID):
+ 1156 (Reproducability problem with clm4_0_01)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ 1197 (MPI problem sending and receiving data in same array)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: critical
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>> Add a reproducibility test
+ A models/lnd/clm/test/system/TRP.sh
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>> Add reproducibility test
+ M models/lnd/clm/test/system/input_tests_master
+
+>>>>>>>>> Set glcmec by GLC_NEC_ $ifdefs
+ M models/lnd/clm/src/main/clm_varpar.F90
+
+>>>>>>>>> Make sno fields NOT optional, and set dtime before use in _run
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90
+
+Summary of testing:
+
+ bluefire interactive testing:
+001 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........PASS
+ bluefire/CCSM testing:
+PASS PST.f45_g37.I1850.bluefire.compare.clm4_0_0+upext
+PASS PET_PT.f45_g37.I1850.bluefire.compare.clm4_0_0+upext
+PASS ERS.f19_g16.I1850.bluefire.compare_hist.clm4_0_0+upext
+PASS ERS.f19_g16.I1850.bluefire.compare.clm4_0_0+upext
+PASS PST.f10_f10.I8520CN.bluefire.compare.clm4_0_0+upext
+PASS PET_PT.f10_f10.I8520CN.bluefire.compare.clm4_0_0+upext
+
+CLM tag used for the baseline comparison tests if applicable: clm4_0_00
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_01
+Originator(s): erik (erik)
+Date: Tue May 11 14:39:25 MDT 2010
+One-line Summary: Move glacier multiple elevation class branch to the trunk so that we can work with the active glacier model
+
+Purpose of changes:
+
+Add ability to handle glacier multiple elevation classes (glc_mec) in clm, so that we
+can interact with the active glacier component (glc). Adds glacier elevation classes
+to the surface datasets and requires they be read in when glacier multiple elevation
+classes are active. New namelist options for glc_mec include glc_smb and glc_dyntopo.
+At build-time the number of glc_mec classes is set (can be 0, 1, 3, 5, or 10). The
+model also interacts with the mask of valid glacier points that the active glacier
+model determined (input with the fglcmask file), and set by glc_grid (which can be
+gland5, gland10, or gland20 for 5-20km resolution over Greenland). glc_grid is set at
+build time, but should be moved to the build-namelist.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1156 (Reproducibility problem with clm4_0_01)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ 1197 (MPI problem sending and receiving data in same array)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: standard
+
+Describe any changes made to build system: Add glc_nec and glc_grid options to configure
+ glc_nec can be 1,3,5, or 10 and MUST match the number on the input surface dataset
+ the elevation classes themselves are read from the surface dataset
+ glc_grid can be gland5, gland10, gland20 for greenland 5, 10, or 20km resolution
+ it is merely passed on to build-namelist to pick the glcmask file
+
+Describe any changes made to the namelist:
+
+- create_glacier_mec_landunit (= T when these landunits are created; F by default)
+- glc_smb (= T if passing surface mass balance to GLC; else pass PDD info; T by default)
+- glc_dyntopo (= T if CLM topography changes dynamically; currently F)
+ (NOT fully implemented yet)
+
+ New history fields:
+ QICE ice growth/melt (mm/s)
+ QICEYR ice growth/melt (mm/s)
+ gris_mask Greenland mask (unitless)
+ gris_area Greenland ice area (km^2)
+ aais_mask Antarctic mask (unitless)
+ aais_area Antarctic ice area (km^2)
+
+Changes to build-namelist:
+
+ finidat file and possibly the fsurdat files include glc_nec values
+ Currently only support glc_nec=0 or glc_nec=10
+
+List any changes to the defaults for the boundary datasets: Update datm domain file for T31
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, jwolfe, lipscomb, dlawren
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, csm_share
+ Also add in active glacier model
+ scripts to scripts4_100510a
+ csm_share to share3_100423
+ gglc to glc4_100507
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>> Add mec tests
+ A models/lnd/clm/test/system/config_files/_mec10sc_dh
+ A models/lnd/clm/test/system/config_files/_mec10sc_dm
+ A models/lnd/clm/test/system/config_files/_mec10sc_do
+ A models/lnd/clm/test/system/config_files/_mec10sc_ds
+ A models/lnd/clm/test/system/config_files/_mec10sc_h
+ A models/lnd/clm/test/system/config_files/_mec10sc_m
+ A models/lnd/clm/test/system/config_files/_mec10sc_o
+ A models/lnd/clm/test/system/nl_files/clm_glcmec
+ A models/lnd/clm/test/system/nl_files/nl_glcsmb
+
+>>>>>>>>>> Handle passing of data from clm to the active glacier model
+ A models/lnd/clm/src/main/clm_glclnd.F90 -- handle passing data to glc model
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>> Add mec tests
+ M models/lnd/clm/test/system/tests_pretag_jaguar
+ M models/lnd/clm/test/system/input_tests_master
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/config_files/README
+ M models/lnd/clm/test/system/README.testnames
+
+>>>>>>>>>>>> Add GLC_MEC to mksurfdata, add ability to set glc_nec on namelist
+ M models/lnd/clm/tools/mksurfdata/mkglcmec.F90
+ M models/lnd/clm/tools/mksurfdata/mkvarctl.F90
+ M models/lnd/clm/tools/mksurfdata/mkfileMod.F90
+ M models/lnd/clm/tools/mksurfdata/Makefile ------ Add gfortran remove xlf90 for Darwin
+ M models/lnd/clm/tools/mksurfdata/mkvarpar.F90
+ M models/lnd/clm/tools/mksurfdata/README
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90
+
+>>>>>>>>>>>> Add some more checking for glc settings
+ M models/lnd/clm/bld/listDefaultNamelist.pl - Try to make faster, add loop over
+ glc_nec and glc_grid
+ M models/lnd/clm/bld/build-namelist --------- Get default glc_smb when
+ create_glacier_mec_landunits is on
+ M models/lnd/clm/bld/clm.cpl7.template ------ Add glc_ settings
+ M models/lnd/clm/bld/namelist_files/namelist_defaults.xsl
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Add glc_nec to
+ finidat files, add glc_nec="10" fsurdat files for T31, 1-deg, 2-deg
+ M models/lnd/clm/bld/configure
+ M models/lnd/clm/bld/config_files/Makefile.in - Add gfortran to Darwin and remove
+xlf90
+ M models/lnd/clm/bld/config_files/config_definition.xsl - Add glacier types
+ M models/lnd/clm/bld/config_files/config_definition.xml
+
+>>>>>>>>>>>> Read in glacier elevation classes from surfdata file as GLC_MEC
+>>>>>>>>>>>> require it when create_glacier_mec_landunits is .true. and use it
+>>>>>>>>>>>> to set value of glc_topomax. Add checking for glc options.
+>>>>>>>>>>>> Also remove concurrent directives
+ M models/lnd/clm/src/main/clm_varcon.F90 -------- Add h2osno_max, lapse_glcmec
+ and istice_mec, change albice when GLC_NEC>0
+ M models/lnd/clm/src/main/clm_varpar.F90 -------- Add npatch_glacier_mec
+ M models/lnd/clm/src/main/dynlandMod.F90 -------- Add checking for istice_mec
+ M models/lnd/clm/src/main/decompInitMod.F90 ----- Pass glcmask in
+ M models/lnd/clm/src/main/clm_initializeMod.F90 - Handle create_glacier_mec_landunit
+ M models/lnd/clm/src/main/ncdio.F90 ------------- Add 2D module procedures to ncd_iolocal interface
+ M models/lnd/clm/src/main/subgridMod.F90 -------- Handle create_glacier_mec_landunit if true
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 ---- Add glcmecpoi and greenland and antarctic mask/area
+ add forc_pbot, forc_rho, glc_topo, forc_t, forc_th to ces, forc_q to cws, eflx_bot to cef
+ add qflx_glcice, glc_rofi, glc_rofl
+ M models/lnd/clm/src/main/pftdynMod.F90 --------- Change comments
+ M models/lnd/clm/src/main/iniTimeConst.F90 ------ Handle istice_mec
+ M models/lnd/clm/src/main/clm_atmlnd.F90 -------- Fix comment
+ M models/lnd/clm/src/main/clm_varsur.F90 -------- Add topoxy
+ M models/lnd/clm/src/main/controlMod.F90 -------- Add create_glacier_mec_landunit, glc_dyntopo, glc_smb, fglcmask to namelist
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 - Add sno_export/import
+ M models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90 Add sno_export/import
+ M models/lnd/clm/src/main/filterMod.F90 --------- Add istice_mec
+ M models/lnd/clm/src/main/clm_varctl.F90 -------- Add fglcmask, create_glacier_mec_landunit,
+ glc_dyntopo, glc_smb, glc_nec, and glc_topomax add some error checking for them
+ M models/lnd/clm/src/main/initGridCellsMod.F90 -- Make ice sheet masks and deal with glcmask
+ M models/lnd/clm/src/main/surfrdMod.F90 --------- Read GLCMASK, GLC_MEC, PCT_GLC_MEC and TOPO_GLC_MEC when create_glacier_mec_landunit
+ M models/lnd/clm/src/main/domainMod.F90 --------- Add glcmask
+ M models/lnd/clm/src/main/clmtype.F90 ----------- Add forc_pbot, forc_rho, glc_frac, glc_topo add
+ forc_t, forc_q, eflx_bot, qflx_glcice, glc_rofi, glc_rofl, glcmecpoi, gris and aais mask/area
+ M models/lnd/clm/src/main/histFldsMod.F90 ------- Add new fields when create_glacier_mec_landunit
+ M models/lnd/clm/src/main/histFileMod.F90 ------- Add glacier_mec to notes, set_noglcmec to hist_addfld1d
+ M models/lnd/clm/src/main/mkarbinitMod.F90 ------ Set mask sno to h2osno_max, use istice_mec
+ M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90 ----- Assess if istice_mec and add qflx_glcice for glc_dyntopo
+ M models/lnd/clm/src/biogeophys/SurfaceRadiationMod.F90 - Assess if istice_mec
+ M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 -- Assess if istice_mec and add eflx_bot
+ M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90 ---- Assess if istice_mec
+ M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90 --- Move forc_pbot/forc_q/forc_t/forc_th from g to c, assess istice_mec
+ M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 ---- Assess if istice_mec
+ M models/lnd/clm/src/biogeophys/Hydrology1Mod.F90 ------- Assess if istice_mec move forc_t from g to c
+ M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 ------- Assess if istice_mec and add qflx_glcice
+ M models/lnd/clm/src/biogeophys/clm_driverInitMod.F90 --- Downscale forc_t, forc_th, forc_q, forc_pbot from gridcell to columns
+ based on surface elevation for glc_mec landunits
+ M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90 - Change forcing from g to c
+
+Summary of testing:
+
+ bluefire interactive testing: All PASS up to..
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ bluefire/CCSM testing:
+FAIL PST.f45_g37.I1850.bluefire.compare.clm3_7_15
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm3_7_15
+FAIL ERS.f19_g16.I1850.bluefire.compare_hist.clm3_7_15
+FAIL ERS.f19_g16.I1850.bluefire.compare.clm3_7_15
+FAIL ERI.T31_g37.IG.bluefire
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm3_7_15
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm4_0_01
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_15
+FAIL PST.f10_f10.I8520CN.bluefire.compare.clm3_7_15
+FAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm3_7_15
+
+PASS SMS_D.f19_g16.IG.bluefire
+PASS ERS.f19_g16.IG.bluefire
+FAIL SMS.T31_g37.IG.bluefire
+FAIL SMS.f09_g16.IG.bluefire
+
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_15
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm4_0_00
+Originator(s): erik (erik)
+Date: Tue May 4 23:02:18 MDT 2010
+One-line Summary: Update to datm8, redirect aquifer overflow to drainage, add
+ gx3v7 masks, script to extract regional datasets, add harvesting for CN,
+ modify shrubs, include urban model, ice stream for snowcapping, new
+ build-namelist system, scale solar by solar zenith angle in datm, deep
+ soil with bedrock at bottom, organic matter in soils, SNICAR for snow
+ radiation, sparse dense aero, snow cover changes
+
+Type of tag: doc
+
+Software engineering changes:
+
+ Update to cpl7 and scripts.
+ Remove offline and cpl6 modes.
+ Remove support for CASA model.
+ Update to datm8 atmospheric data model.
+ Add gx3v7 land mask for T31 and fv-4x5 horizontal resolutions.
+ Add gx1v6 land mask for f05, f09, and f19 horizontal resolutions.
+ Add tx1v1 land mask and 1.9x2.5_tx1v1 horizontal resolution.
+ Add in 2.5x3.33 horizontal resolution.
+ Add in T62 horizontal resolution so can run at same resolution as input datm data.
+ Allow first history tape to be 1D.
+ Add ability to use own version of input datasets with CLM_USRDAT_NAME variable.
+ Add a script to extract out regional datasets.
+ New build-namelist system with XML file describing all namelist items.
+ Add glacier_mec use-case and stub glacier model.
+ Add ncl script to time-interpolate between 1850 and 2000 for fndepdat dataset, for fndepdyn version.
+ Make default of maxpatch_pft=numpft+1 instead of 4.
+ Only output static 3D fields on first h0 history file to save space.
+ Add new fields for VOC (Volatile Organic Compounds) on some surface datasets
+ Add irrigation area to mksurfdata tool (NOT used in CLM yet).
+ Add multiple elevation class option for glaciers in mksurfdata tool (NOT used in CLM yet).
+ Add ascale field to land model in support of model running on its own grid.
+
+Science changes:
+
+ Change to freezing temperature constant
+ Forcing height at atm plus z0+d on each tile
+ Effective porosity divide by zero fix
+ Sparse/dense canopy aerodynamic parameters
+ Ground/snow emissivity smooth transition
+ Thermal and hydraulic properties of organic soil
+ Init h2osoi=0.3
+ Snow compaction fix
+ Snow T profile during layer splitting fix
+ Snow burial fraction
+ Snow cover fraction
+ SNICAR (snow aging, black carbon and dust deposition, vertical distribution of solar energy)
+ Remove SNOWAGE, no longer used
+ Deep soil (15 layers, ~50m), 5 new layers are hydrologically inactive bed rock
+ Ground evap (beta), stability, and litter resistance
+ Organic/mineral soil hydraulic conductivity percolation theory
+ Richards equation modifications
+ Normalization of frozen fraction of soil formulation
+ One-step solution for soil moisture and qcharge
+ Changes to rsub_max for drainage and decay factor for surface runoff
+ Fixed diurnal cycle of solar radiation in offline forcing data
+ Back to CLM3 lakes and wetlands datasets, but 1% rather than 5% threshold (same for glacier)
+ Changes to pft physiology file from CN
+ New grass optical properties
+ New surface dataset assuming no herbaceous understory
+ Direct versus diffuse radiation offline
+ New VOC model (MEGAN)
+ Snow-capped runoff goes to new ice stream and routed to ocean as ice
+ Dust model always on, LAI threshold parameter change from 0.1 to 0.3
+ Daylength control on vcmax
+ SAI and get_rad_dtime fix
+ Always run with MAXPATCH_PFT=npfts + 1 instead of 4
+ Transient land cover/use mode - datasets, energy and water balance
+ RTM sub-cycling
+ Twostream bug fix
+ Update soil colors
+ 2m relative humidity
+ Fix for aquifer leak (SoilHydrologyMod, BalanceCheckMod)
+ New nitrogen deposition file (units and sum of NOx, NHy)
+
+Quickstart to new cpl7 scripts...
+
+ cd scripts
+ ./create_newcase -help # get help on how to run create_newcase
+ ./create_newcase -case testI -mach bluefire -res f19_g16 -compset I # create new "I" case for bluefire at 1.9x2.5_gx1v6 res
+ # "I" case is clm active, datm7, and inactive ice/ocn
+ cd testI
+ ./xmlchange -help # Get help on editor for XML files
+ ./xmlchange env_conf.xml env_mach_pes # Edit configure files if needed
+ configure -case # create scripts
+ ./xmlchange env_build.xml # Edit build files if needed
+ testI.build # build model and create namelists
+ ./xmlchange env_run.xml # Edit run files if needed
+ bsub < testI.run # submit script
+ # (NOTE: edit env_run.xml to set RESUBMIT to number of times to automatically resubmit)
+Quickstart to use of regional extraction scripts and PERSONAL datasets:
+
+ # Run the script to create an area to put your files (assume CSMDATA set to standard inputdata)
+ cd scripts
+ setenv MYCSMDATA $HOME/myinputdata
+ link_dirtree $CSMDATA $MYCSMDATA
+
+ # Run the extraction for data from 52-73 North latitude, 190-220 longitude
+ # that creates 13x12 gridcell region from the f19 (1.9x2.5) global resolution over
+ # Alaska
+ cd ../models/lnd/clm/tools/ncl_scripts
+ setenv MYID 13x12pt_f19_alaskaUSA
+ getregional_datasets.pl -sw 52,190 -ne 73,220 -id $MYID -mycsmdata $MYCSMDATA
+
+ # Now create a case that uses these datasets
+ cd ../../../../../scripts
+ create_newcase -case testregional -compset I -mach bluefire -res pt1_pt1
+ cd testregional
+ $EDITOR env_conf.xml # change CLM_BLDNML_OPTS to include "-clm_usr_name $MYID" (expand $MYID)
+ $EDITOR env_mach_pes.xml # Change tasks/threads as appropriate (defaults to serial)
+ xmlchange -file env_run.xml -id DIN_LOC_ROOT_CSMDATA -val $MYCSMDATA
+
+ # Do other changes to xml files as appropriate
+ # configure as normal, then edit the datm namelist
+
+ configure -case
+
+ # Then build and run the case as normal
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ 1197 (MPI problem sending and receiving data in same array)
+Describe any changes made to build system:
+
+ Change directory structure to match CCSM.
+ Add BGP target.
+ Add choice between ESMF and MCT frameworks.
+ Start removing #ifdef and directives that supported Cray-X1 Phoenix as now decommissioned.
+ Make default of maxpatch_pft=numpft+1 instead of 4 for all configurations.
+ By default turn on CLAMP when either CN or CASA is enabled
+ New SNICAR_FRC, CARBON_AERO, and C13 CPP ifdef tokens.
+
+ New options added to configure:
+
+ -comp_intf Component interface to use (ESMF or MCT) (default MCT)
+ -nofire Turn off wildfires for bgc setting of CN (default includes fire for CN)
+ -pio Switch enables building with Parallel I/O library. [on | off] (default is on)
+ -snicar_frc Turn on SNICAR radiative forcing calculation. [on | off] (default is off)
+
+Describe any changes made to the namelist:
+
+ NOTE: build-namelist now checks the validity of your namelist you generate by looking at data in the namelist_definition.xml
+ file. In order to add new namelist items you need to change the code and also edit this file. To view information
+ on the namelist view the file:
+ models/lnd/clm/bld/namelist_files/namelist_definition.xml
+ in your browser and you'll see the names, type, description and valid_values for all namelist variables.
+
+ Changes to build-namelist:
+ Transient sim_year ranges (i.e. 1850-2000)
+ Remove cam_hist_case option.
+ Make sure options ONLY used for stand-alone testing have a "drv_" or "datm_" prefix in them and list these
+ options all together and last when asking for help from build-namelist.
+ New options to build-namelist:
+ -clm_usr_name "name" Dataset resolution/descriptor for personal datasets. Default: not used
+ Example: 1x1pt_boulderCO_c090722 to describe location,
+ number of pts, and date files created
+ New list options to build-namelist:
+ build-namelist -res list # List valid resolutions
+ build-namelist -mask list # List valid land-masks
+ build-namelist -sim_year list # List valid simulation years and simulation year ranges
+ build-namelist -clm_demand list # List namelist variables including those you could demand to be included.
+ build-namelist -use_case list # List valid use-cases
+ build-namelist -rcp list # List valid representative concentration pathways
+ # for future scenarios
+
+ List of use-cases for build-namelist:
+
+1850-2100_rcp4.5_transient = Simulate transient land-use, and aerosol deposition changes
+with historical data from 1850 to 2005 and then with the RCP4.5 scenario from MINICAM
+
+1850-2100_rcp8.5_transient = Simulate transient land-use, and aerosol deposition changes
+with historical data from 1850 to 2005 and then with the RCP8.5 scenario from MESSAGE
+
+ 1850_control = Conditions to simulate 1850 land-use
+2000-2100_rcp8.5_transient = Simulate transient land-use, and aerosol deposition changes
+with historical data from 2000 to 2005 and then with the RCP8.5 scenario from MESSAGE
+
+ 2000_control = Conditions to simulate 2000 land-use
+20thC_transient = Simulate transient land-use, and aerosol deposition changes from 1850
+to 2005
+ pergro = Perturbation error growth test with initial conditions perturbed by
+roundoff level
+ pergro0 = Perturbation error growth test with unperturbed initial conditions
+
+
+ New namelist items:
+
+ urban_hac = OFF, ON or ON_WASTEHEAT (default OFF) Flag for urban Heating and Air-Conditioning
+ OFF = Building internal temperature is un-regulated.
+ ON = Building internal temperature is bounded to reasonable range.
+ ON_WASTEHEAT = Building internal temperature is bounded and resultant waste
+ heat is given off.
+ urban_traffic = .true. or .false. (default .false.) Flag to include additional multiplicative factor of urban traffic
+ to sensible heat flux.
+ fsnowoptions = filename file for snow/aerosol optical properties (required)
+ fsnowaging = filename file for snow aging parameters (required)
+ faerdep = filename file of aerosol deposition (required)
+
+ New history variables: (note watt vs. W in units, 26 vs. 76)
+ BCDEP total BC deposition (dry+wet) from atmosphere kg/m^2/s
+ BIOGENCO biogenic CO flux uGC/M2/H
+ C13_PRODUCT_CLOSS C13 total carbon loss from wood product pools gC13/m^2/s
+ DSTDEP total dust deposition (dry+wet) from atmosphere kg/m^2/s
+ EFLX_DYNBAL dynamic land cover change conversion energy flux W/m^2
+ FGR12 heat flux between soil layers 1 and 2 watt/m^2
+ FSAT fractional area with water table at surface unitless
+ FSH_NODYNLNDUSE sensible heat flux not including correction for land use change
+ watt/m^2
+ GC_HEAT1 initial gridcell total heat content J/m^2
+ GC_HEAT2 post land cover change total heat content J/m^2 inactive
+ GC_ICE1 initial gridcell total ice content mm/s
+ GC_ICE2 post land cover change total ice content mm/s inactive
+ GC_LIQ1 initial gridcell total liq content mm
+ GC_LIQ2 initial gridcell total liq content mm inactive <<<< name??
+ H2OSNO_TOP mass of snow in top snow layer kg
+ HEAT_FROM_AC sensible heat flux put into canyon due to heat removed from air conditioning
+ watt/m^2
+ HK hydraulic conductivity mm/s inactive
+ ISOPRENE isoprene flux uGC/M2/H
+ LAND_USE_FLUX total C emitted from land cover conversion and wood product pools gC/m^2/s
+ LAND_UPTAKE NEE minus LAND_USE_FLUX, negative for uptake gC/m^2/s
+ LWup upwelling longwave radiation watt/m^2 inactive
+ MONOTERP monoterpene flux uGC/M2/H
+ NBP net biome production, includes fire, landuse, and harvest flux, positive for sink
+ gC/m^2/s
+ OCDEP total OC deposition (dry+wet) from atmosphere kg/m^2/s
+ OVOC other VOC flux uGC/M2/H
+ ORVOC other reactive VOC flux uGC/M2/H
+ PBOT atmospheric pressure Pa
+ PCO2 atmospheric partial pressure of CO2 Pa
+ PRODUCT_CLOSS total carbon loss from wood product pools gC/m^2/s
+ PRODUCT_NLOSS total N loss from wood product pools gN/m^2/s
+ Qair atmospheric specific humidity kg/kg inactive
+ Qanth anthropogenic heat flux watt/m^2 inactive
+ Qtau momentum flux kg/m/s^2
+ QFLX_LIQ_DYNBAL liq dynamic land cover change conversion runoff flux mm/s
+ QFLX_ICE_DYNBAL ice dynamic land cover change conversion runoff flux mm/s
+ QRUNOFF_NODYNLNDUSE total liquid runoff not including correction for land use change (does not include QSNWCPICE)
+ mm/s
+ QSNWCPICE excess snowfall due to snow capping mm/s
+ QSNWCPICE_NODYNLNDUSE excess snowfall due to snow capping not including correction for land use change
+ mm/s
+ QSNWCPLIQ excess rainfall due to snow capping mm/s inactive
+ SMP soil matric potential mm inactive
+ SNOAERFRC2L surface forcing of all aerosols in snow, averaged only when snow is present (land)
+ watt/m^2
+ SNOAERFRCL surface forcing of all aerosols in snow (land) watt/m^2
+ SNOBCFRCL surface forcing of BC in snow (land) watt/m^2
+ SNOBCMCL mass of BC in snow column kg/m2
+ SNOBCMSL mass of BC in top snow layer kg/m2
+ SNOdTdzL top snow layer temperature gradient (land) K/m
+ SNODSTFRC2L surface forcing of dust in snow, averaged only when snow is present (land)
+ watt/m^2
+ SNODSTFRCL surface forcing of dust in snow (land) watt/m^2
+ SNODSTMCL mass of dust in snow column kg/m2
+ SNODSTMSL mass of dust in top snow layer kg/m2
+ SNOFSRND direct nir reflected solar radiation from snow watt/m^2 inactive
+ SNOFSRNI diffuse nir reflected solar radiation from snow watt/m^2 inactive
+ SNOFSRVD direct vis reflected solar radiation from snow watt/m^2 inactive
+ SNOFSRVI diffuse vis reflected solar radiation from snow watt/m^2 inactive
+ SNOFSDSND direct nir incident solar radiation on snow watt/m^2 inactive
+ SNOFSDSNI diffuse nir incident solar radiation on snow watt/m^2 inactive
+ SNOFSDSVD direct vis incident solar radiation on snow watt/m^2 inactive
+ SNOFSDSVI diffuse vis incident solar radiation on snow watt/m^2 inactive
+ SNOLIQFL top snow layer liquid water fraction (land) fraction inactive
+ SNOOCMCL mass of OC in snow column kg/m2
+ SNOOCMSL mass of OC in top snow layer Kg/m2
+ SNOOCFRC2L surface forcing of OC in snow, averaged only when snow is present (land)
+ watt/m^2
+ SNOOCFRCL surface forcing of OC in snow (land) watt/m^2
+ SNORDSL top snow layer effective grain radius m^-6 inactive
+ SNOTTOPL snow temperature (top layer) K/m inactive <<< units?
+ SOILWATER_10CM soil liquid water + ice in top 10cm of soil kg/m2
+ SWup upwelling shortwave radiation watt/m^2 inactive
+ TSOI_10CM soil temperature in top 10cm of soil K
+ URBAN_AC urban air conditioning flux watt/m^2
+ URBAN_HEAT urban heating flux watt/m^2
+ VOCFLXT total VOC flux into atmosphere uGC/M2/H
+ Wind atmospheric wind velocity magnitude m/s inactive
+ WOOD_HARVESTC wood harvest (to product pools) gC/m^2/s
+ WOOD_HARVESTN wood harvest (to product pools) gN/m^2/s
+
+ History field name changes:
+
+ ANNSUM_PLANT_NDEMAND => ANNSUM_POTENTIAL_GPP
+ ANNSUM_RETRANSN => ANNMAX_RETRANSN
+ C13_DWT_PROD10C_LOSS => C13_PROD10C_LOSS
+ C13_DWT_PROD100C_LOSS => C13_PROD100C_LOSS
+ C13_DWT_PROD10N_LOSS => C13_PROD10N_LOSS
+ C13_DWT_PROD100C_LOSS => C13_PROD100C_LOSS
+ DWT_PROD100N_LOSS => PROD10N_LOSS
+ DWT_PROD100N_LOSS => PROD100N_LOSS
+ DWT_PROD100C_LOSS => PROD10C_LOSS
+ DWT_PROD100C_LOSS => PROD100C_LOSS
+ HCSOISNO => HC
+ TEMPSUM_PLANT_NDEMAND => TEMPSUM_POTENTIAL_GPP
+ TEMPSUM_RETRANSN => TEMPMAX_RETRANSN
+
+ History field names deleted:
+ SNOWAGE, TSNOW, FMICR, FCO2, DMI, QFLX_SNOWCAP
+
+ Add new urban oriented _U, and _R (Urban and Rural) for:
+ EFLX_LH_TOT, FGR, FIRA, FSH, FSM, Q2M, QRUNOFF, RH2M, SoilAlpha, TG, TREFMNAV, TREFMXAV, and TSA
+ (missing _R for SoilAlpha)
+
+Describe timing and memory performance:
+
+Versions of any externally defined libraries:
+
+ scripts scripts4_100108b
+ drv vocemis-drydep12_drvseq3_1_11
+ datm datm8_091218
+ socn stubs1_2_02/socn
+ sice stubs1_2_02/sice
+ sglc stubs1_2_02/sglc
+ csm_share vocemis-drydep13_share3_091217
+ esmf_wrf_timemgr esmf_wrf_timemgr_090402
+ timing timing_090929
+ mct MCT2_7_0_100106
+ pio pio60_prod
+ cprnc cprnc_081022
+
+Summary of testing:
+
+ bluefire: All PASS except...
+025 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+026 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+029 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+043 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+044 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+045 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+046 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+048 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 12+84 arb_ic ............FAIL! rc= 12
+ bluefire interactive testing: All PASS except...
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 5
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 5
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+ bluefire/CCSM testing: All PASS except
+FAIL PST.f45_g37.I1850.bluefire.compare.clm3_7_10
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm3_7_10
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm3_7_15
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_10
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm3_7_10
+BFAIL PST.f10_f10.I8520CN.bluefire.compare.clm3_7_10
+BFAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm3_7_10
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm3_7_10
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm3_7_10
+ jaguar: All PASS except..
+005 smB51 TSM.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 144 arb_ic ..............FAIL! rc= 10
+006 erB51 TER.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+007 brB51 TBR.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+026 smLI2 TSM.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+027 erLI2 TER.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+028 brLI2 TBR.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+030 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 96 arb_ic ...............FAIL! rc= 10
+031 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 12+84 arb_ic ............FAIL! rc= 5
+ jaguar interactive testing: All PASS except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+ edinburgh/ifort interactive testing: All PASS except...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+014 erOC4 TER.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 115+115 arb_FAIL! rc= 5
+015 brOC4 TBR.sh _nrvansc_ds clm_urb1pt^nl_urb_br 19920812:NONE:3600 1x1_vancouverCAN navy 72+72 arbFAIL! rc= 5
+016 blOC4 TBL.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 4
+018 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+019 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+020 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+ edinburgh/ifort: All PASS except...
+002 erA92 TER.sh _sc_dm clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v7 -3+-3 arb_ic ................FAIL! rc= 7
+003 brA92 TBR.sh _sc_dm clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v7 -3+-3 arb_ic .............FAIL! rc= 6
+006 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 72+72 cold ...............FAIL! rc= 7
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 5
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ breeze,gale,hail,gust/ifort interactive testing: All PASS except...
+011 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 5
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+023 blG53 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_5_00
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers:
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change: new climate for clm4
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ ccsm4_0_beta35
+ bluefire
+ -compset B_1850_TRACK1_CN -res f19_g16
+
+ MSS location of control simulations used to validate new climate:
+
+/DLAWREN/csm/b40.1850.track1.2deg.003.snow
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+http://www.cgd.ucar.edu/ccr/paleo/b40.snow/b40.1850.track1.2deg.003.snow-b40.1850.track1.2deg.003.control/
+
+===============================================================
+===============================================================
+Tag name: clm3_8_00
+Originator(s): erik (erik)
+Date: Tue May 4 22:39:18 MDT 2010
+One-line Summary: Get future scenarios working, finalize documentation, bring in MEGAN VOC and CNDV, simplify, mksurfdata optimization, fix bugs: snow enthalpy, BMOZ, pergro, use pft weights from fsurdat NOT finidat
+
+Purpose of changes:
+
+Get all of the future scenarios working (other than rcp=6.0) and get all of the datasets
+for these scenarios (pftdyn, fndepdyn, and aerdep files, for rcp=2.6,4.5, and 8.5).
+Finalize the User's Guide for now, with reviews from: Sam, Keith, Dave, and Sean, as
+well as more work on tools chapter, and adding testing chapter in appendix. We brought
+in the MEGAN version of the Volatile Organic Compounds (VOC) module which also reads
+in VOC emission factors from the surface dataset, and hence all fsurdat files needed
+to be replaced. Along with this the mksurfdata tool was changed in order to handle VOC's
+and effort was made to optimize it, add shared memory parallelism, and do memory
+optimization. We also removed the old Dynamic Global Vegetation Model (DGVM) and replaced
+it with the Carbon Nitrogen Dynamic Vegetation model (CNDV). Make some simplifications
+in the configure system to always use the CCSM version of build files, remove some
+unused options, put standalone test options last in configure. Improve documentation in
+XML files for configure and build-namelist options.
+
+Fix many different bugs. Enthalpy in snow combination was sometimes NOT conserved and now
+is. There was a problem running DryDeposition for the BMOZ compset that is now fixed.
+There were multiple issues running PERGRO testing that is now fixed. Previously, if
+both the finidat file and the fsurdat file had PFT weights on them, the values from
+the finidat file was used, now it will use the values from the fsurdat file. There are
+also several cases where if the weights are different it will abort with an error, or
+at least send a message to the log file about the differences.
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: doc
+
+Describe any changes made to build system:
+ Switch cndv for dgvm, Have configure disallow bad cases
+ Change configure to NOT allow supln with spinup cases
+ Do NOT allow exit_spinup and ad_spinup at the same time.
+ Only allow voc to be set for seq_ccsm NOT ext_ccsm_seq
+ Remove carbon_aero and pio from configure (always build with pio)
+ Remove unused configure options: clm_exe, -clm_exedir, and -clm_bld.
+ Move standalone testing options to the end of help. Use CCSM version
+ of mkSrcfiles/mkDepends, separate config vars into categories, work
+ on documentation with comments from Keith/Sam.
+
+Describe any changes made to the namelist:
+ Add 2000-2100 simulation year range as allowed option
+
+ Two new history fields:
+
+ TSOI_10CM = soil temperature in top 10cm of soil (K)
+ SOILWATER_10CM = soil liquid water + ice in top 10cm of soil (kg/m2)
+
+ Check for some files based on rcp (fpftdyn, ndepdyn, and aerdep)
+
+ Add in ability to add a user namelist in your case directory to input
+ namelist items at configure time. Simply add a file called "user_nl_clm"
+ as a valid namelist and the items in that namelist will show up in the initial
+ BuildConf/clm.buildnml.csh file.
+
+List any changes to the defaults for the boundary datasets:
+ get urbanc_alpha grid and frac files in
+ get in new single-point datasets
+ new qtr-degree, T62 and T85 fsurdat
+ new f10, f05, f09, 1850 fsurdat
+ new rcp=8.5, f19 pftdyn,
+ new rcp=4.5 f09, f19, f10 pftdyn
+ new rcp=2.6 f09, f19, f10 pftdyn
+ new rcp=8.5/4.5/2.6 f19 aerdep 1850-2100 datasets
+ new rcp=8.5/4.5 f19 fndepdyn 1850-2100 datasets
+ new rcp=2.6/4.5 f10, f45, f25, f09 aerdep/ndepdyn datasets
+ new rcp=2.6/4.5,8.5 f19 decadal averages for ndepdat
+ (Note: harvest was updated in PFTDYN files and raw PFT input files for 2006).
+ New 10x15 and 4x5 finidat files so that transient cases will work at those resolutions
+ New finidat files for 1-deg and 2-deg (from fully coupled simulations)
+ New datasets for I cases that are set in scripts
+ Duplicate cn datasets for cndv
+ New pft-physiology files with extra fields for CNDV
+ Remove 360x720 files, gx3v5, gx1v5 files
+ Remove 1x1.25, 2x2.5, and 2.5x3.33 grid resolutions
+ Remove gx1v3, gx1v4, gx1v5 land masks, add drydep defaults.
+ add mksrf_fvegtyp@1000-1004
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by:
+ snow changes came from dlawren and also reviewed by oleson
+ PFT weight change also reviewed by: dlawren, slevis, oleson
+ CNDV came from slevis
+ VOC changes came from Francis Vitt and Jean-Francois Lamarque
+ history changes came from Keith Oleson, reviewed by Dave Lawrence
+ OpenMP bug fix came from Mariana-Vertenstein, reviewed by Pat Worley
+
+List any svn externals directories updated (csm_share, mct, etc.): all
+ scripts to scripts4_100406a
+ drv to drvseq3_1_23
+ datm to datm8_100406
+ csm_share to share3_100407
+ pio to pio1_0_18
+ timing to timing_091021
+
+Summary of testing:
+
+ bluefire: All PASS except...
+025 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+026 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+029 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+043 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+044 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+045 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+046 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+048 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 12+84 arb_ic ............FAIL! rc= 12
+ bluefire interactive testing: All PASS except...
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 5
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 5
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+ bluefire/CCSM testing: All PASS except
+FAIL PST.f45_g37.I1850.bluefire.compare.clm3_7_10
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm3_7_10
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm3_7_15
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_10
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm3_7_10
+BFAIL PST.f10_f10.I8520CN.bluefire.compare.clm3_7_10
+BFAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm3_7_10
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm3_7_10
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm3_7_10
+ jaguar: All PASS except..
+005 smB51 TSM.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 144 arb_ic ..............FAIL! rc= 10
+006 erB51 TER.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+007 brB51 TBR.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+026 smLI2 TSM.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+027 erLI2 TER.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+028 brLI2 TBR.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+030 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 96 arb_ic ...............FAIL! rc= 10
+031 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 12+84 arb_ic ............FAIL! rc= 5
+ jaguar interactive testing: All PASS except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+ edinburgh/ifort interactive testing: All PASS except...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+014 erOC4 TER.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 115+115 arb_FAIL! rc= 5
+015 brOC4 TBR.sh _nrvansc_ds clm_urb1pt^nl_urb_br 19920812:NONE:3600 1x1_vancouverCAN navy 72+72 arbFAIL! rc= 5
+016 blOC4 TBL.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 4
+018 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+019 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+020 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+ edinburgh/ifort: All PASS except...
+002 erA92 TER.sh _sc_dm clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v7 -3+-3 arb_ic ................FAIL! rc= 7
+003 brA92 TBR.sh _sc_dm clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v7 -3+-3 arb_ic .............FAIL! rc= 6
+006 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 72+72 cold ...............FAIL! rc= 7
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 5
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ breeze,gale,hail,gust/ifort interactive testing: All PASS except...
+011 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 5
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+023 blG53 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_00
+
+Changes answers relative to baseline: Yes!
+ snow change has a small effect on climate (see below)
+ Bringing in MEGAN VOC changes answers for VOC fluxes in a diagnostic way
+ Changing to use weights from fsurdat file rather than finidat file, changes
+ answers for cases with finidat startup files, if the weights are different.
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers:
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change: same climate
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ ccsm4_0_beta35
+ bluefire
+ -compset B_1850_TRACK1_CN -res f19_g16
+
+ MSS location of control simulations used to validate new climate:
+
+/DLAWREN/csm/b40.1850.track1.2deg.003.snow
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+http://www.cgd.ucar.edu/ccr/paleo/b40.snow/b40.1850.track1.2deg.003.snow-b40.1850.track1.2deg.003.control/
+http://www.cgd.ucar.edu/ccr/dlawren/research/clm4.0_dev/b40.1850.track1.2deg.003.snowa-b40.1850.track1.2deg.003.controla/setsIndex.html
+
+===============================================================
+===============================================================
+Tag name: clm3_7_15
+Originator(s): erik (erik)
+Date: Tue Apr 27 10:13:57 MDT 2010
+One-line Summary: Finish User's Guide, surfdata files for urban-1pt, fix mksurfdata ifort bugs, work with testing
+
+Purpose of changes:
+
+Fix all urban single-point datasets (mexicocity, urbanc_alpha), fix get_regional script
+to work. Add more documentation on mksurfdata to users-guide, add pergro procedure
+examples, more to testing section. Remove "moving the sun" warning. Fix
+ndeplintInterp.ncl and getregional_datasets.ncl scripts.
+
+Bugs fixed (include bugzilla ID):
+ 1125 (T85, qtr-degree and urban pt surface datasets)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1150 (Bug in indices in getregional_datasets.ncl script)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Type of tag: std-test
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: New fsurdat files for urban-1p datasets
+ 1x1_vancouverCAN, 1x1_mexicocityMEX, 1x1_urbanc_alpha, 1x1_asphaltjungleNJ
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): datm
+
+ datm to datm8_100406
+
+List all files eliminated:
+
+>>>>>>>>>>> Remove test lists no longer used
+ D models/lnd/clm/test/system/tests_pretag_bangkok
+ D models/lnd/clm/test/system/tests_pretag_calgary
+ D models/lnd/clm/test/system/tests_posttag_lightning_nompi
+
+List all files added and what they do:
+
+>>>>>>>>>>> Add plot of pergro testing
+ A models/lnd/clm/doc/UsersGuide/pergro.jpg
+>>>>>>>>>>> Add in plotting for pergro testing
+ A models/lnd/clm/tools/ncl_scripts/RMSbluefire.dat
+ A models/lnd/clm/tools/ncl_scripts/RMSintrepid.dat
+ A models/lnd/clm/tools/ncl_scripts/pergroPlot.ncl
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>> Remove tests no longer used, work on documentation
+ M models/lnd/clm/test/system/test_driver.sh ---- Remove calgary, work on doc
+ M models/lnd/clm/test/system/gen_test_table.sh - Remove note for calgary
+ M models/lnd/clm/test/system/README ------------ Clarify documentation
+ M models/lnd/clm/test/system/CLM_runcmnd.sh ---- Remove calgary
+>>>>>>>>>>> Fix ifort compiler problems, point to $CSMDATA locations of 1850
+>>>>>>>>>>> and 2000 PFT datasets
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.globalirrig ---- Change path
+ M models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr2000.txt - Change path
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.regional ------- Change path
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pftdyn --------- Change path
+ M models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr1850.txt - Change path
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept ------- Change path
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist ------- Change path
+ M models/lnd/clm/tools/mksurfdata/README -------------------- Update documentation
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 -------------- outnc_double to
+ .true., fix implicit none statements, add documentation
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl ------------- Add ability to set
+ inputdata directory
+ M models/lnd/clm/tools/mksurfdata/creategridMod.F90 --------- Change where to loops
+>>>>>>>>>>> Fix bug in time axis and getregional indices
+ M models/lnd/clm/tools/ncl_scripts/ndeplintInterp.ncl - Fix time axis and check it
+ M models/lnd/clm/tools/ncl_scripts/README ------------- Add note about pergroPlot
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl
+>>>>>>>>>>> Fix CLM_USRDAT_NAME and add urban 1pt datasets
+ M models/lnd/clm/bld/clm.cpl7.template ------------------------ Set resolution for CLM_USRDAT_NAME
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - New urban pt1
+ datasets: 1x1_vancouverCAN, 1x1_mexicocityMEX, 1x1_urbanc_alpha, 1x1_asphaltjungleNJ
+>>>>>>>>>>> Remove SNICAR message about moving the sun
+ M models/lnd/clm/src/biogeophys/SNICARMod.F90
+>>>>>>>>>>> Finish off current User's Guide
+>>>>>>>>>>> Bring rel03->rel04 updates in, run ispell on everything
+>>>>>>>>>>> Work on mksurfdata and testing sections, give instructions for pergro
+>>>>>>>>>>> Add help from mksurfdata.pl and test_driver.sh scripts
+ M models/lnd/clm/doc/KnownBugs -- Add note that mkgriddata can not straddle Greenwich
+ M models/lnd/clm/doc/UsersGuide/tools.xml
+ M models/lnd/clm/doc/UsersGuide/preface.xml
+ M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+ M models/lnd/clm/doc/UsersGuide/adding_files.xml
+ M models/lnd/clm/doc/UsersGuide/appendix.xml
+ M models/lnd/clm/doc/UsersGuide/config_cache.xml
+ M models/lnd/clm/doc/UsersGuide/custom.xml
+ M models/lnd/clm/doc/UsersGuide/single_point.xml
+ M models/lnd/clm/doc/UsersGuide/Makefile
+ M models/lnd/clm/doc/UsersGuide/addco2_datm.buildnml.diff
+ M models/lnd/clm/doc/UsersGuide/special_cases.xml
+
+Summary of testing:
+
+ bluefire: All PASS except...
+025 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+026 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+029 smG55 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+043 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+044 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+045 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+046 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+048 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 12+84 arb_ic ............FAIL! rc= 12
+ bluefire interactive testing: All PASS except...
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 5
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 5
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+ bluefire/CCSM testing: All PASS except
+FAIL PST.f45_g37.I1850.bluefire.compare.clm3_7_10
+FAIL PET_PT.f45_g37.I1850.bluefire.compare.clm3_7_10
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm3_7_15
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_10
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm3_7_10
+BFAIL PST.f10_f10.I8520CN.bluefire.compare.clm3_7_10
+BFAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm3_7_10
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm3_7_10
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm3_7_10
+ jaguar: All PASS except..
+005 smB51 TSM.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 144 arb_ic ..............FAIL! rc= 10
+006 erB51 TER.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+007 brB51 TBR.sh _scsnf_dh clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+026 smLI2 TSM.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+027 erLI2 TER.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+028 brLI2 TBR.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+030 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 96 arb_ic ...............FAIL! rc= 10
+031 erL58 TER.sh _sc_dh clm_std^nl_crcrop 20020115:NONE:1800 10x15 USGS 12+84 arb_ic ............FAIL! rc= 5
+ jaguar interactive testing: All PASS except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+ edinburgh/ifort interactive testing: All PASS except...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+014 erOC4 TER.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 115+115 arb_FAIL! rc= 5
+015 brOC4 TBR.sh _nrvansc_ds clm_urb1pt^nl_urb_br 19920812:NONE:3600 1x1_vancouverCAN navy 72+72 arbFAIL! rc= 5
+016 blOC4 TBL.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 4
+018 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+019 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+020 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+ edinburgh/ifort: All PASS except...
+002 erA92 TER.sh _sc_dm clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v7 -3+-3 arb_ic ................FAIL! rc= 7
+003 brA92 TBR.sh _sc_dm clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v7 -3+-3 arb_ic .............FAIL! rc= 6
+006 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 72+72 cold ...............FAIL! rc= 7
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 5
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ breeze,gale,hail,gust/ifort interactive testing: All PASS except...
+011 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 5
+020 smG53 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG53 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG53 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+023 blG53 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_14
+
+Changes answers relative to baseline: No bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_7_14
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Thu Apr 8 16:15:35 MDT 2010
+One-line Summary: Fix rcp=2.6/4.5 1-degree fndepdyn filenames
+
+Purpose of changes:
+
+Fix the names of the rcp=2.6/4.5 1-degree fndepdyn filenames (had a 100208 file creation
+date but should be 100407).
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1125 (T85, qtr-degree and urban pt surface datasets)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: Fix fndepdyn filenames
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Fix fndepdyn filenames
+
+Summary of testing: None
+
+===============================================================
+===============================================================
+Tag name: clm3_7_13
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Thu Apr 8 10:53:06 MDT 2010
+One-line Summary: Add in missing rcp=2.6/6 use-cases, and fix syntax errors in the namelist_defaults file
+
+Purpose of changes:
+
+Add in missing use cases for rcp=2.6 and rcp=6 transient future scenarios. Fix syntax errors in the namelist_defaults_clm
+file.
+
+Bugs fixed (include bugzilla ID): Above two problems
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1125 (T85, qtr-degree and urban pt surface datasets)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+A models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp2.6_transient.xml
+A models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp6_transient.xml
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Fix syntax errors
+
+Summary of testing: None, other than script creation testing
+
+===============================================================
+===============================================================
+Tag name: clm3_7_12
+Originator(s): erik (erik)
+Date: Thu Apr 8 00:30:30 MDT 2010
+One-line Summary: rcp=2.6/4.5 datasets for fndepdyn and aerdepdat, fix some minor issues, new 1pt urban surfdata files
+
+Purpose of changes:
+
+Add in urban single-point surfdata files. Add in regridded ndepdyn/aerdep files: f09,
+f45, f10, f25. Fix name of f05, 1850 fsurdat file, add in new urban single point
+datasets, add back urbanc_alpha grid/frac files. Put rcp in filenames for aerdep/ndep
+regrid scripts. chomp frac filename in mksurfdata.pl, for urban single-point files. Make
+OPT=TRUE default for mksurfdata. Lengthen allowed gridname for mksurfdata.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1125 (T85, qtr-degree and urban pt surface datasets)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ aerdep/ndepdyn for rcp=2.6/4.5 f10, f45, f25, f09
+ fix name of f05 fsurdata file for 1850
+ get urbanc_alpha grid and frac files in
+ get in new urban single-point datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ csm_share to share3_100407
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/tools/mksurfdata/Makefile ------ make OPT=TRUE the default
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 -- lengthen gridname to 32
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pl - make sure to chomp fracdata file
+
+M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl - Add rcp to filename
+M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl --- Add rcp to filename
+
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - New datasets
+ aerdep/ndepdyn for rcp=2.6/4.5 f10, f45, f25, f09
+ fix name of f05 fsurdata file for 1850
+ get urbanc_alpha grid and frac files in
+ get in new urban single-point datasets
+
+Summary of testing:
+
+ bluefire interactive testing: All PASS except up to 014 smJ74
+010 smNB4 TSM.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 10
+011 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+012 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+010 smCA8 TSM.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 10
+
+===============================================================
+===============================================================
+Tag name: clm3_7_11
+Originator(s): erik (erik)
+Date: Wed Apr 7 11:59:22 MDT 2010
+One-line Summary: qtr-degree and T85 surfdata, rcp=2.6/4.5 datasets, doc updates
+
+Purpose of changes:
+
+Documentation updates, for users guide and namelist and configure xml files (rel04 to
+rel05 update). Fix missing deallocate (bug 1133), and line length for NEE. Changes in
+mksurfdata so that will run for qtr-degree. New rcp datasets for 4.5 and 2.6, aerdep
+(only f19)/ndepdyn/pftdyn datasets. Fix CN spinup test, fix test name for bluefire tests.
+Add in qtr-degree and T85 surfdata files.
+
+Bugs fixed (include bugzilla ID):
+ 1141 (CN spinup test)
+ 1137 (qtr-deg fsurdat)
+ 1136 (line length for NEE in histFlds)
+ 1135 (miss smG45 test)
+ 1133 (missing deallocate)
+ 1125 (T85, qtr-degree and urban pt surface datasets)
+ (partial T85 and qtr-degree)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1125 (T85, qtr-degree and urban pt surface datasets)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ new qtr-degree and T85 fsurdat
+ new rcp=8.5, f19 pftdyn, rcp=4.5 f09, f19, f10 pftdyn
+ new rcp=2.6 f09, f19, f10 pftdyn
+ new rcp=4.5/2.6 f19 aerdep 1850-2100 datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv
+ scripts to scripts4_100406a
+ drv to drvseq3_1_23
+
+List all files eliminated:
+
+ D models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850.txt -- Rename with _hist_
+ D models/lnd/clm/tools/mksurfdata/pftdyn_simyr2000.txt -- Rename with _hist_
+
+List all files added and what they do:
+
+ A models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr2000.txt - Renamed from above
+ A models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr1850.txt - Renamed from above
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>> Turn supln off for spinup modes
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_do
+ M models/lnd/clm/test/system/tests_pretag_bluefire --------------- Change name of missing test smG45->smG55
+
+>>>>>>>>>>>>> Change names of pftdyn text files to include _hist_
+>>>>>>>>>>>>> Memory updates so uses less memory (allocate just before needed
+>>>>>>>>>>>>> deallocate after done). This is from the ccsm4_0_rel05 update
+>>>>>>>>>>>>> Mariana started the changes and Erik added some more.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.globalirrig
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.regional
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept
+ M models/lnd/clm/tools/mksurfdata/README
+ M models/lnd/clm/tools/mksurfdata/mkurbanparMod.F90 ----- Memory updates
+
+ M models/lnd/clm/bld/clm.cpl7.template - Use $CASETOOLS for Makefile
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -
+ new qtr-degree and T85 fsurdat
+ new rcp=8.5, f19 pftdyn, rcp=4.5 f09, f19, f10 pftdyn
+ new rcp=2.6 f09, f19, f10 pftdyn
+ new rcp=4.5/2.6 f19 aerdep 1850-2100 datasets
+
+>>>>>>>>>>>>> Documentation updates updating from rel04 to ccsm4_0_rel05
+ M models/lnd/clm/bld/configure -------------------------- change CVS to SVN
+ M models/lnd/clm/bld/config_files/config_definition.xsl - Correct name
+ M models/lnd/clm/bld/config_files/config_definition.xml - Add category for maxpft
+
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml - Work with categories
+ and improve descriptions, remove rpntpath
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl - Separate out
+ stand-alone testing categories, improve documentation, work
+ on categories.
+
+>>>>>>>>>>>>> Documentation updates updating from rel04 to ccsm4_0_rel05
+>>>>>>>>>>>>> Updates from Sam, Keith, and Sean, more doc on tools and appendix
+ M models/lnd/clm/doc/UsersGuide/co2_streams.txt
+ M models/lnd/clm/doc/UsersGuide/tools.xml
+ M models/lnd/clm/doc/UsersGuide/preface.xml
+ M models/lnd/clm/doc/UsersGuide/clm_stylesheet.dsl
+ M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+ M models/lnd/clm/doc/UsersGuide/adding_files.xml
+ M models/lnd/clm/doc/UsersGuide/appendix.xml
+ M models/lnd/clm/doc/UsersGuide/custom.xml
+ M models/lnd/clm/doc/UsersGuide/single_point.xml
+ M models/lnd/clm/doc/UsersGuide/Makefile
+ M models/lnd/clm/doc/UsersGuide/addco2_datm.buildnml.diff
+ M models/lnd/clm/doc/UsersGuide/special_cases.xml
+ M models/lnd/clm/doc/KnownBugs --------------------------- Add notes on:
+ bugzilla bugs: 669, 1024, 1124, 1125, 1127
+
+ M models/lnd/clm/src/main/accFldsMod.F90 -- Deallocate outside #ifdef
+ M models/lnd/clm/src/main/histFldsMod.F90 - Shorten long line for NEE
+
+Summary of testing:
+
+ bluefire: All PASS up to 024, blHN1 test except...
+025 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+026 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ bluefire interactive testing: All PASS except (up to 26 brAK8 test)
+006 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+007 erCA4 TER.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ....FAIL! rc= 5
+008 brCA4 TBR.sh _nrsc_ds clm_std^nl_urb_br 20021001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic .FAIL! rc= 5
+009 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 4
+010 smNB4 TSM.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 10
+011 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+012 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+018 smCA8 TSM.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 10
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 4
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+ jaguar interactive testing: All PASS up to smAK4 test except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+008 blAK4 TBL.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold ............SKIPPED*
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+008 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_10
+
+Changes answers relative to baseline: no (b4b)
+
+===============================================================
+===============================================================
+Tag name: clm3_7_10
+Originator(s): erik (erik)
+Date: Mon Mar 22 23:54:48 MDT 2010
+One-line Summary: Fix drydep so that BMOZ case will work
+
+Purpose of changes:
+
+Update externals, fix drydep bug (so that BMOZ case will run bug 1132). Add 10x15 and 4x5
+finidat files, so that bluefire.clm.auxtest PET and PST cases will work. Fix
+documentation on transient CO2.
+
+Bugs fixed (include bugzilla ID):
+ 1132 (clm failure for BMOZ compset)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1125 (T85, qtr-degree and urban pt surface datasets)
+ 1133 (missing deallocate)
+ 1135 (miss smG45 test)
+ 1136 (line length for NEE in histFlds)
+ 1137 (qtr-deg fsurdat)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1141 (CN spinup test)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ New 10x15 and 4x5 finidat files so that transient cases will work at those resolutions
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm, pio
+ Update to ccsm4_0_beta47 versions
+ scripts to scripts4_100322b
+ drv to drvseq3_1_20
+ datm to datm8_100225
+ pio to pio1_0_18
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/doc/UsersGuide/Makefile ---------- Fix conversion to xml
+M models/lnd/clm/doc/UsersGuide/special_cases.xml - Fix transient CO2 doc
+M models/lnd/clm/doc/UsersGuide/addco2_datm.buildnml.diff - Fix CO2 file
+
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pl --- Fix historical rcp value
+M models/lnd/clm/tools/interpinic/runinit_ibm.csh - Fix so will run
+
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Add 10x15, 4x5
+ finidat files
+
+M models/lnd/clm/src/main/clm_comp.F90 ------- check drydep_method
+M models/lnd/clm/src/main/clmtypeInitMod.F90 - check drydep_method
+M models/lnd/clm/src/main/clm_atmlnd.F90 ----- check drydep_method, don't pass
+ drydep stuff unless drydep_method is DD_XLND
+
+Summary of testing:
+
+ bluefire/CCSM testing: All PASS except...
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm3_7_10
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_07
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_09
+
+Changes answers relative to baseline: No bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_7_09
+Originator(s): erik (erik)
+Date: Sun Mar 21 21:08:54 MDT 2010
+One-line Summary: Fix snow enthalpy bug, cndv datasets, various fixes
+
+Purpose of changes:
+
+Fix snow enthalpy bug from Dave Lawrence. Add rcp to mksurfdata.pl. Add new 2006 datasets
+for pftdyn files for mksurfdata. Fix history bug. New rcp 8.5 1-degree pftdyn dataset.
+Duplicate all cn datasets for cndv. pergro use cases output in double precision. Some
+work on documentation.
+
+Bugs fixed (include bugzilla ID):
+ 1128 (cndv needs the same input files as cn)
+ 1130 (History problem on restarts)
+ 1131 (pergro use cases need double output files)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1124 (Reported energy for pftdyn grid-cell not right)
+ 1125 (T85, qtr-degree and urban pt surface datasets)
+ 1132 (clm failure for BMOZ compset)
+ 1133 (missing deallocate)
+ 1135 (miss smG45 test)
+ 1136 (line length for NEE in histFlds)
+ 1137 (qtr-deg fsurdat)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1141 (CN spinup test)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+
+ New 1-degree rcp=8.5 pftdyn dataset with harvest for 2006
+ Duplicate cn datasets for cndv
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+ snow changes come from dlawren and also reviewed by oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>>>>>>> Add AIM rcp datasets
+A models/lnd/clm/tools/mksurfdata/pftdyn_rcp6.0_simyr1850-2100.txt
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>> New 2006 file
+M models/lnd/clm/tools/mksurfdata/pftdyn_rcp2.6_simyr1850-2100.txt - New 2006 file
+M models/lnd/clm/tools/mksurfdata/pftdyn_rcp4.5_simyr1850-2100.txt - New 2006 file
+M models/lnd/clm/tools/mksurfdata/pftdyn_rcp8.5_simyr1850-2100.txt - New 2006 file
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pl - Add rcp as argument
+M models/lnd/clm/tools/mksurfdata/README -------- Document mksurfdata.pl and rcp files
+
+M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl - Get rid of old masks
+
+M models/lnd/clm/tools/interpinic/runinit_ibm.csh - Get rid of old masks/grids, and sim_year
+M models/lnd/clm/tools/interpinic/README ---------- Update documentation
+
+M models/lnd/clm/tools/mkgriddata/mkgriddata.ccsm_dom - Add clm grid file
+M models/lnd/clm/tools/mkgriddata/README ------------- More documentation
+
+M models/lnd/clm/bld/namelist_files/use_cases/pergro.xml ------ Output history in double
+M models/lnd/clm/bld/namelist_files/use_cases/pergro0.xml ----- Output history in double
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Duplicate cn files for cndv
+ New 1-degree rcp=8.5 pftdyn file
+
+M models/lnd/clm/src/main/histFileMod.F90 - Make sure 3D fields defined before output
+
+M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90 - conserve enthalpy on snow combination
+
+M models/lnd/clm/doc/IMPORTANT_NOTES ----- Add notes about fine-mesh
+M models/lnd/clm/doc/UsersGuide/Makefile - Remove file for realclean
+
+Summary of testing:
+
+ bluefire:
+003 brA91 TBR.sh _sc_dh clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v7 -3+-3 arb_ic .............FAIL! rc= 11
+004 blA91 TBL.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v7 -6 arb_ic ...................FAIL! rc= 7
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v7 144 cold .................FAIL! rc= 7
+010 brE91 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v7 72+72 arb_ic ......FAIL! rc= 11
+011 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 48 arb_ic ............FAIL! rc= 7
+015 brF92 TBR.sh 17p_vodsrsc_dm clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v7 72+72 cold .......FAIL! rc= 11
+016 blF92 TBL.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 48 cold .............FAIL! rc= 7
+019 brEH1 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 24+24 arbFAIL! rc= 13
+020 blEH1 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 7
+023 brHN1 TBR.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -5+-5 cFAIL! rc= 13
+024 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -10 colFAIL! rc= 7
+025 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+026 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ bluefire interactive testing:
+003 brA74 TBR.sh _nrsc_ds clm_std^nl_urb_br 20030101:NONE:1800 1x1_brazil navy -5+-5 arb_ic .....FAIL! rc= 13
+006 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+007 erCA4 TER.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ....FAIL! rc= 5
+008 brCA4 TBR.sh _nrsc_ds clm_std^nl_urb_br 20021001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic .FAIL! rc= 5
+009 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 4
+010 smNB4 TSM.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 10
+011 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+012 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+016 brJ74 TBR.sh 4p_nrcasasc_ds clm_std^nl_urb_br 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_icFAIL! rc= 11
+018 smCA8 TSM.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 10
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 4
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+031 brF93 TBR.sh 17p_vodsrsc_do clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v7 72+72 cold .......FAIL! rc= 11
+032 blF93 TBL.sh 17p_vodsrsc_do clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 48 cold .............FAIL! rc= 7
+035 brL83 TBR.sh _nrsc_do clm_std^nl_urb_br 20020115:NONE:3600 5x5_amazon navy -10+-10 arb_ic ...FAIL! rc= 13
+040 blK74 TBL.sh 17p_cndvsc_s clm_std 19971231:NONE:1800 1x1_brazil navy -670 arb_ic ............FAIL! rc= 7
+045 bl754 TBLtools.sh mksurfdata tools__s namelist ..............................................FAIL! rc= 7
+047 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 7
+049 bl754 TBLtools.sh mksurfdata tools__s namelist ..............................................FAIL! rc= 2
+ jaguar interactive testing:
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+ edinburgh/ifort: interactive testing:
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+009 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+010 erCA4 TER.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ....FAIL! rc= 5
+011 brCA4 TBR.sh _nrsc_ds clm_std^nl_urb_br 20021001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic .FAIL! rc= 5
+013 smOC4 TSM.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 10
+014 erOC4 TER.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 115+115 arb_FAIL! rc= 5
+015 brOC4 TBR.sh _nrvansc_ds clm_urb1pt^nl_urb_br 19920812:NONE:3600 1x1_vancouverCAN navy 72+72 arbFAIL! rc= 5
+017 smNB4 TSM.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 10
+018 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+019 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+ edinburgh/ifort
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_08
+
+Changes answers relative to baseline: YES
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers:
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change: same climate
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ ccsm4_0_beta35
+ bluefire
+ -compset B_1850_TRACK1_CN -res f19_g16
+
+ MSS location of control simulations used to validate new climate:
+
+/DLAWREN/csm/b40.1850.track1.2deg.003.snow
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+http://www.cgd.ucar.edu/ccr/paleo/b40.snow/b40.1850.track1.2deg.003.snow-b40.1850.track1.2deg.003.control/
+http://www.cgd.ucar.edu/ccr/dlawren/research/clm4.0_dev/b40.1850.track1.2deg.003.snowa-b40.1850.track1.2deg.003.controla/setsIndex.html
+
+===============================================================
+===============================================================
+Tag name: clm3_7_08
+Originator(s): Mariana Vertenstein (mvertens)
+Date: Fri Mar 12 13:26:09 MST 2010
+One-line Summary: Removal of check for weights if dynamic land use is used
+
+Purpose of changes:
+This one line change enabled the vast majority of the CCSM tests to pass.
+
+Verified that ERI 20th century tests in CCSM test suite for ccsm4_0_beta46 now passed with this change
+
+This was reviewed by Dave Lawrence and Sam Levis
+
+Everything from clm3_7_07 applies except for the following:
+
+M biogeophys/BiogeophysRestMod.F90
+ - if ( nsrest == 1 .or. (nsrest == 3 .and. fpftdyn /= ' ') )then
+ - ! Do NOT do any testing for restart or a pftdyn branch case
+ + if ( nsrest == 1 .or. fpftdyn /= ' ' )then
+ + ! Do NOT do any testing for restart or a pftdyn case
+ also added in a #if CNDV
+
+M biogeochem/CNDVEstablishmentMod.F90
+M main/clmtypeInitMod.F90
+ - fix for case when leaf area index is pathologically large
+ the original fix for this was no longer working - and the above changes address this
+
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_clm.xml
+ - fixed issues related to getting fndepdat for -bgc cndv
+
+===============================================================
+Tag name: clm3_7_07
+Originator(s): erik (erik)
+Date: Wed Mar 10 23:35:37 MST 2010
+One-line Summary: New finidat datasets for 1-deg, 2-deg, and abort if weights from finidat/fsurdat files are too different, and use fsurdat files as truth
+
+Purpose of changes:
+
+Use surfdata weights and stop if finidat file weights are too different. Use ccsm4init
+datasets for finidat files for 1-deg and 2-deg. In I compsets, setup for special I case
+finidat files. Update scripts. Drydep changes from Francis. Change cell_method to
+cell_methods. Allow clm_start_type to be overridden if on use_case. Only set orb_iyearad
+for standalone clm testing. Allow vars on use_cases to not be set for some configs. Make
+sure all BGC modes are set for variables on use_cases. Set cold-start for pergro cases.
+Update documentation.
+
+Bugs fixed (include bugzilla ID):
+ 1098 (use weights from surdat file rather than finidat file)
+ 1121 (history variable attribute cell_methods misnamed)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1132 (clm failure for BMOZ compset)
+ 1133 (missing deallocate)
+ 1135 (miss smG45 test)
+ 1136 (line length for NEE in histFlds)
+ 1137 (qtr-deg fsurdat)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1141 (CN spinup test)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+
+ New finidat files for 1-deg and 2-deg (from fully coupled simulations)
+ New datasets for I cases that are set in scripts
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, dlawren, slevis, oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+
+ scripts to scripts4_100310c
+
+ This version of scripts sets up special finidat files that will be used
+ for all I cases (other cases use the fully coupled datasets that are stored
+ in the namelist_defaults_clm.xml database).
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - New finidat files
+M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 ------------ Changes from Francis
+M models/lnd/clm/src/main/ncdio.F90 --------------------------- Change cell_method to cell_methods
+M models/lnd/clm/src/main/clm_initializeMod.F90 --------------- Remove second call to pftdyn_interp
+M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 --------- Compare weights only
+ if NOT restart and NOT branch with pftdyn. If weights are too different
+ abort, if close enough write a warning and continue using the surfdata
+ weights
+
+>>>>>>>>>>>>>>>> Some work on documentation
+M models/lnd/clm/doc/KnownBugs
+M models/lnd/clm/doc/UsersGuide/preface.xml
+M models/lnd/clm/doc/IMPORTANT_NOTES
+M models/lnd/clm/doc/Quickstart.GUIDE
+
+>>>>>>>>>>>>>>>> Allow clm_start_type to be overridden if on use_case
+>>>>>>>>>>>>>>>> Allow vars in use_cases to not be set for some configs
+M models/lnd/clm/bld/build-namelist
+
+>>>>>>>>>>>>>>>> Change start_type to clm_start_type for all use-cases
+>>>>>>>>>>>>>>>> Only set orb_iyearad for standalone clm testing
+M models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml
+M models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml
+M models/lnd/clm/bld/namelist_files/use_cases/pergro.xml --------- Set start to cold
+M models/lnd/clm/bld/namelist_files/use_cases/pergro0.xml -------- Set start to cold
+M models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+
+Summary of testing:
+
+ bluefire: All PASS except...
+024 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -10 colFAIL! rc= 7
+025 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+026 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+006 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+007 erCA4 TER.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ....FAIL! rc= 5
+008 brCA4 TBR.sh _nrsc_ds clm_std^nl_urb_br 20021001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic .FAIL! rc= 5
+009 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 4
+010 smNB4 TSM.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 10
+011 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+012 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+018 smCA8 TSM.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 10
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 4
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+ bluefire/CCSM testing: All PASS except...
+FAIL ERS.f19_g16.I1850.bluefire.compare_hist.clm3_7_06
+FAIL ERS.f19_g16.I1850.bluefire.compare.clm3_7_06
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm3_7_06
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm3_7_07
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_06
+SFAIL PST.f10_f10.I8520CN.bluefire.GC.201955
+SFAIL PET_PT.f10_f10.I8520CN.bluefire.GC.201955
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare_hist.clm3_7_06
+FAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm3_7_06
+ jaguar interactive testing: All PASS up to...
+005 smAK4 TSM.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_06
+
+Changes answers relative to baseline: Different initial condition datasets,
+ also now using weights from surfdata files rather than finidat files
+
+===============================================================
+===============================================================
+Tag name: clm3_7_06
+Originator(s): erik (erik)
+Date: Wed Mar 10 16:35:57 MST 2010
+One-line Summary: Bring cndv branch to trunk
+
+Purpose of changes:
+
+Bring CNDV branch to trunk cndv16_clm3_7_05 (erik/slevis). Fix bug 978 for Sam (nl
+option for branch). Put CROP part of CNDV branch on its own branch. Add a couple more
+history fields (LAND_USE_FLUX, and LAND_UPTAKE). Add HTOP to default output. SNICAR_FRC
+fix, test SNICAR_FRC. Fix VOC by making sure fsun240 is between 0 and 1. Fix CO2 PPMV for
+I cases. Add in script to create CO2 streams file that can be used by datm8. Update VOC
+documentation. Get in updates from ccsm4_0_rel branch. Remove 360x720 grid, files with
+gx3v5/gx1v5 masks. Fix bug 1120, by initializing displavegc+ for CN, Change start_type to
+clm_start_type, don't allow both fndepdat and fndepdyn. Don't allow spinup modes with
+supln, and don't allow both ad_spinup and exit_spinup. Move testing from gx3v5 to gx3v7
+mask
+
+Bugs fixed (include bugzilla ID):
+ 1104 (Restart problem with pftdyn mode)
+ 1118 (Restarts with SNICAR_FRC fail)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1121 (history variable attribute cell_methods misnamed)
+ 1132 (clm failure for BMOZ compset)
+ 1133 (missing deallocate)
+ 1135 (miss smG45 test)
+ 1136 (line length for NEE in histFlds)
+ 1137 (qtr-deg fsurdat)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1141 (CN spinup test)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Switch cndv for dgvm, Have configure disallow
+bad cases
+ Change configure to NOT allow supln with spinup cases
+ Do NOT allow exit_spinup and ad_spinup at the same time.
+ Only allow voc to be set for seq_ccsm NOT ext_ccsm_seq
+
+Describe any changes made to the namelist: Add override_nsrest namelist option
+
+List any changes to the defaults for the boundary datasets:
+ New pft-physiology files with extra fields for CNDV
+ Add new f05 1850 surfdata file
+ Remove 360x720 files, gx3v5, gx1v5 files
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: slevis, self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, csm_share, mct, and pio
+
+ scripts to scripts4_100306
+ drv to drvseq3_1_19
+ csm_share to share3_100228
+ mct to MCT2_7_0_100228
+ pio to pio1_0_15
+
+List all files eliminated: Remove DGVM files
+
+ D models/lnd/clm/src/biogeochem/DGVMLightMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMReproductionMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMAllocationMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMEcosystemDynMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMKillMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMEstablishmentMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMRestMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMMortalityMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMTurnoverMod.F90
+ D models/lnd/clm/src/biogeochem/DGVMFireMod.F90
+
+List all files added and what they do:
+
+>>>>>>>>>>>>> Add CNDV files
+ A models/lnd/clm/src/biogeochem/CNDVEstablishmentMod.F90
+ A models/lnd/clm/src/biogeochem/CNDVLightMod.F90
+ A models/lnd/clm/src/biogeochem/CNDVMod.F90
+ A models/lnd/clm/src/biogeochem/CNDVEcosystemDynIniMod.F90
+>>>>>>>>>>>>> Add script to convert CAM historical greenhouse gas file to CO2 history
+>>>>>>>>>>>>> file that can be used in datm streams
+ A models/lnd/clm/tools/ncl_scripts/getco2_historical.ncl
+>>>>>>>>>>>>> Add SNICAR_FRC and CNDV config files to test
+ A models/lnd/clm/test/system/config_files/_scsnf_dh
+ A models/lnd/clm/test/system/config_files/_scsnf_dm
+ A models/lnd/clm/test/system/config_files/_scsnf_do
+ A models/lnd/clm/test/system/config_files/17p_cndvsc_dh
+ A models/lnd/clm/test/system/config_files/17p_cndvsc_dm
+ A models/lnd/clm/test/system/config_files/17p_cndvsc_do
+ A models/lnd/clm/test/system/config_files/17p_cndvsc_h
+
+>>>>>>>>>>>>> Add files to describe how to add streams for CO2
+ A models/lnd/clm/doc/UsersGuide/addco2_datm.buildnml.diff
+ A models/lnd/clm/doc/UsersGuide/co2_streams.txt
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>> Document new getco2 script, use correct namelist in getregional script
+ M models/lnd/clm/tools/ncl_scripts/README
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl
+
+>>>>>>>>>>>>>>> Add K configure tests for CNDV, B configure tests for SNICAR_FRC,
+>>>>>>>>>>>>>>> and create_croplunit tests Change maxpft 17 tests to numpft+1
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnnsc_h
+ M models/lnd/clm/test/system/config_files/17p_cnnsc_m
+ M models/lnd/clm/test/system/config_files/17p_cnnsc_o
+ M models/lnd/clm/test/system/config_files/17p_cnsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnc13sc_do
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnnsc_dh
+ M models/lnd/clm/test/system/config_files/17p_cnnsc_dm
+ M models/lnd/clm/test/system/config_files/17p_cnnsc_do
+ M models/lnd/clm/test/system/config_files/17p_cnnsc_ds
+
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_jaguar
+ M models/lnd/clm/test/system/tests_posttag_kraken
+ M models/lnd/clm/test/system/tests_posttag_breeze
+ M models/lnd/clm/test/system/tests_pretag_jaguar_nompi
+ M models/lnd/clm/test/system/tests_pretag_bangkok
+ M models/lnd/clm/test/system/tests_posttag_purempi_regression
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/tests_pretag_calgary
+
+ M models/lnd/clm/test/system/input_tests_master - Add B and K tests
+ M models/lnd/clm/test/system/test_driver.sh ----- Update for bluefire changes
+ to scripts
+ M models/lnd/clm/test/system/mknamelist --------- Change start_type to
+ clm_start_type add in nrevsnfile
+ M models/lnd/clm/test/system/README.testnames --- Change K configure
+tests to mean CNDV
+
+>>>>>>>>>>>>>
+ M models/lnd/clm/bld/configure ---- Swap out cndv for dgvm. -bgc cndv turns on
+ both CN AND CNDV cpp tokens.
+ voc part of standalone_test arguments
+ supln and spinup options can't be on at the same
+time
+ exit_spinup and ad_spinup can't be on at the
+same time
+ M models/lnd/clm/bld/queryDefaultXML.pm ---------------- Swap cndv for DGVM
+ M models/lnd/clm/bld/config_files/config_definition.xml - Swap cndv for dgvm
+ voc in standalone_test, maxpft lists valid numbers
+category
+ M models/lnd/clm/bld/build-namelist ----- Change start_type to clm_start_type
+ Don't allow both fndepdyn and fndepdat to be set
+ Work with nrevsn, so not always given
+ M models/lnd/clm/bld/clm.cpl7.template -- Rename start_type to clm_start_type, and
+ let default be "default"
+ M models/lnd/clm/bld/namelist_files/checkdatmfiles.ncl ------ Update mask list
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml - Add override_nsrest,
+ mkghg_bndtvghg, rename start_type to clm_start_type
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - Add
+clm_start_type
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml ---- Remove domain
+files
+ with gx3v5 and gx1v5 masks
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ----- new pft-physiology files for CNDV
+ Use same fndepdat datasets for cndv
+ Remove 360x720 files,
+ files with gx3v5 and gx1v5 masks
+ New 1850 f05 fsurdat file
+ Add mkghg_bndtvghg dataset to point to CAM
+ historical greenhouse dataset
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml ----- Remove
+start_type
+
+ M models/lnd/clm/doc/UsersGuide/tools.xml
+ M models/lnd/clm/doc/UsersGuide/preface.xml
+ M models/lnd/clm/doc/UsersGuide/clm_ug.xml
+ M models/lnd/clm/doc/UsersGuide/adding_files.xml
+ M models/lnd/clm/doc/UsersGuide/appendix.xml
+ M models/lnd/clm/doc/UsersGuide/custom.xml
+ M models/lnd/clm/doc/UsersGuide/single_point.xml
+ M models/lnd/clm/doc/UsersGuide/Makefile
+ M models/lnd/clm/doc/UsersGuide/special_cases.xml
+
+>>>>>>>>>>>>> Add in landuseflux/landuptake, always use hardwire_sla for VOC
+>>>>>>>>>>>>> Set displavegc for CN not just CNDV, new fields for SNICAR_FRC restarts
+ M models/lnd/clm/src/biogeochem/CNSummaryMod.F90 --------- Calculate landuseflux/landuptake
+ M models/lnd/clm/src/biogeochem/CNPhenologyMod.F90 ------- Calculate pftmayexist for CNDV
+ Remove concurrent directives
+ M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 ------- Always use hardwire_sla
+ Remove DGVM CPP ifdefs, fix for transient problem from Dave
+ loop over soil filter rather than non-lake
+ M models/lnd/clm/src/biogeochem/CNSetValueMod.F90 -------- Set displavegc etc. for CN as well as CNDV
+ Add some CNDV fields
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 ------------- Remove agdd0/agdd5,fnpsn10,
+ initialize landuseflux/landuptake
+ M models/lnd/clm/src/main/CNiniSpecial.F90 --------------- initialize landuseflux/landuptake
+ M models/lnd/clm/src/main/clmtype.F90 -------------------- Swap DGVM vars for CNDV
+ add pftmayexist, landuseflux/landuptake
+ M models/lnd/clm/src/main/histFldsMod.F90 ---------------- Add LAND_USE_FLUX, LAND_UPTAKE, make HTOP active
+ Swap DGVM fields for CNDV
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 ---- Add fields needed for SNICAR_FRC
+
+ M models/lnd/clm/src/biogeochem/CNCStateUpdate2Mod.F90 --- Remove unneeded use statement
+ M models/lnd/clm/src/biogeochem/CNGapMortalityMod.F90 ---- CNDV changes
+ M models/lnd/clm/src/biogeochem/CNC13StateUpdate2Mod.F90 - Remove junk
+ M models/lnd/clm/src/biogeochem/CNGRespMod.F90 ----------- Remove junk
+ M models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90 --- Remove junk
+ M models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90 ----
+ M models/lnd/clm/src/biogeochem/CNNStateUpdate3Mod.F90 ---
+ M models/lnd/clm/src/biogeochem/CNFireMod.F90 ------------ CNDV section
+ M models/lnd/clm/src/biogeochem/CNMRespMod.F90 ----------- Remove junk
+ M models/lnd/clm/src/biogeochem/CNPrecisionControlMod.F90 Remove junk
+ M models/lnd/clm/src/biogeochem/CNDecompMod.F90 ---------- Pass lbp, ubp to CNAllocate
+ M models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90 --- Formatting changes
+ M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 --- Swap CNDV for DGVM
+ M models/lnd/clm/src/biogeochem/CNCStateUpdate3Mod.F90 --- Remove junk
+ M models/lnd/clm/src/biogeochem/CNC13StateUpdate1Mod.F90 - Remove junk
+ M models/lnd/clm/src/biogeochem/CNC13StateUpdate3Mod.F90 - Remove junk
+ M models/lnd/clm/src/biogeochem/CNrestMod.F90 ------------ Add CNDV section
+ M models/lnd/clm/src/biogeochem/CNAnnualUpdateMod.F90 ---- Add CNDV section
+ M models/lnd/clm/src/biogeochem/CNNStateUpdate2Mod.F90 --- Remove junk
+ M models/lnd/clm/src/biogeochem/C13SummaryMod.F90 -------- Remove junk
+ M models/lnd/clm/src/biogeochem/CNNDynamicsMod.F90 ------- Remove junk
+ M models/lnd/clm/src/biogeochem/CNAllocationMod.F90 ------ Pass pft loop indices in,
+ formatting changes remove junk
+ M models/lnd/clm/src/biogeochem/CNC13FluxMod.F90 --------- Remove junk
+ M models/lnd/clm/src/biogeochem/DryDepVelocity.F90 ------- Swap CNDV for DGVM
+ M models/lnd/clm/src/biogeochem/CNEcosystemDynMod.F90 ---- Pass lbp, ubp to CNDecompAlloc
+ M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90 - Use dwood from pft-physiology file
+ Add CNDV section
+
+ M models/lnd/clm/src/main/clm_varpar.F90 --------- Change some of the names around
+ M models/lnd/clm/src/main/CNiniTimeVar.F90 ------- CNDV section for litter fall
+ M models/lnd/clm/src/main/clm_comp.F90 ----------- Swap CNDV for DGVM
+ M models/lnd/clm/src/main/clm_initializeMod.F90 -- Swap CNDV for DGVM
+ M models/lnd/clm/src/main/accFldsMod.F90 --------- Swap CNDV for DGVM
+ M models/lnd/clm/src/main/subgridMod.F90 --------- Handle create_croplandunit correctly
+ M models/lnd/clm/src/main/pftdynMod.F90 ---------- Add CNDV subroutine: pftwt_init
+ For CNDV make pftwt_interp public
+ M models/lnd/clm/src/main/iniTimeConst.F90 ------- Change dgvm vars init
+ M models/lnd/clm/src/main/restFileMod.F90 -------- Remove DGVM
+ M models/lnd/clm/src/main/controlMod.F90 --------- Add override_nsrest, swap CNDV for DGVM
+ M models/lnd/clm/src/main/initSurfAlbMod.F90 ----- Swap CNDV for DGVM
+ M models/lnd/clm/src/main/filterMod.F90 ---------- Swap CNDV for DGVM remove concurrent directives
+ M models/lnd/clm/src/main/clm_driver.F90 --------- Swap CNDV for DGVM
+ M models/lnd/clm/src/main/clm_varctl.F90 --------- Swap CNDV for DGVM, fix check for create_croplandunit
+ M models/lnd/clm/src/main/ndepFileMod.F90 -------- Remove junk
+ M models/lnd/clm/src/main/initGridCellsMod.F90 --- Fix create_croplandunit
+ M models/lnd/clm/src/main/pftvarcon.F90 ---------- New CNDV parameters, formatting changes,
+ Always read all parameters to make read easier to understand
+ M models/lnd/clm/src/main/surfrdMod.F90 ---------- Swap CNDV for DGVM, fix create_croplandunit, change some names
+
+ M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90 ----- Remove junk
+ M models/lnd/clm/src/biogeophys/SurfaceRadiationMod.F90 - Remove junk add comments
+ M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90 --- Remove junk
+ M models/lnd/clm/src/biogeophys/FrictionVelocityMod.F90 - Change comments remove DGVM
+ M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 ---- Change formatting add comments
+ M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 ------- Remove DGVM
+ M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90 - Remove junk
+ M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90 ----- Remove DGVM add in CNDV
+
+Summary of testing:
+
+ bluefire:
+FAIL! rc= 7
+025 smK51 TSM.sh 17p_cndvsc_dh clm_std 19981231:NONE:1800 10x15 USGS 48 arb_ic ..................PASS
+026 erK51 TER.sh 17p_cndvsc_dh clm_std 19981231:NONE:1800 10x15 USGS 10+38 arb_ic ...............PASS
+027 brK51 TBR.sh 17p_cndvsc_dh clm_std 19981231:NONE:1800 10x15 USGS 24+24 arb_ic ...............PASS
+028 blK51 TBL.sh 17p_cndvsc_dh clm_std 19981231:NONE:1800 10x15 USGS 48 arb_ic ..................FAIL! rc= 5
+029 smHN1 TSM.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -10 colPASS
+030 erHN1 TER.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -3+-7 cFAIL! rc= 13
+031 brHN1 TBR.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -5+-5 cFAIL! rc= 11
+032 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -10 colFAIL! rc= 7
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ bluefire interactive testing: All PASS except...
+006 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+007 erCA4 TER.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ....FAIL! rc= 5
+008 brCA4 TBR.sh _nrsc_ds clm_std^nl_urb_br 20021001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic .FAIL! rc= 5
+009 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 4
+010 smNB4 TSM.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 10
+011 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+012 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+018 smCA8 TSM.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 10
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 4
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+032 blF93 TBL.sh 17p_vodsrsc_do clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v7 48 cold .............FAIL! rc= 5
+036 blL83 TBL.sh _nrsc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -10 arb_ic ..........FAIL! rc= 7
+040 blK74 TBL.sh 17p_cndvsc_s clm_std 19971231:NONE:1800 1x1_brazil navy -670 arb_ic ............FAIL! rc= 5
+ bluefire/CCSM testing: All PASS except.. (compare tests fail because CO2 level was changed for 2000 compsets)
+FAIL SMS_RLA.f45_f45.I.bluefire.compare_hist.clm3_7_05
+FAIL SMS_RLA.f45_f45.I.bluefire.compare.clm3_7_05
+FAIL SMS_RLB.f45_f45.I.bluefire.compare_hist.clm3_7_05
+FAIL SMS_RLB.f45_f45.I.bluefire.compare.clm3_7_05
+BFAIL ERS_D.f45_g37.I.bluefire.compare.clm3_7_05
+FAIL ERB.f09_g16.I_1948-2004.bluefire.compare.clm3_7_05
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm3_7_06
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_05
+BFAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm3_7_05
+FAIL PST.f10_f10.I8520CN.bluefire.compare.clm3_7_05
+BFAIL PET_PT.f10_f10.I8520CN.bluefire.compare.clm3_7_05
+BFAIL ERS_D.f19_g16.IRCP85CN.bluefire.compare.clm3_7_05
+ jaguar interactive testing: All PASS except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_05
+
+Changes answers relative to baseline: I2000 cases are different because of new CCSM_CO2_PPMV
+
+===============================================================
+===============================================================
+Tag name: clm3_7_05
+Originator(s): erik (erik)
+Date: Wed Feb 24 00:33:08 MST 2010
+One-line Summary: Bring VOC branch source code to trunk
+
+Purpose of changes:
+
+Move VOC branch over to trunk (vocemis-drydep19_clm3_7_04), this includes source code
+changes for VOC and drydep. Ensure answers for f09, f19, f10 are identical to clm3_7_02
+(other than VOC fields). Split users guide into separate files by chapter. Remove dublin.
+Add rcp option to getregional dataset script.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1104 (Restart problem with pftdyn mode)
+ 1118 (Restarts with SNICAR_FRC fail)
+ 1121 (history variable attribute cell_methods misnamed)
+ 1132 (clm failure for BMOZ compset)
+ 1133 (missing deallocate)
+ 1135 (miss smG45 test)
+ 1136 (line length for NEE in histFlds)
+ 1137 (qtr-deg fsurdat)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1141 (CN spinup test)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ Fix date of 1x1_tropicAtl surfdata
+ New T62 fsurdat file with VOC
+ Fix syntax error in default_datm file
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts and drv
+
+ scripts to scripts4_100222 (uses new bluefire compiler with bug-fix for
+ reproducibility bug)
+ drv to drvseq3_1_17
+
+List all files eliminated:
+
+>>>>>>>>>>>>> Rename to edinburgh
+ D models/lnd/clm/test/system/tests_pretag_dublin
+ D models/lnd/clm/test/system/tests_pretag_dublin_nompi
+
+>>>>>>>>>>>>> Split into separate files
+ D models/lnd/clm/doc/UsersGuide/index.xml
+
+List all files added and what they do:
+
+ A models/lnd/clm/test/system/tests_pretag_edinburgh ------- Rename dublin files
+ A models/lnd/clm/test/system/tests_pretag_edinburgh_nompi - Rename dublin files
+
+>>>>>>>>>>>>> Split Users Guide into separate files by chapter
+ A models/lnd/clm/doc/UsersGuide/tools.xml
+ A models/lnd/clm/doc/UsersGuide/preface.xml
+ A models/lnd/clm/doc/UsersGuide/clm_ug.xml
+ A models/lnd/clm/doc/UsersGuide/adding_files.xml
+ A models/lnd/clm/doc/UsersGuide/config_cache.xml
+ A models/lnd/clm/doc/UsersGuide/custom.xml
+ A models/lnd/clm/doc/UsersGuide/get_Icaselist.pl --- Script to list I cases
+ A models/lnd/clm/doc/UsersGuide/single_point.xml
+ A models/lnd/clm/doc/UsersGuide/special_cases.xml
+
+>>>>>>>>>>>>> Add module to handle dry-deposition velocity
+ A models/lnd/clm/src/biogeochem/DryDepVelocity.F90
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>> Remove dublin
+ M models/lnd/clm/test/system/test_driver.sh
+ M models/lnd/clm/test/system/input_tests_master - Decrease mexicoCity run length to 157
+ M models/lnd/clm/test/system/CLM_runcmnd.sh
+
+>>>>>>>>>>>>> Fix bug in dynamic PFT file generation example, let intel allow lines of
+>>>>>>>>>>>>> any length
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pftdyn - Use 1850 veg file to start from
+ M models/lnd/clm/tools/mksurfdata/Makefile ---------- Remove intel -132 so can be any
+length
+ M models/lnd/clm/tools/mkgriddata/Makefile ---------- Remove intel -132 so can be any
+ length, add SMP option
+ M models/lnd/clm/tools/mkdatadomain/Makefile -------- Remove intel -132 so can be any
+length
+
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl -- Add absolute_path and
+ ability to use rcp
+ M models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl - Add rcp
+ correct namelist for domainfile
+
+>>>>>>>>>>>>>
+ M models/lnd/clm/bld/clm.cpl7.template ---- Turn rtm off for PTS_MODE, remove lnd_in
+and Filepath files from clmconf
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - Fix syntax error in
+2.5x3.33 domain file
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- Fix date of
+surfdata_1x1_tropicAtl file
+
+>>>>>>>>>>>>> Split into separate files by Chapter
+ M models/lnd/clm/doc/UsersGuide/Makefile
+
+>>>>>>>>>>>>> Source code changes to use MEGAN VOC and dry-deposition
+
+ M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 -- Add readAnnualVegetation
+ subroutine for dry-deposition, use some F90 NetCDF,
+ get mlai difference between months for dry-deposition
+ M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 ------ New MEGAN VOC module
+ remove concurrent directives
+ M models/lnd/clm/src/main/clm_varpar.F90 ---------------- Change comment on nvoc
+ M models/lnd/clm/src/main/clm_comp.F90 ------------------ Interp monthly veg for
+ drydep on clm_init2
+ M models/lnd/clm/src/main/clm_initializeMod.F90 --------- add readAnnualVegetation
+ M models/lnd/clm/src/main/accFldsMod.F90 ---------------- 24hr and 10day accumulators for
+ t_veg, fsd, fsi, fsun, laip, remove concurrent directives
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 ------------ Add new VOC and drydep variables
+ M models/lnd/clm/src/main/iniTimeConst.F90 -------------- Read in VOC emission
+ factors, remove concurrent directives
+ M models/lnd/clm/src/main/clm_atmlnd.F90 ---------------- Add VOC and drydep fluxes
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 ------ Pass VOC and drydep fluxes
+ M models/lnd/clm/src/main/clm_driver.F90 ---------------- Always call VOC emission
+ and call depvel_compute
+ M models/lnd/clm/src/main/clmtype.F90 ------------------- Add some VOC and drydep
+ variables, move sandfrac/clayfrac for all not just CASA
+ add accumulation variables
+ M models/lnd/clm/src/main/histFldsMod.F90 --------------- Bunch of new inactive
+ variables for VOC fluxes
+ M models/lnd/clm/src/main/inicFileMod.F90 --------------- Li Xu: correct ncd_iolocal and snow_fraction
+ M models/lnd/clm/src/biogeophys/FrictionVelocityMod.F90 - Deal with drydep velocity
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 --- Add mlaidiff to restart
+ file, if fsun set to NaN on restart set it to spval
+ M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90 ----- Keep track of boundary
+ layer resistance
+
+Summary of testing:
+
+ Baseline tests are identical other than VOC flux fields.
+> grep RMS
+> /ptmp/erik/test-driver.612049/TBL.4p_vodsrsc_dh.clm_std^nl_urb.20021231:NONE:3600.1.9x2.5^0.9x1.25.gx1v6.48.arb_ic/cprnc.clmrun.clm2.h0.2002-12-31-00000.nc.out
+> | grep -v 0.0000E+00
+ RMS BIOGENCO 1.0058E-01
+ RMS ISOPRENE 2.9500E+02
+ RMS MONOTERP 2.9129E+00
+ RMS ORVOC 3.3526E-01
+ RMS OVOC 3.3526E-01
+ RMS VOCFLXT 2.9641E+02
+
+ bluefire:
+004 blA91 TBL.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 7
+011 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 arb_ic ............FAIL! rc= 7
+016 blF92 TBL.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 7
+020 blEH1 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 7
+024 blHN1 TBL.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -10 colFAIL! rc= 7
+025 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+026 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+029 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+033 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 7
+041 blH52 TBL.sh 17p_cnnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 48 cold .........FAIL! rc= 7
+043 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 8
+044 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+045 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+046 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+050 blJ61 TBL.sh 4p_casasc_dh clm_std^nl_urb 20021230:NONE:1800 1.9x2.5 gx1v6 48 cold ...........FAIL! rc= 7
+ bluefire interactive testing:
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -10 arb_ic ..........FAIL! rc= 7
+006 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+007 erCA4 TER.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ....FAIL! rc= 5
+008 brCA4 TBR.sh _nrsc_ds clm_std^nl_urb_br 20021001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic .FAIL! rc= 5
+009 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 4
+010 smNB4 TSM.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 10
+011 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+012 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+017 blJ74 TBL.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic FAIL! rc= 7
+018 smCA8 TSM.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 10
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 4
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+023 blAK4 TBL.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold ............FAIL! rc= 7
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+028 blL78 TBL.sh _nrsc_s clm_std^nl_urb 20021231:NONE:1800 1x1_brazil navy -10 arb_ic ...........FAIL! rc= 7
+032 blF93 TBL.sh 17p_vodsrsc_do clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 7
+036 blL83 TBL.sh _nrsc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -10 arb_ic ..........FAIL! rc= 7
+041 bl744 TBLtools.sh mksurfdata tools__s namelist ..............................................FAIL! rc= 7
+043 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 7
+045 bl754 TBLtools.sh mksurfdata tools__s globalirrig ...........................................FAIL! rc= 6
+ bluefire/CCSM testing:
+PASS SMS_RLA.f45_f45.I.bluefire
+PASS SMS_RLB.f45_f45.I.bluefire
+PASS SMS_ROA.f45_f45.I.bluefire
+RUN ERS_D.f45_g37.I.bluefire.GC.092123
+PASS PST.f45_g37.I1850.bluefire.cpl
+PASS PST.f45_g37.I1850.bluefire.atm
+PASS PST.f45_g37.I1850.bluefire.lnd
+PASS PST.f45_g37.I1850.bluefire.ice
+PASS PST.f45_g37.I1850.bluefire.ocn
+PASS PST.f45_g37.I1850.bluefire.glc
+PASS PET_PT.f45_g37.I1850.bluefire
+PASS ERS.f19_g16.I1850.bluefire
+PASS ERB.f09_g16.I_1948-2004.bluefire
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+PEND ERH_D.f10_f10.I1850CN.bluefire.GC.092123
+PASS PST.f10_f10.I8520CN.bluefire.cpl
+PASS PST.f10_f10.I8520CN.bluefire.atm
+PASS PST.f10_f10.I8520CN.bluefire.lnd
+PASS PST.f10_f10.I8520CN.bluefire.ice
+PASS PST.f10_f10.I8520CN.bluefire.ocn
+PASS PST.f10_f10.I8520CN.bluefire.glc
+ jaguar/CCSM testing:
+PASS ERS_D.f09_g16.I1850.jaguar
+PASS PST.f10_f10.I8520CN.jaguar.cpl
+PASS PST.f10_f10.I8520CN.jaguar.atm
+PASS PST.f10_f10.I8520CN.jaguar.lnd
+PASS PST.f10_f10.I8520CN.jaguar.ice
+PASS PST.f10_f10.I8520CN.jaguar.ocn
+PASS PST.f10_f10.I8520CN.jaguar.glc
+PASS PET_PT.f10_f10.I8520CN.jaguar
+ jaguar interactive testing:
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -10 arb_ic ..........FAIL! rc= 7
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+008 blAK4 TBL.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold ............FAIL! rc= 7
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+013 smJ74 TSM.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -1100 arb_ic FAIL! rc= 8
+014 erJ74 TER.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -10+-10 arb_ic FAIL! rc= 5
+015 brJ74 TBR.sh 4p_nrcasasc_ds clm_std^nl_urb_br 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_icFAIL! rc= 5
+016 blJ74 TBL.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic FAIL! rc= 4
+ edinburgh/ifort interactive testing:
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+009 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+010 erCA4 TER.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ....FAIL! rc= 5
+011 brCA4 TBR.sh _nrsc_ds clm_std^nl_urb_br 20021001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic .FAIL! rc= 5
+012 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 4
+013 smOC4 TSM.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 10
+014 erOC4 TER.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 115+115 arb_FAIL! rc= 5
+015 brOC4 TBR.sh _nrvansc_ds clm_urb1pt^nl_urb_br 19920812:NONE:3600 1x1_vancouverCAN navy 72+72 arbFAIL! rc= 5
+016 blOC4 TBL.sh _nrvansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 4
+017 smNB4 TSM.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 10
+018 erNB4 TER.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 57+100 arb_FAIL! rc= 5
+019 brNB4 TBR.sh _nrmexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arFAIL! rc= 5
+020 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 157 arb_ic FAIL! rc= 4
+ edinburgh/ifort:
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+016 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 arb_ic ............FAIL! rc= 7
+ breeze,gale,hail,gust/ifort interactive testing:
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -10 arb_ic ..........FAIL! rc= 7
+008 smCA4 TSM.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 10
+009 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 4
+010 smCA8 TSM.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 10
+011 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 4
+015 blL54 TBL.sh _sc_ds clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 48 arb_ic ..................FAIL! rc= 7
+019 blR53 TBL.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@1850 48 cold .......FAIL! rc= 7
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+023 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_04
+
+Changes answers relative to baseline: No bit-for-bit (except voc flux fields)
+
+===============================================================
+===============================================================
+Tag name: clm3_7_04
+Originator(s): erik (erik)
+Date: Wed Feb 17 23:22:23 MST 2010
+One-line Summary: Bring VOC branch (vocemis-drydep18_clm3_7_03) tools, testing, and build to trunk (everything other than VOC code changes)
+
+Purpose of changes:
+
+Move VOC branch (vocemis-drydep18_clm3_7_03) to trunk for support functionality, tools,
+testing, everything but the code changes. This includes optimization of mksurfdata,
+adding new tests, adding drydep to build, listen to cpl flag if aerosols are sent, update
+documentation about configure variables, remove pio and carbon_aero config options
+(always build with pio), remove local Macros files for Darwin (yong_g95, and
+breeze_intel), and remove gx1v3, gx1v4 masks, and 1x1.25 and 2x2.5 grids. All NCL regrid
+scripts to be able to use GRDFIL env variable to set location of a grid file just
+created. turn rtm off if PTS_MODE is TRUE. mksurfdata optimization includes: create
+subroutines for landuse normalization, add OpenMP parallelism, optimize memory so
+deallocate when done, and put OMP threads, veg filenames and optimization level on
+mksurfdata files. Move shr_drydepInputMod.F90 to drv/shr/seq_drydepMod.F90 (from
+csm_share to drv). Update externals. Work on documentation using output logs from scripts
+and moving documentation into separate chapters.
+
+Bugs fixed (include bugzilla ID):
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 1105 (Turn RTM mode off for PTS_MODE)
+ 1110 (dt limit error, for mexicocity)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1104 (Restart problem with pftdyn mode)
+ 1121 (history variable attribute cell_methods misnamed)
+ 1118 (Restarts with SNICAR_FRC fail)
+ 1133 (missing deallocate)
+ 1135 (miss smG45 test)
+ 1136 (line length for NEE in histFlds)
+ 1137 (qtr-deg fsurdat)
+ 1139 (LAND and PFTS 1D vector averaging doesn't work)
+ 1141 (CN spinup test)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ 1157 (Problem with VOC interpolation in mksurfdata)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system:
+ Remove carbon_aero and pio from configure (always build with pio)
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ Remove 1x1.25, 2x2.5, and 2.5x3.33 grid resolutions
+ Remove gx1v3, gx1v4, gx1v5 land masks, add drydep defaults.
+ Update 2.5x3.33@2000, 1x1_tropicAtl@2000, f09@1850 and 1x1_tropicAtl@1000-1004 pftdyn
+ add mksrf_fvegtyp@1000-1004
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, csm_share, drv, datm, and timing
+
+ csm_share to share3_100215
+ scripts to scripts4_100216
+ drv to drvseq3_1_16
+ datm to datm8_100215
+ timing to timing_091021
+
+List all files eliminated:
+
+>>>>>>>>>>>> Remove local Macros files, and remove maxpft=4 vodsrcsc tests
+ D models/lnd/clm/bld/config_files/Macros.yong_g95
+ D models/lnd/clm/bld/config_files/Macros.breeze_intel
+ D models/lnd/clm/test/system/config_files/4p_vodsrsc_m
+ D models/lnd/clm/test/system/config_files/4p_vodsrsc_h
+
+ D models/lnd/clm/doc/UsersGuide/index.xml -- Rename to clm_ug.xml
+ Divide most of the content into separate chapters.
+
+List all files added and what they do:
+
+ A models/lnd/clm/test/system/config_files/_scnv_dh ---- No-VOC debug-hybrid mode
+ A models/lnd/clm/test/system/config_files/_scnv_do ---- No-VOC debug-pure SMP hybrid mode
+ A models/lnd/clm/test/system/config_files/17p_scnv_dh - 17pft no-VOC debug-hybrid mode
+ A models/lnd/clm/test/system/config_files/17p_scnv_do - 17pft no-VOC debug-pure SMP mode
+ A models/lnd/clm/test/system/nl_files/clm_drydep ------ Turn on drydep in namelist
+ A models/lnd/clm/tools/mksurfdata/mkvocef.F90 --------- VOC emissions
+ A models/lnd/clm/bld/namelist_files/namelist_defaults_drydep.xml - Drydep namelist defaults
+ A models/lnd/clm/doc/UsersGuide/tools.xml --------- Tools chapter
+ A models/lnd/clm/doc/UsersGuide/preface.xml ------- Preface and introduction chapter
+ A models/lnd/clm/doc/UsersGuide/clm_ug.xml -------- Change name of index.xml
+ Move most of the contents into separate chapters
+ A models/lnd/clm/doc/UsersGuide/adding_files.xml -- Adding files chapter
+ A models/lnd/clm/doc/UsersGuide/config_cache.xml -- Sample config cache file so can
+ run build-namelist for documentation
+ A models/lnd/clm/doc/UsersGuide/custom.xml -------- Customizing chapter
+ A models/lnd/clm/doc/UsersGuide/get_Icaselist.pl -- Script to get list of I cases
+ A models/lnd/clm/doc/UsersGuide/single_point.xml -- Single point chapter
+ A models/lnd/clm/doc/UsersGuide/special_cases.xml - Special cases chapter
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/config_files/README - Add nv no-VOC config case
+ M models/lnd/clm/test/system/README.testnames ---- Add drydep testnames
+ M models/lnd/clm/test/system/test_driver.sh ------ Use generic_linux_intel mach for breeze
+ also changes to get jaguar to work
+ M models/lnd/clm/test/system/input_tests_master -- Add drydep and no-VOC tests, cut
+ back Mexicocity test to 158 steps
+
+ M models/lnd/clm/tools/mksurfdata/mkvarctl.F90 -------- Add mksrf_gridnm and mksrf_fvocef
+ M models/lnd/clm/tools/mksurfdata/ncdio.F90 ----------- Write out error codes on a problem.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.regional - Add voc file.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pftdyn --- Add voc file, use 1850 veg file.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept - Add voc file.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist - Add voc file.
+ M models/lnd/clm/tools/mksurfdata/mkharvestMod.F90 ---- Move file definition for harvest to init sub
+ M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 ------- Write out OpenMP threads
+ and OPT TRUE or FALSE, VOC fields, and veg filenames
+ M models/lnd/clm/tools/mksurfdata/mkorganic.F90 ------- Remove test and use 3D areaave
+ M models/lnd/clm/tools/mksurfdata/Makefile ------------ Add in SMP option to turn on OpenMP
+ M models/lnd/clm/tools/mksurfdata/mkurbanparMod.F90 --- Remove single level fields
+ and use 4D areaave
+ M models/lnd/clm/tools/mksurfdata/areaMod.F90 --------- Add 3D and 4D areaave interfaces
+ and add OpenMP directives
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 -------- Create subroutines shared
+ by dynpft loop and surfdata: change_landuse and normalizencheck_landuse.
+ Allocate memory as late as possible and deallocate as soon as possible.
+ Add: mksrf_gridnm and mksrf_fvocef to namelist
+ Add mkvocef and add to output file
+ M models/lnd/clm/tools/mksurfdata/Srcfiles ------------ Add mkvocef.F90
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl ------- Allow command line arguments
+ to pick resolutions and simulation-years. Read in namelist database information
+ for checking and using defaults. Add in mksrf_fvocef, and use mksrf_gridnm to
+ give output file same name as the input grid resolution name.
+ M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 -------- Remove testing, add 3D areaave
+
+ M models/lnd/clm/tools/ncl_scripts/README ----------- Remove script no longer available.
+ M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl - Allow env var GRDFIL to give
+ grid file to use
+ M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl --- Allow env var GRDFIL to give
+ grid file to use
+
+ M models/lnd/clm/tools/mkgriddata/README -- Fix typo
+
+ M models/lnd/clm/bld/configure --------------------------------- Remove carbon_aero
+ and pio (always build with pio), and make voc default on
+ M models/lnd/clm/bld/queryDefaultNamelist.pl ------------------- Add drydep file.
+ M models/lnd/clm/bld/config_files/config_sys_defaults.xml ------ Change mach
+ defaults, remove darwin make linux edinburgh_pgi
+ M models/lnd/clm/bld/config_files/config_definition.xml -------- Remove carbon_aero
+ and pio, make default for mode ext_ccsm_seq, make voc on,
+ work on documentation
+ M models/lnd/clm/bld/build-namelist ---------------------------- Add drydep, remove
+ carbon_aero and pio
+ M models/lnd/clm/bld/clm.cpl7.template ------------------------- Turn rtm off for PTS_MODE
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ---- Add mksrf_fvegtyp,
+ remove 1x1.25, 2x2.5, and 2.5x3.33 grid resolutions
+ remove gx1v3, gx1v4, gx1v5 land masks, add drydep_method, and drydep_list
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - Remove 1x1.25,
+ 2x2.5, and gx1v3, gx1v4, gx1v5 domain files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- Remove 1x1.25,
+ and gx1v3, gx1v4, gx1v5 files
+ update 2.5x3.33@2000, 1x1_tropicAtl@2000, f09@1850
+ and 1x1_tropicAtl@1000-1004 pftdyn
+ add mksrf_fvegtyp@1000-1004
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml -- Remove gx1v4, gx1v5 start dates
+
+ M models/lnd/clm/doc/UsersGuide/Makefile --- Get makefile to use scripts to make
+ log info to put into document, and separate out document
+ into chapters
+
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 --- Use atm_aero in infodata
+ to determine: caerdep_filled, and dustdep_filled
+ some changes to get ready for VOC and drydep branch to come to trunk
+
+Summary of testing:
+
+ bluefire: All PASS up to 019 brEH1
+ bluefire interactive testing: All PASS except...
+004 blJ74 TBL.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -100
+arb_ic FAIL! rc= 5
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+ bluefire/CCSM testing:
+PASS SMS_RLB.f45_f45.I.bluefire
+PASS ERH_D.f10_f10.I1850CN.bluefire
+ jaguar interactive testing: All PASS except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+ edinburgh/ifort: All PASS except...
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+ breeze,gale,hail,gust/ifort interactive testing: All PASS except...
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+023 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_03
+
+Changes answers relative to baseline: 1x1_tropicAtl changes due to new surfdata file
+
+===============================================================
+===============================================================
+Tag name: clm3_7_03
+Originator(s): erik (erik)
+Date: Wed Feb 10 11:29:56 MST 2010
+One-line Summary: Add in more future scenario datasets, new history fields from Keith
+
+Purpose of changes:
+
+Add in pftdyn dataset for 1-degree rcp-8.5. Add in interpolated aerdep/ndepdyn scenario
+files for f10, f09. Add in code change from Keith O. for average of top soil layers. Add
+in rcp for ndep and aerdep regrid scripts. Allow 2000-2100 sim_year_range for 1-degree
+resolution, and have a aerdep dataset for 1-degree for 2000-2100 (copy other 1850-2100
+datasets). There is also a 1-degree 1850-2100 aerosol dataset, but the file is large (~9GB).
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1104 (Restart problem with pftdyn mode)
+ 1118 (Restarts with SNICAR_FRC fail)
+ 1121 (history variable attribute cell_methods misnamed)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist:
+ Add 2000-2100 simulation year range as allowed option
+
+ Two new history fields:
+
+ TSOI_10CM = soil temperature in top 10cm of soil (K)
+ SOILWATER_10CM = soil liquid water + ice in top 10cm of soil (kg/m2)
+
+List any changes to the defaults for the boundary datasets: New datasets for rcp=8.5
+ f09, rcp=8.5 pftdyn for 1850-2000 (use same file for 2000-2100)
+ faerdep, for 1850-2100 rcp=8.5, f10, f45, f25, f09 (and 2000-2100) and f19 for 2000-2100
+ fndepdyn, for 1850-2000 rcp=8.5 f09 (use same file for 2000-2100), f25, f45, f10
+ fndepdyn for 1850-2100 rcp=2.6 for native f19
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, history changes from Keith Oleson, reviewed by Dave Lawrence
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: use-case for 2000-2100 for high resolution (1 degree and up)
+
+>>>>>>>>>> Add a use-case for a future scenario that only includes 2000-2100
+>>>>>>>>>> this is for 1-degree and higher resolution where we can't include the
+>>>>>>>>>> historical period and have reasonable sized files.
+ A models/lnd/clm/bld/namelist_files/use_cases/2000-2100_rcp8.5_transient.xml
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>> Get regridding scripts working for rcp's
+ M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl - Allow rcp to be set, more printing
+ M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl --- Allow rcp to be set, allow more sim_year_ranges
+
+>>>>>>>>>> New rcp=8.5 datasets, and 2000-2100 support for 1-degree
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - New rcp=8.5 datasets
+ f09, rcp=8.5 pftdyn for 1850-2000 (use same file for 2000-2100)
+ faerdep, for 1850-2100 rcp=8.5, f10, f45, f25, f09 (and 2000-2100) and f19 for 2000-2100
+ fndepdyn, for 1850-2000 rcp=8.5 f09 (use same file for 2000-2100), f25, f45, f10
+ fndepdyn for 1850-2100 rcp=2.6 for native f19
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Allow 2000-2100 sim-year range
+
+ M models/lnd/clm/doc/UsersGuide/index.xml --- Update documentation with namelist examples
+
+>>>>>>>>>> Code changes from Keith Oleson to add 10cm soil temperature and soil water history variables.
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 ------ Add h2osoi_liqice_10cm and t_soi_10cm
+ M models/lnd/clm/src/main/clmtype.F90 ------------- Add h2osoi_liqice_10cm and t_soi_10cm
+ M models/lnd/clm/src/main/histFldsMod.F90 --------- Add TSOI_10CM and SOILWATER_10CM,
+ on by default and output as average by default.
+ M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 - Calculate 10cm soil averages for non-urban points
+
+Summary of testing:
+
+ bluefire: All PASS except...
+022 erHN1 TER.sh 17p_cnsc_dh clm_transient_rcp8.5 20051220:NONE:1800 1.9x2.5 gx1v6@1850-2100 -3+-7 cFAIL! rc= 7
+025 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+026 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+029 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+ bluefire interactive testing: All PASS except...
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+034 erL83 TER.sh _nrsc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -5+-5 arb_ic ........FAIL! rc= 6
+035 brL83 TBR.sh _nrsc_do clm_std^nl_urb_br 20020115:NONE:3600 5x5_amazon navy -10+-10 arb_ic ...FAIL! rc= 3
+036 blL83 TBL.sh _nrsc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -10 arb_ic ..........FAIL! rc= 3
+ bluefire/CCSM testing:
+PASS SMS_RLA.f45_f45.I.bluefire
+PASS SMS_RLA.f45_f45.I.bluefire.compare_hist.clm3_7_02
+PASS SMS_RLA.f45_f45.I.bluefire.compare.clm3_7_02
+PEND SMS_RLB.f45_f45.I.bluefire.GC.140232
+PEND SMS_ROA.f45_f45.I.bluefire.GC.140232
+PASS ERS_D.f45_g37.I.bluefire
+PASS ERS_D.f45_g37.I.bluefire.compare_hist.clm3_7_02
+PASS ERS_D.f45_g37.I.bluefire.compare.clm3_7_02
+PEND PET.f45_g37.I1850.bluefire.GC.140232
+PEND ERS.f19_g16.I1850.bluefire.GC.140232
+RUN ERB.f09_g16.I_1948-2004.bluefire.GC.140232
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_02
+PASS ERH_D.f10_f10.I1850CN.bluefire
+PASS ERH_D.f10_f10.I1850CN.bluefire.compare.clm3_7_02
+PEND PET.f10_f10.I8520CN.bluefire.GC.140232
+FAIL ERS_D.f19_g16.I8521CNR85.bluefire
+BFAIL ERS_D.f19_g16.I8521CNR85.bluefire.compare.clm3_7_02
+ jaguar interactive testing: All PASS except...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+ edinburgh/lf95: All PASS up to smL58 (test 29) except...
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_02
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_7_02
+Originator(s): erik (erik)
+Date: Sat Feb 6 00:43:49 MST 2010
+One-line Summary: Start adding in new rcp=8.5 datasets, remove some junk, change some env_conf variables, add user_nl_clm
+
+Purpose of changes:
+
+New pftdyn, ndep and aerdep files for rcp=8.5 future scenario 2005+ . Need to have the
+ability to handle four future scenarios: minicam (rcp4.5), aim (rcp 6), image (rcp2.6),
+and message (rcp8.5). Add in ndepdat datasets for rcp2.6 for future decades. Add in
+ndepdyn, pftdyn, aerdep datasets for rcp8.5, and ndepdyn for rcp4.5 (f19) and pftdyn for
+f10 as well. Some changes to scripts, remove CLM_DEMAND, add CLM_NML_USE_CASE,
+CLM_CO2_TYPE and user namelist. Add user_nl to clm.cpl7.template, remove CLM_DEMAND add
+CLM_CO2_TYPE and CLM_NML_USE_CASE (in favor of use_cases). Make sure driver/scripts
+updated with this change. Update documentation Users-Guide with comments from Keith and
+Sam. Remove clm copy of mkSrcfiles/mkDepends. Remove run-ibm. Remove following options
+from configure and config_definition: clm_exe, clm_exedir, and clm_bld. Remove
+CASE/CCSM/CAM tests from test_driver.sh. Add some tests for new rcp=8.5.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1104 (Restart problem with pftdyn mode)
+ 1118 (Restarts with SNICAR_FRC fail)
+ 1121 (history variable attribute cell_methods misnamed)
+ 1143 (Problems with mksurfdata and ifort)
+ 1144 (Bug in ndeplintInterp.ncl in calculation of time axis)
+ 1147 (mkgriddata can't straddle over Greenwich)
+ 1153 (Problem with ndeplintInterp for historical case)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Some changes to configure
+ Remove unused configure options: clm_exe, -clm_exedir, and -clm_bld.
+ Move standalone testing options to the end of help. Use CCSM version
+ of mkSrcfiles/mkDepends, separate config vars into categories, work
+ on documentation with comments from Keith/Sam.
+
+Describe any changes made to the namelist: Check for some files based on rcp
+
+ Add in ability to add a user namelist in your case directory to input
+ namelist items at configure time. Simply add a file called "user_nl_clm"
+ as a valid namelist and the items in that namelist will show up in the initial
+ BuildConf/clm.buildnml.csh file.
+
+List any changes to the defaults for the boundary datasets:
+ fsurdat: f10, 1850
+ fpftdyn: f10, 1850-2000, 1850-2100 (rcp=8.5)
+ fpftdyn: f19, 1850-2100 (rcp=8.5)
+ faerdep: f19, 1850-2100 (rcp=8.5)
+ fndepdat: f19, decadal averages (rcp=2.6)
+ fndepdyn: f19, 1850-2100 (rcp=8.5 and rcp=4.5)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, pio
+
+ scripts to scripts4_100204
+ drv to drvseq3_1_15
+ pio to pio1_0_8
+
+List all files eliminated:
+
+>>>>>>>>>>>>>>> Remove CCSM, scam, cam, and run-ibm script testing from test-system
+>>>>>>>>>>>>>>> Use .clm.auxtest lists in CCSM scripts for CCSM testing
+ D models/lnd/clm/test/system/TSM_ccsmseq.sh
+ D models/lnd/clm/test/system/TSMext_ccsmseq_cam.sh
+ D models/lnd/clm/test/system/TCT_ccsmseq.sh
+ D models/lnd/clm/test/system/TCSruncase.sh
+ D models/lnd/clm/test/system/TSMruncase.sh
+ D models/lnd/clm/test/system/TCBext_ccsmseq_cam.sh
+ D models/lnd/clm/test/system/TSCext_ccsmseq_scam.sh
+ D models/lnd/clm/test/system/tests_posttag_lightning
+ D models/lnd/clm/test/system/config_files/scam_ds
+ D models/lnd/clm/test/system/config_files/ext_ccsm_seq_10x15_dh
+ D models/lnd/clm/test/system/config_files/ext_ccsm_seq_4x5_dh
+ D models/lnd/clm/test/system/config_files/ext_ccsm_seq_1.9x2.5_dh
+ D models/lnd/clm/test/system/config_files/ext_ccsm_seq_64x128_s
+ D models/lnd/clm/test/system/config_files/ext_ccsm_seq_0.9x1.25_dh
+ D models/lnd/clm/bld/run-ibm.csh
+ D models/lnd/clm/bld/create_newcase
+>>>>>>>>>>>>>>> Remove mkSrcfiles/mkDepends duplicated from ccsm scripts
+ D models/lnd/clm/bld/mkSrcfiles
+ D models/lnd/clm/bld/mkDepends
+
+List all files added and what they do:
+
+>>>>>>>>>>>>> no-RTM mode configurations for hybrid and mpi-only testing
+ A models/lnd/clm/test/system/config_files/_nrsc_dh
+ A models/lnd/clm/test/system/config_files/_nrsc_dm
+>>>>>>>>>>>>> Add transient_rcp8.5 use-case option
+ A models/lnd/clm/test/system/nl_files/clm_transient_rcp8.5
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/README.testnames --- Update test names
+ M models/lnd/clm/test/system/test_driver.sh ----- Remove CLM_SEQCCSMROOT
+ M models/lnd/clm/test/system/input_tests_master - Remove CCSM/cam tests, add rcp8.5 test
+ M models/lnd/clm/test/system/README ------------- Remove doc on CLM_SEQCCSMROOT
+>>>>>>>>>>>>> Remove CCSM, cam, scam, and run-ibm tests from test lists
+ M models/lnd/clm/test/system/tests_pretag_bluefire
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi
+ M models/lnd/clm/test/system/tests_pretag_jaguar
+ M models/lnd/clm/test/system/tests_posttag_kraken
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+ M models/lnd/clm/test/system/tests_pretag_bangkok
+ M models/lnd/clm/test/system/tests_posttag_intrepid
+ M models/lnd/clm/test/system/tests_pretag_dublin
+ M models/lnd/clm/test/system/tests_pretag_dublin_nompi
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression
+ M models/lnd/clm/test/system/tests_pretag_calgary
+
+>>>>>>>>>>>>> New location for mkDepends, correct documentation
+ M models/lnd/clm/tools/mkgriddata/README ----- Correct mention of download
+ M models/lnd/clm/tools/mksurfdata/Makefile --- Change location of mkDepends
+ M models/lnd/clm/tools/interpinic/Makefile --- Change location of mkDepends
+ M models/lnd/clm/tools/mkgriddata/Makefile --- Change location of mkDepends
+ M models/lnd/clm/tools/mkdatadomain/Makefile - Change location of mkDepends
+
+>>>>>>>>>>>>> Remove unused configure options: clm_exe, -clm_exedir, and -clm_bld
+>>>>>>>>>>>>> Move standalone testing options to the end of help
+>>>>>>>>>>>>> Use CCSM version of mkSrcfiles/mkDepends, separate config vars into
+>>>>>>>>>>>>> categories, work on documentation
+ M models/lnd/clm/bld/configure -------------------------- Move clm standalone testing
+ options to the end of the help, remove: -clm_exe,
+ -clm_exedir, and -clm_bld options
+ M models/lnd/clm/bld/config_files/Makefile.in ----------- Use CCSM version of mkSrcfiles/mkDepends
+ M models/lnd/clm/bld/config_files/config_definition.xsl - Separate variables into categories.
+ M models/lnd/clm/bld/config_files/config_definition.xml - Add categories, add
+ description changes from Keith/Sam, remove clm_exe, clm_exedir, and clm_bld
+
+ M models/lnd/clm/bld/listDefaultNamelist.pl -- Also loop over rcp values
+
+ M models/lnd/clm/bld/build-namelist ----- have rcp value impact filenames retrieved
+ M models/lnd/clm/bld/clm.cpl7.template -- Add CLM_NML_USE_CASE and CLM_CO2_TYPE,
+ remove CLM_DEMAND add user_nl_clm namelist, remove -clm_bld.
+ M models/lnd/clm/bld/README ------------- Update documentation after removing the
+ clm stand-alone build/run scripts.
+
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ------- Remove defaults
+ as in namelist_defaults files, apply suggestions from Keith/Sam
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - Change rcp default to -999.9
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl ------- Put description above valid values
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ----- New datasets:
+ fsurdat: f10, 1850
+ fpftdyn: f10, 1850-2000, 1850-2100 (rcp=8.5)
+ fpftdyn: f19, 1850-2100 (rcp=8.5)
+ faerdep: f19, 1850-2100 (rcp=8.5)
+ fndepdat: f19, decadal averages (rcp=2.6)
+ fndepdyn: f19, 1850-2100 (rcp=8.5 and rcp=4.5)
+>>>>>>>>>>>>> Update documentation, add in documentation on changes added in here
+ M models/lnd/clm/doc/UsersGuide/index.xml -- Spellcheck, more work on doc, update
+ for changes that came in on this tag.
+ M models/lnd/clm/doc/index.shtml ----------- Correct test table.
+
+Summary of testing:
+
+ bluefire: All PASS except...
+009 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic
+........FAIL! rc= 10
+010 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72
+arb_ic ......FAIL! rc= 5
+011 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72
+arb_ic ......FAIL! rc= 5
+012 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic
+........FAIL! rc= 4
+013 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic
+.........FAIL! rc= 10
+017 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic
+.........FAIL! rc= 7
+ bluefire interactive testing: All PASS
+ bluefire/CCSM testing: All PASS, except..
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm3_7_02
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_7_01
+FAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm3_7_01
+FAIL PET.f10_f10.I8520CN.bluefire.compare.clm3_7_01
+FAIL ERS_D.f19_g16.I8521CNR85.bluefire
+BFAIL ERS_D.f19_g16.I8521CNR85.bluefire.generate.clm3_7_02
+BFAIL ERS_D.f19_g16.I8521CNR85.bluefire.compare.clm3_7_01
+ Special testing:
+PASS ERS.f19_g16.I8521CNR85.bluefire
+ breeze,gale,hail,gust/ifort interactive testing: All PASS except...
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_01
+
+Changes answers relative to baseline: f10 because of new surface dataset
+ Other resolutions will be bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_7_01
+Originator(s): erik (Erik Kluzek)
+Date: Fri Jan 29 12:58:12 MST 2010
+One-line Summary: OpenMP fix for pftdyn, start adding in rcp's, update ndeplintInterp.ncl script
+
+Purpose of changes:
+
+Changes to ndeplintInterp script to add the ability to generate ndepdyn datasets for future scenarios
+2005+. Add rcp as input to build-namelist and add use-cases with different rcp's. Small bug-fixes to
+mksurfdata. Add lists for 1850-2100 for the rcp's. Update drv and scripts to latest. Update documentation.
+Fix from Mariana on OpenMP problem in pftdyn. Remove lightning from tests, start adding in
+edinburgh.
+
+Bugs fixed (include bugzilla ID):
+ 1102 (OpenMP problem with pftdyn mode)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1104 (Restart problem with pftdyn mode)
+ 1118 (Restarts with SNICAR_FRC fail)
+ 1121 (history variable attribute cell_methods misnamed)
+ 1153 (Problem with ndeplintInterp for historical case)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, code-changes came from Mariana-Vertenstein, reviewed by Pat Worley
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, and datm
+
+ scripts to scripts4_100125
+ drv to drvseq3_1_13
+ datm to datm8_100122
+
+List all files eliminated:
+
+ D models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850-2005.txt --> rename to pftdyn_hist_simyr1850-2005.txt
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>>> List of surface datasets to create pftdyn files
+ A models/lnd/clm/tools/mksurfdata/pftdyn_rcp2.6_simyr1850-2100.txt
+ A models/lnd/clm/tools/mksurfdata/pftdyn_rcp4.5_simyr1850-2100.txt
+ A models/lnd/clm/tools/mksurfdata/pftdyn_rcp8.5_simyr1850-2100.txt
+>>>>>>>>>>>>>>> Add use-cases for future scenarios
+ A models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp8.5_transient.xml
+ A models/lnd/clm/bld/namelist_files/use_cases/1850-2100_rcp4.5_transient.xml
+
+ A models/lnd/clm/tools/mksurfdata/pftdyn_hist_simyr1850-2005.txt --> renamed
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>> Add edinburgh, remove lightning
+ M models/lnd/clm/test/system/test_driver.sh --------- Remove lightning, add edinburgh
+ M models/lnd/clm/test/system/TCBext_ccsmseq_cam.sh -- Delete csm_share/dshr directory no longer exists
+ M models/lnd/clm/test/system/CLM_runcmnd.sh --------- Remove lightning, add edinburgh
+
+ M models/lnd/clm/tools/mksurfdata/mkharvestMod.F90 -- Fix small compiler bug for jaguar
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pftdyn - Point to new name for historical pftdyn file
+
+>>>>>>>>>>>>>> Handle future scenarios for dynamic Nitrogen-Deposition file creation
+ M models/lnd/clm/tools/ncl_scripts/ndeplintInterp.ncl -- Add ability to handle future scenario data, and also leave
+                                                          previous historical data the same as before.
+
+>>>>>>>>>>>>>> Start adding ability to handle future scenarios for different RCP's
+ M models/lnd/clm/bld/build-namelist ------------------------------- Add rcp,
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ------- Add rcp, new sim_year, sim_year_range values, allow blank
+ for hist_type1d_pertape
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml - Add default for rcp as -999.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ----- Add new Nitrogen deposition decadal datasets for RCP4.5/RCP8.5
+
+ M models/lnd/clm/doc/UsersGuide/index.xml -- Add more in, add notes on namelist, tools, and special cases
+
+>>>>>>>>>>>> Changes from Mariana V. to fix bug 1102, OpenMP bug with pftdyn cases
+ M models/lnd/clm/src/main/clm_initializeMod.F90 - Don't pass decomp bounds down
+ M models/lnd/clm/src/main/pftdynMod.F90 --------- Get decomp bounds here
+ M models/lnd/clm/src/main/clm_driver.F90 -------- Call pftdyn_interp on own OMP loop
+
+Summary of testing:
+
+ bluefire:
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+049 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+050 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 4
+051 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 4
+052 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 4
+ bluefire interactive testing:
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+050 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ bluefire special testing:
+ Run I8520 from Dec/1/1850 @f09_g16 for 14 months with 64 tasks and 4 threads and make sure identical
+ to same with 128 tasks and 1 thread (require openMP build) comparing clm history files. clm3_7_00 4-thread case for this fails. But, answers are identical with clm3_7_00 for 128 tasks and 1 thread.
+ jaguar:
+020 blJ62 TBL.sh 4p_casasc_dm clm_std^nl_urb 20021230:NONE:1800 1.9x2.5 gx1v6 48 cold ...........FAIL! rc= 5
+026 erP65 TSM_ccsmseq.sh ERS f19_g15 I ..........................................................FAIL! rc= 4
+027 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 4
+ jaguar interactive testing:
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+ jaguar/special testing:
+ Run I8520 from Dec/1/1850 @f09_g16 for 5 months with 416 tasks and 4 threads and make sure identical
+ to same with 1 thread (require openMP build) comparing clm history files. clm3_7_00 4-thread case for this fails.
+ jaguar/CCSM testing:
+PASS ERS_D.f09_g16.I1850.jaguar
+PASS PET.f10_f10.I8520CN.jaguar.cpl
+PASS PET.f10_f10.I8520CN.jaguar.atm
+PASS PET.f10_f10.I8520CN.jaguar.lnd
+PASS PET.f10_f10.I8520CN.jaguar.ice
+PASS PET.f10_f10.I8520CN.jaguar.ocn
+PASS PET.f10_f10.I8520CN.jaguar.glc
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+ dublin/ifort interactive testing: All PASS except...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+030 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ edinburgh/pgi interactive testing: All PASS except...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+030 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_7_00
+
+Changes answers relative to baseline: No bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_7_00
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Fri Jan 22 22:48:09 MST 2010
+One-line Summary: Update to datm8, redirect aquifer overflow to drainage, add gx3v7
+ masks, script to extract regional datasets, add harvesting for CN,
+ modify shrubs, include urban model, ice stream for snowcapping,
+ new build-namelist system, scale solar by solar zenith angle in
+ datm, deep soil with bedrock at bottom, organic matter in soils,
+                    SNICAR for snow radiation, sparse dense aero, snow cover changes
+
+Software engineering changes:
+
+ Update to cpl7 and scripts.
+ Remove offline and cpl6 modes.
+ Remove support for CASA model.
+ Update to datm8 atmospheric data model.
+ Add gx3v7 land mask for T31 and fv-4x5 horizontal resolutions.
+    Add gx1v6 land mask for f05, f09, and f19 horizontal resolutions.
+ Add tx1v1 land mask and 1.9x2.5_tx1v1 horizontal resolution.
+ Add in 2.5x3.33 horizontal resolution.
+ Add in T62 horizontal resolution so can run at same resolution as input datm data.
+ Allow first history tape to be 1D.
+ Add ability to use own version of input datasets with CLM_USRDAT_NAME variable.
+ Add a script to extract out regional datasets.
+ New build-namelist system with XML file describing all namelist items.
+ Add glacier_mec use-case and stub glacier model.
+ Add ncl script to time-interpolate between 1850 and 2000 for fndepdat dataset, for fndepdyn version.
+ Make default of maxpatch_pft=numpft+1 instead of 4.
+ Only output static 3D fields on first h0 history file to save space.
+ Add new fields for VOC (Volatile Organic Compounds) on some surface datasets, that will be
+ needed for the new MEGAN VOC model (NOT incorporated yet).
+ Add irrigation area to mksurfdata tool (NOT used in CLM yet).
+ Add multiple elevation class option for glaciers in mksurfdata tool (NOT used in CLM yet).
+    Add ascale field to land model in support of model running on its own grid.
+
+Science changes:
+
+ Change to freezing temperature constant
+ Forcing height at atm plus z0+d on each tile
+ Effective porosity divide by zero fix
+ Sparse/dense canopy aerodynamic parameters
+ Ground/snow emissivity smooth transition
+ Thermal and hydraulic properties of organic soil
+ Init h2osoi=0.3
+ Snow compaction fix
+ Snow T profile during layer splitting fix
+ Snow burial fraction
+ Snow cover fraction
+ SNICAR (snow aging, black carbon and dust deposition, vertical distribution of solar energy)
+ Remove SNOWAGE, no longer used
+ Deep soil (15 layers, ~50m), 5 new layers are hydrologically inactive bed rock
+ Ground evap (beta), stability, and litter resistance
+ Organic/mineral soil hydraulic conductivity percolation theory
+ Richards equation modifications
+ Normalization of frozen fraction of soil formulation
+ One-step solution for soil moisture and qcharge
+ Changes to rsub_max for drainage and decay factor for surface runoff
+ Fixed diurnal cycle of solar radiation in offline forcing data
+ Back to CLM3 lakes and wetlands datasets, but 1% rather than 5% threshold (same for glacier)
+ Changes to pft physiology file from CN
+ New grass optical properties
+ New surface dataset assuming no herbaceous understory
+ Direct versus diffuse radiation offline
+ New VOC model (MEGAN)
+ Snow-capped runoff goes to new ice stream and routed to ocean as ice
+ Dust model always on, LAI threshold parameter change from 0.1 to 0.3
+ Daylength control on vcmax
+ SAI and get_rad_dtime fix
+ Always run with MAXPATCH_PFT=npfts + 1 instead of 4
+ Transient land cover/use mode - datasets, energy and water balance
+ RTM sub-cycling
+ Twostream bug fix
+ Update soil colors
+ 2m relative humidity
+ Fix for aquifer leak (SoilHydrologyMod, BalanceCheckMod)
+ New nitrogen deposition file (units and sum of NOx, NHy)
+
+Quickstart to new cpl7 scripts...
+
+ cd scripts
+ ./create_newcase -help # get help on how to run create_newcase
+ ./create_newcase -case testI -mach bluefire -res f19_g16 -compset I # create new "I" case for bluefire at 1.9x2.5_gx1v6 res
+ # "I" case is clm active, datm7, and inactive ice/ocn
+ cd testI
+ ./xmlchange -help # Get help on editor for XML files
+ ./xmlchange env_conf.xml env_mach_pes # Edit configure files if needed
+ configure -case # create scripts
+ ./xmlchange env_build.xml # Edit build files if needed
+ testI.build # build model and create namelists
+ ./xmlchange env_run.xml # Edit run files if needed
+ bsub < testI.run # submit script
+ # (NOTE: edit env_run.xml to set RESUBMIT to number of times to automatically resubmit)
+Quickstart to use of regional extraction scripts and PERSONAL datasets:
+
+ # Run the script to create an area to put your files (assume CSMDATA set to standard inputdata)
+ cd scripts
+ setenv MYCSMDATA $HOME/myinputdata
+ link_dirtree $CSMDATA $MYCSMDATA
+
+ # Run the extraction for data from 52-73 North latitude, 190-220 longitude
+ # that creates 13x12 gridcell region from the f19 (1.9x2.5) global resolution over
+ # Alaska
+ cd ../models/lnd/clm/tools/ncl_scripts
+ setenv MYID 13x12pt_f19_alaskaUSA
+ getregional_datasets.pl -sw 52,190 -ne 73,220 -id $MYID -mycsmdata $MYCSMDATA
+
+ # Now create a case that uses these datasets
+ cd ../../../../../scripts
+ create_newcase -case testregional -compset I -mach bluefire -res pt1_pt1 -skip_rundb
+ cd testregional
+ $EDITOR env_conf.xml # change CLM_BLDNML_OPTS to include "-clm_usr_name $MYID" (expand $MYID)
+ $EDITOR env_mach_pes.xml # Change tasks/threads as appropriate (defaults to serial)
+ xmlchange -file env_run.xml -id DIN_LOC_ROOT_CSMDATA -val $MYCSMDATA
+
+ # Do other changes to xml files as appropriate
+ # configure as normal, then edit the datm namelist
+
+ configure -case
+
+ # Then build and run the case as normal
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1102 (OpenMP problem with pftdyn mode)
+ 1121 (history variable attribute cell_methods misnamed)
+ 1118 (Restarts with SNICAR_FRC fail)
+
+Describe any changes made to build system:
+
+ Change directory structure to match CCSM.
+ Add BGP target.
+ Add choice between ESMF and MCT frameworks.
+   Start removing #ifdef and directives that supported Cray-X1 Phoenix as now decommissioned.
+ Make default of maxpatch_pft=numpft+1 instead of 4 for all configurations.
+ By default turn on CLAMP when either CN or CASA is enabled
+ New SNICAR_FRC, CARBON_AERO, and C13 CPP ifdef tokens.
+
+ New options added to configure:
+
+ -comp_intf Component interface to use (ESMF or MCT) (default MCT)
+ -nofire Turn off wildfires for bgc setting of CN (default includes fire for CN)
+ -pio Switch enables building with Parallel I/O library. [on | off] (default is on)
+ -snicar_frc Turn on SNICAR radiative forcing calculation. [on | off] (default is off)
+
+Describe any changes made to the namelist:
+
+ NOTE: build-namelist now checks the validity of your namelist you generate by looking at data in the namelist_definition.xml
+ file. In order to add new namelist items you need to change the code and also edit this file. To view information
+ on the namelist view the file:
+ models/lnd/clm/bld/namelist_files/namelist_definition.xml
+ in your browser and you'll see the names, type, description and valid_values for all namelist variables.
+
+ Changes to build-namelist:
+ Transient sim_year ranges (i.e. 1850-2000)
+ Remove cam_hist_case option.
+ Make sure options ONLY used for stand-alone testing have a "drv_" or "datm_" prefix in them and list these
+ options all together and last when asking for help from build-namelist.
+ New options to build-namelist:
+ -clm_usr_name "name" Dataset resolution/descriptor for personal datasets. Default: not used
+ Example: 1x1pt_boulderCO_c090722 to describe location,
+ number of pts, and date files created
+ New list options to build-namelist:
+ build-namelist -res list # List valid resolutions
+ build-namelist -mask list # List valid land-masks
+ build-namelist -sim_year list # List valid simulation years and simulation year ranges
+ build-namelist -clm_demand list # List namelist variables including those you could demand to be included.
+ build-namelist -use_case list # List valid use-cases
+
+ New use-cases for:
+
+ 1850_control = Conditions to simulate 1850 land-use
+ 2000_control = Conditions to simulate 2000 land-use
+20thC_transient = Simulate transient land-use, and aerosol deposition changes from 1850 to 2005
+ glacier_mec = Placeholder for running IG cases with the ice sheet model glimmer
+
+ New namelist items:
+
+ urban_hac = OFF, ON or ON_WASTEHEAT (default OFF) Flag for urban Heating and Air-Conditioning
+ OFF = Building internal temperature is un-regulated.
+ ON = Building internal temperature is bounded to reasonable range.
+ ON_WASTEHEAT = Building internal temperature is bounded and resultant waste
+ heat is given off.
+ urban_traffic = .true. or .false. (default .false.) Flag to include additional multiplicative factor of urban traffic
+ to sensible heat flux.
+ fsnowoptions = filename file for snow/aerosol optical properties (required)
+ fsnowaging = filename file for snow aging parameters (required)
+ faerdep = filename file of aerosol deposition (required)
+
+ New history variables: (note watt vs. W in units, 26 vs. 76)
+ BCDEP total BC deposition (dry+wet) from atmosphere kg/m^2/s
+ C13_PRODUCT_CLOSS C13 total carbon loss from wood product pools gC13/m^2/s
+ DSTDEP total dust deposition (dry+wet) from atmosphere kg/m^2/s
+ EFLX_DYNBAL dynamic land cover change conversion energy flux W/m^2
+ FGR12 heat flux between soil layers 1 and 2 watt/m^2
+ FSAT fractional area with water table at surface unitless
+ FSH_NODYNLNDUSE sensible heat flux not including correction for land use change
+ watt/m^2
+ GC_HEAT1 initial gridcell total heat content J/m^2
+ GC_HEAT2 post land cover change total heat content J/m^2 inactive
+ GC_ICE1 initial gridcell total ice content mm/s
+ GC_ICE2 post land cover change total ice content mm/s inactive
+ GC_LIQ1 initial gridcell total liq content mm
+ GC_LIQ2 initial gridcell total liq content mm inactive <<<< name??
+ H2OSNO_TOP mass of snow in top snow layer kg
+ HEAT_FROM_AC sensible heat flux put into canyon due to heat removed from air conditioning
+ watt/m^2
+ HK hydraulic conductivity mm/s inactive
+ LWup upwelling longwave radiation watt/m^2 inactive
+ NBP net biome production, includes fire, landuse, and harvest flux, positive for sink
+ gC/m^2/s
+ OCDEP total OC deposition (dry+wet) from atmosphere kg/m^2/s
+ PBOT atmospheric pressure Pa
+ PCO2 atmospheric partial pressure of CO2 Pa
+ PRODUCT_CLOSS total carbon loss from wood product pools gC/m^2/s
+ PRODUCT_NLOSS total N loss from wood product pools gN/m^2/s
+ Qair atmospheric specific humidity kg/kg inactive
+ Qanth anthropogenic heat flux watt/m^2 inactive
+ Qtau momentum flux kg/m/s^2
+ QFLX_LIQ_DYNBAL liq dynamic land cover change conversion runoff flux mm/s
+ QFLX_ICE_DYNBAL ice dynamic land cover change conversion runoff flux mm/s
+ QRUNOFF_NODYNLNDUSE total liquid runoff not including correction for land use change (does not include QSNWCPICE)
+ mm/s
+ QSNWCPICE excess snowfall due to snow capping mm/s
+ QSNWCPICE_NODYNLNDUSE excess snowfall due to snow capping not including correction for land use change
+ mm/s
+ QSNWCPLIQ excess rainfall due to snow capping mm/s inactive
+ SMP soil matric potential mm inactive
+ SNOAERFRC2L surface forcing of all aerosols in snow, averaged only when snow is present (land)
+ watt/m^2
+ SNOAERFRCL surface forcing of all aerosols in snow (land) watt/m^2
+ SNOBCFRCL surface forcing of BC in snow (land) watt/m^2
+ SNOBCMCL mass of BC in snow column kg/m2
+ SNOBCMSL mass of BC in top snow layer kg/m2
+ SNOdTdzL top snow layer temperature gradient (land) K/m
+ SNODSTFRC2L surface forcing of dust in snow, averaged only when snow is present (land)
+ watt/m^2
+ SNODSTFRCL surface forcing of dust in snow (land) watt/m^2
+ SNODSTMCL mass of dust in snow column kg/m2
+ SNODSTMSL mass of dust in top snow layer kg/m2
+ SNOFSRND direct nir reflected solar radiation from snow watt/m^2 inactive
+ SNOFSRNI diffuse nir reflected solar radiation from snow watt/m^2 inactive
+ SNOFSRVD direct vis reflected solar radiation from snow watt/m^2 inactive
+ SNOFSRVI diffuse vis reflected solar radiation from snow watt/m^2 inactive
+ SNOFSDSND direct nir incident solar radiation on snow watt/m^2 inactive
+ SNOFSDSNI diffuse nir incident solar radiation on snow watt/m^2 inactive
+ SNOFSDSVD direct vis incident solar radiation on snow watt/m^2 inactive
+ SNOFSDSVI diffuse vis incident solar radiation on snow watt/m^2 inactive
+ SNOLIQFL top snow layer liquid water fraction (land) fraction inactive
+ SNOOCMCL mass of OC in snow column kg/m2
+ SNOOCMSL mass of OC in top snow layer Kg/m2
+ SNOOCFRC2L surface forcing of OC in snow, averaged only when snow is present (land)
+ watt/m^2
+ SNOOCFRCL surface forcing of OC in snow (land) watt/m^2
+ SNORDSL top snow layer effective grain radius m^-6 inactive
+ SNOTTOPL snow temperature (top layer) K/m inactive <<< units?
+ SWup upwelling shortwave radiation watt/m^2 inactive
+ URBAN_AC urban air conditioning flux watt/m^2
+ URBAN_HEAT urban heating flux watt/m^2
+ Wind atmospheric wind velocity magnitude m/s inactive
+ WOOD_HARVESTC wood harvest (to product pools) gC/m^2/s
+ WOOD_HARVESTN wood harvest (to product pools) gN/m^2/s
+
+ History field name changes:
+
+ ANNSUM_PLANT_NDEMAND => ANNSUM_POTENTIAL_GPP
+ ANNSUM_RETRANSN => ANNMAX_RETRANSN
+ C13_DWT_PROD10C_LOSS => C13_PROD10C_LOSS
+ C13_DWT_PROD100C_LOSS => C13_PROD100C_LOSS
+ C13_DWT_PROD10N_LOSS => C13_PROD10N_LOSS
+ C13_DWT_PROD100N_LOSS => C13_PROD100N_LOSS
+ DWT_PROD10N_LOSS => PROD10N_LOSS
+ DWT_PROD100N_LOSS => PROD100N_LOSS
+ DWT_PROD10C_LOSS => PROD10C_LOSS
+ DWT_PROD100C_LOSS => PROD100C_LOSS
+ HCSOISNO => HC
+ TEMPSUM_PLANT_NDEMAND => TEMPSUM_POTENTIAL_GPP
+ TEMPSUM_RETRANSN => TEMPMAX_RETRANSN
+
+ History field names deleted:
+ SNOWAGE, TSNOW, FMICR, FCO2, DMI, QFLX_SNOWCAP
+
+ Add new urban oriented _U, and _R (Urban and Rural) for:
+ EFLX_LH_TOT, FGR, FIRA, FSH, FSM, Q2M, QRUNOFF, RH2M, SoilAlpha, TG, TREFMNAV, TREFMXAV, and TSA
+ (missing _R for SoilAlpha)
+
+Describe timing and memory performance:
+
+Versions of any externally defined libraries:
+
+ scripts scripts4_100108b
+ drv vocemis-drydep12_drvseq3_1_11
+ datm datm8_091218
+ socn stubs1_2_02/socn
+ sice stubs1_2_02/sice
+ sglc stubs1_2_02/sglc
+ csm_share vocemis-drydep13_share3_091217
+ esmf_wrf_timemgr esmf_wrf_timemgr_090402
+ timing timing_090929
+ mct MCT2_7_0_100106
+ pio pio60_prod
+ cprnc cprnc_081022
+
+Summary of testing:
+ bluefire: All PASS up to...017 blF92 TBL.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............PASS
+ bluefire interactive testing: All PASS up to..
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ bluefire/CCSM testing:
+PASS ERH.f09_g16.B20TRCN.bluefire
+BFAIL ERH.f09_g16.B20TRCN.bluefire.compare.ccsm4_0_beta38 --- compset names changed -- but cpl.log files compare exactly
+!
+> ../Tools/check_exactrestart.pl cpl.log.100109-171753 $FISHOME/ccsm4_0_beta38/scripts/ERH.f09_g16.B20TRTR1CN.bluefire.G
+.172652/logs/cpl.log.100108-181015
+log files match!
+PASS
+PASS SMS_RLA.f45_f45.I.bluefire
+PASS SMS_RLB.f45_f45.I.bluefire
+PASS SMS_ROA.f45_f45.I.bluefire
+PASS ERS_D.f45_g37.I.bluefire
+PASS ERS_D.f45_g37.I.bluefire.compare_hist.clm3_6_58+datm8
+PASS ERS_D.f45_g37.I.bluefire.compare.clm3_6_58+datm8
+PASS PET.f45_g37.I1850.bluefire.cpl
+PASS PET.f45_g37.I1850.bluefire.atm
+PASS PET.f45_g37.I1850.bluefire.lnd
+PASS PET.f45_g37.I1850.bluefire.ice
+PASS PET.f45_g37.I1850.bluefire.ocn
+PASS PET.f45_g37.I1850.bluefire.glc
+PASS PET.f45_g37.I1850.bluefire.compare.clm3_6_58+datm8
+PASS ERS.f19_g16.I1850.bluefire
+PASS ERS.f19_g16.I1850.bluefire.compare_hist.clm3_6_58+datm8
+PASS ERS.f19_g16.I1850.bluefire.compare.clm3_6_58+datm8
+PASS ERB.f09_g16.I_1948-2004.bluefire
+PASS ERB.f09_g16.I_1948-2004.bluefire.compare.clm3_6_58+datm8
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire <<<<<<<<<<<<<<<<<<<<<<<<< Failed before bug 1063
+PASS ERH_D.f10_f10.I1850CN.bluefire
+PASS PET.f10_f10.I8520CN.bluefire.cpl
+PASS PET.f10_f10.I8520CN.bluefire.atm
+PASS PET.f10_f10.I8520CN.bluefire.lnd
+PASS PET.f10_f10.I8520CN.bluefire.ice
+PASS PET.f10_f10.I8520CN.bluefire.ocn
+PASS PET.f10_f10.I8520CN.bluefire.glc
+ jaguar interactive testing: All PASS up to...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ dublin/lf95 interactive testing: None PASS because of bug 1092
+ dublin/lf95: None PASS because of bug 1092
+ dublin/INTEL interactive testing: ALL PASS except (and didn't compare to baseline)
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+028 smM94 TSMncl_tools.sh ndepregrid ............................................................FAIL! rc= 6
+030 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ dublin/INTEL: All PASS up to 021 smJ92 TSM.sh (and didn't compare to baseline)
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+
+===============================================================
+===============================================================
+Tag name: clm3_6_64
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Fri Jan 22 22:19:24 MST 2010
+One-line Summary: Update documentation and README/Quickstart files, set NetCDF large-file format on by default in template, update pio, update some fsurdat files to vocemis-drydep versions, add 2.5x3.33_gx3v7 frac file, make gx3v7 default for 4x5 res
+
+Purpose of changes:
+
+Setup makefiles for docbook UsersGuide to output both pdf and html formats. Work on documentation of new _esmf driver files. Work on documentation. Make sure documentation of clm xml variables is good. Add note about CASA NOT being supported. Work on README/Quickstart files, and move the files from the top level to clm doc directory, but leave a file at top level pointing to these files. Make large file support default, remove LND_CDF64. Add in VOC surfdata files from voc branch: T42, T31, T21, T5, 4x5, 10x15-pftdyn. Add in new 2.5x3.33_gx3v7 frac file. Make default mask for 4x5 gx3v7.
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Turn NetCDF large-file support on by default
+
+List any changes to the defaults for the boundary datasets:
+
+ New fsurdat files for: T42, T31, T21, T5, 4x5
+ New fpftdyn file for 10x15 for 1850-2000, new frac file for 2.5x3.33_gx3v7
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): pio
+
+ pio updated to google version: http://parallelio.googlecode.com/svn/trunk_tags/pio1_0_7/pio
+
+List all files eliminated:
+
+ D Quickstart.userdatasets --- Move to models/lnd/clm/doc
+ D Quickstart.GUIDE ---------- Move to models/lnd/clm/doc
+ D README.DGVM --------------- Move to models/lnd/clm/doc
+ D KnownBugs ----------------- Move to models/lnd/clm/doc
+ D models/lnd/clm/doc/docs.html ----------------- Remove in favor of DocBook documentation
+ D models/lnd/clm/doc/UsersGuide/clm_head.shtml - Remove in favor of DocBook documentation
+ D models/lnd/clm/doc/UsersGuide/tree.html ------ Remove in favor of DocBook documentation
+ D models/lnd/clm/doc/UsersGuide/clm_foot.shtml - Remove in favor of DocBook documentation
+ D models/lnd/clm/doc/UsersGuide/index.shtml ---- Remove in favor of DocBook documentation
+
+List all files added and what they do:
+
+ A models/lnd/clm/doc/Quickstart.userdatasets - Move from top level
+ A models/lnd/clm/doc/IMPORTANT_NOTES --------- Add important notes about what's scientifically validated/expected to work
+ A models/lnd/clm/doc/Quickstart.GUIDE -------- Move from top level
+ A models/lnd/clm/doc/KnownBugs --------------- Move from top level
+ A models/lnd/clm/doc/UsersGuide/Makefile ----- Makefile to build Users-Guide
+ A models/lnd/clm/doc/index.shtml ------------- Add HTML guide to documentation
+ A models/lnd/clm/doc/CodeReference/Filepath -- Filepath to source-code to build Code Reference Guide
+ A models/lnd/clm/doc/CodeReference/Makefile -- Makefile to build Code Reference Guide using Protex
+
+ A models/lnd/clm/test/system/Makefile -------- Makefile to build HTML test table
+
+ A models/lnd/clm/test/system/config_files/_nrsc_do --- Add smp only option for no-RTM seq-ccsm default mode
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/bld/clm.cpl7.template -------------------------- Set large_file_format to true by default
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml --- Add in vocemis-drydep branch fsurdat files for: T42, T31, T21, T5,
+ and 4x5 resolution (as well as 10x15 1850-2005 pftdyn file)
+ Add in 2.5x3.33_gx3v7 frac file, and make gx3v7 mask the default
+ for 4x5 resolution.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml -- 2.5x3.33_gx3v7 domain file
+>>>>>>>>>>>>> Update documentation and README text files
+ M models/lnd/clm/test/system/README
+ M models/lnd/clm/tools/mksurfdata/README
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ------------------ Make namelist documentation a little more clear
+ M models/lnd/clm/tools/ncl_scripts/convertUrbanOffline2Seq.ncl -- Document datm as datm8
+ M models/lnd/clm/tools/ncl_scripts/README
+ M models/lnd/clm/tools/interpinic/README
+ M models/lnd/clm/tools/mkdatadomain/README
+ M models/lnd/clm/tools/README
+ M models/lnd/clm/bld/README
+ M models/lnd/clm/doc/UsersGuide/index.xml ---- Update docbook UsersGuide
+ M models/lnd/clm/doc/README
+ M README
+
+ M models/lnd/clm/test/system/tests_pretag_bluefire ----------- Remove LD1 (2.65x3.33 res) tests
+ M models/lnd/clm/test/system/tests_posttag_hybrid_regression - Remove LD1 tests
+ M models/lnd/clm/test/system/input_tests_master -------------- Remove LD1 tests
+
+Summary of testing:
+
+ bluefire: All PASS except...
+004 blA91 TBL.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 7 << 4x5 fsurdat
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v5 144 cold .................FAIL! rc= 7 << 4x5 fsurdat
+012 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 arb_ic ............FAIL! rc= 7 << 4x5 fsurdat
+017 blF92 TBL.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 7 << 4x5 fsurdat
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+030 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 7 << 10x15 fpftdyn different
+042 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 6
+043 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 5
+ bluefire interactive testing: All PASS except...
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+032 blF93 TBL.sh 17p_vodsrsc_do clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 7 << 4x5 fsurdat
+040 blL83 TBL.sh _nrsc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -10 arb_ic ..........FAIL! rc= 5 << no _nrsc_do in previous
+041 bl744 TBLtools.sh mksurfdata tools__s namelist ..............................................FAIL! rc= 7 << script error
+043 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 7 << script error
+045 bl754 TBLtools.sh mksurfdata tools__s globalirrig ...........................................FAIL! rc= 3 << script error
+050 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 3
+ bluefire/CCSM testing: All PASS except...
+BFAIL SMS_RLA.f45_f45.I.bluefire.compare.clm3_6_58+datm8 <<<<<<<<<<< 4x5 surfdata file different than previous version
+BFAIL SMS_RLB.f45_f45.I.bluefire.compare.clm3_6_58+datm8 <<<<<<<<<<< 4x5 surfdata file different
+BFAIL SMS_ROA.f45_f45.I.bluefire.compare.clm3_6_58+datm8 <<<<<<<<<<< 4x5 surfdata file different
+FAIL ERS_D.f45_g37.I.bluefire.compare_hist.clm3_6_58+datm8 <<<<<<<< 4x5 surfdata file different
+FAIL ERS_D.f45_g37.I.bluefire.compare.clm3_6_58+datm8 <<<<<<<<<<<<< 4x5 surfdata file different
+FAIL PET.f45_g37.I1850.bluefire.compare.clm3_6_58+datm8 <<<<<<<<<<< 4x5 surfdata file different
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire <<<<<<<<<<<<<<<<<<<<<<<<<<< Previous failure
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.generate.clm3_6_64 <<<<<<<< Previous failure
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_6_58+datm8 <<< Previous failure
+BFAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm3_6_58+datm8 <<<<<<< 10x15 pftdyn file different
+BFAIL PET.f10_f10.I8520CN.bluefire.compare.clm3_6_58+datm8 <<<<<<<<< 10x15 pftdyn file different
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_63
+
+Changes answers relative to baseline: Only for the following resolutions because of new fsurdat files: T42, T31, T5, 4x5
+ and for dynamic PFT at 10x15 resolution because of a new pftdyn file
+
+===============================================================
+===============================================================
+Tag name: clm3_6_63
+Originator(s): erik (erik)
+Date: Sat Jan 9 20:37:53 MST 2010
+One-line Summary: Get answers to be identical with ccsm4_0_beta38 for 1 and 2 degree transient cases
+
+Purpose of changes:
+
+Get answers to be identical to ccsm4_0_beta38 for both 1 and 2 degree transient cases. Update scripts to
+very latest. Tweak test_suite for CN so that can run with finidat file, and can run interactive on dublin
+by turning CCSM_BLD to off.
+
+Bugs fixed (include bugzilla ID):
+ 1098 (Use finidat weights instead of weights from fpftdyn file)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1102 (OpenMP problem with pftdyn mode)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+>>>>>> Add configuration files for CN with default of numpft+1 maxpft
+A models/lnd/clm/test/system/config_files/_cnnsc_h
+A models/lnd/clm/test/system/config_files/_cnnsc_m
+A models/lnd/clm/test/system/config_files/_cnnsc_o
+A models/lnd/clm/test/system/config_files/_cnnsc_dh
+A models/lnd/clm/test/system/config_files/_cnnsc_dm
+A models/lnd/clm/test/system/config_files/_cnnsc_do
+A models/lnd/clm/test/system/config_files/_cnnsc_ds
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>> Tweak test suite so can test for a CN transient case with a startup file, needed to find bug
+M models/lnd/clm/test/system/test_driver.sh ------------ Turn CCSM_BLD to off for interactive use
+M models/lnd/clm/test/system/input_tests_master -------- Tweak CN tests so can use finidat file
+>>>>>>>>>>>>>>> Get answers to be identical with ccsm4_0_beta38
+M models/lnd/clm/src/main/clm_initializeMod.F90 -------- add extra call to pftdyn_interp after restart
+M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 -- Read weights directly into clm_type rather than
+ a temporary array.
+
+Summary of testing:
+
+ bluefire: All PASS up to...
+017 blF92 TBL.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............PASS
+ bluefire interactive testing: All PASS up to..
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ bluefire/CCSM testing:
+PASS ERH.f09_g16.B20TRCN.bluefire
+BFAIL ERH.f09_g16.B20TRCN.bluefire.compare.ccsm4_0_beta38 --- compset names changed -- but cpl.log files compare exactly!
+> ../Tools/check_exactrestart.pl cpl.log.100109-171753 $FISHOME/ccsm4_0_beta38/scripts/ERH.f09_g16.B20TRTR1CN.bluefire.G.172652/logs/cpl.log.100108-181015
+log files match!
+PASS
+PASS SMS_RLA.f45_f45.I.bluefire
+BFAIL SMS_RLA.f45_f45.I.bluefire.compare.clm3_6_58+datm8 <<< f45_f45 NOT allowed in baseline
+PASS SMS_RLB.f45_f45.I.bluefire
+BFAIL SMS_RLB.f45_f45.I.bluefire.compare.clm3_6_58+datm8 <<< f45_f45 NOT allowed in baseline
+PASS SMS_ROA.f45_f45.I.bluefire
+BFAIL SMS_ROA.f45_f45.I.bluefire.compare.clm3_6_58+datm8 <<< f45_f45 NOT allowed in baseline
+PASS ERS_D.f45_g37.I.bluefire
+PASS ERS_D.f45_g37.I.bluefire.compare_hist.clm3_6_58+datm8
+PASS ERS_D.f45_g37.I.bluefire.compare.clm3_6_58+datm8
+PASS PET.f45_g37.I1850.bluefire.cpl
+PASS PET.f45_g37.I1850.bluefire.atm
+PASS PET.f45_g37.I1850.bluefire.lnd
+PASS PET.f45_g37.I1850.bluefire.ice
+PASS PET.f45_g37.I1850.bluefire.ocn
+PASS PET.f45_g37.I1850.bluefire.glc
+PASS PET.f45_g37.I1850.bluefire.compare.clm3_6_58+datm8
+PASS ERS.f19_g16.I1850.bluefire
+PASS ERS.f19_g16.I1850.bluefire.compare_hist.clm3_6_58+datm8
+PASS ERS.f19_g16.I1850.bluefire.compare.clm3_6_58+datm8
+PASS ERB.f09_g16.I_1948-2004.bluefire
+PASS ERB.f09_g16.I_1948-2004.bluefire.compare.clm3_6_58+datm8
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire <<<<<<<<<<<<<<<<<<<<<<<<< Failed before bug 1063
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_6_58+datm8
+PASS ERH_D.f10_f10.I1850CN.bluefire
+BFAIL ERH_D.f10_f10.I1850CN.bluefire.compare.clm3_6_58+datm8 <<< f10_f10 NOT allowed in baseline
+PASS PET.f10_f10.I8520CN.bluefire.cpl
+PASS PET.f10_f10.I8520CN.bluefire.atm
+PASS PET.f10_f10.I8520CN.bluefire.lnd
+PASS PET.f10_f10.I8520CN.bluefire.ice
+PASS PET.f10_f10.I8520CN.bluefire.ocn
+PASS PET.f10_f10.I8520CN.bluefire.glc
+BFAIL PET.f10_f10.I8520CN.bluefire.compare.clm3_6_58+datm8 <<< f10_f10 NOT allowed in baseline
+ jaguar interactive testing: All PASS up to...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ dublin/lf95 interactive testing: None PASS because of bug 1092
+ dublin/lf95: None PASS because of bug 1092
+ dublin/INTEL interactive testing: ALL PASS except (and didn't compare to baseline)
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+028 smM94 TSMncl_tools.sh ndepregrid ............................................................FAIL! rc= 6
+030 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ dublin/INTEL: All PASS up to 021 smJ92 TSM.sh (and didn't compare to baseline)
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_58+datm8
+
+Changes answers relative to baseline: No bit-for-bit -- really now!
+
+===============================================================
+===============================================================
+Tag name: clm3_6_62
+Originator(s): erik (erik)
+Date: Fri Jan 8 04:50:59 MST 2010
+One-line Summary: Fix startup of PFT transient cases so properly use data from pftdyn file rather than finidat file
+
+Purpose of changes:
+
+Attempt to fix bug 1098 so that properly use the PFT weights interpolated from the fpftdyn file rather than using the
+weights from the input finidat file.
+
+Bugs fixed (include bugzilla ID): Attempt to fix -- but only a partial fix, answers were still different
+ 1098 (Use finidat weights instead of weights from fpftdyn file)
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1102 (OpenMP problem with pftdyn mode)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts and mct
+
+ scripts to scripts4_100107b
+ mct to MCT2_7_0_100106
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 --- Fix bug with fpftdyn weights
+ M models/lnd/clm/test/system/test_driver.sh ------------- Fix name of ifort Macros file
+
+Summary of testing:
+
+ bluefire: All PASS except, up to 045 erLD1
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+ bluefire interactive testing: All PASS up to..
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ bluefire/CCSM testing: All PASS except CN spinup as below
+PASS ERS.f09_g16.I8520CN.bluefire
+PASS ERS.f09_g16.I8520CN.bluefire.compare_hist.clm3_6_58+datm8
+PASS ERS.f09_g16.I8520CN.bluefire.compare.clm3_6_58+datm8
+PASS SMS_RLA.f45_g37.I.bluefire
+PASS SMS_RLA.f45_g37.I.bluefire.compare_hist.clm3_6_58+datm8
+PASS SMS_RLA.f45_g37.I.bluefire.compare.clm3_6_58+datm8
+PASS SMS_RLB.f45_g37.I.bluefire
+PASS SMS_RLB.f45_g37.I.bluefire.compare_hist.clm3_6_58+datm8
+PASS SMS_RLB.f45_g37.I.bluefire.compare.clm3_6_58+datm8
+PASS SMS_ROA.f45_g37.I.bluefire
+PASS SMS_ROA.f45_g37.I.bluefire.compare_hist.clm3_6_58+datm8
+PASS SMS_ROA.f45_g37.I.bluefire.compare.clm3_6_58+datm8
+PASS ERS_D.f45_g37.I.bluefire
+PASS ERS_D.f45_g37.I.bluefire.compare_hist.clm3_6_58+datm8
+PASS ERS_D.f45_g37.I.bluefire.compare.clm3_6_58+datm8
+PASS PET.f45_g37.I1850.bluefire.cpl
+PASS PET.f45_g37.I1850.bluefire.atm
+PASS PET.f45_g37.I1850.bluefire.lnd
+PASS PET.f45_g37.I1850.bluefire.ice
+PASS PET.f45_g37.I1850.bluefire.ocn
+PASS PET.f45_g37.I1850.bluefire.glc
+PASS PET.f45_g37.I1850.bluefire.compare.clm3_6_58+datm8
+PASS ERS.f19_g16.I1850.bluefire
+PASS ERS.f19_g16.I1850.bluefire.compare_hist.clm3_6_58+datm8
+PASS ERS.f19_g16.I1850.bluefire.compare.clm3_6_58+datm8
+PASS ERB.f09_g16.I_1948-2004.bluefire
+PASS ERB.f09_g16.I_1948-2004.bluefire.compare.clm3_6_58+datm8
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+BFAIL ERB.f09_g16.I1850SPINUPCN.bluefire.compare.clm3_6_58+datm8
+PASS ERH_D.f09_g16.I1850CN.bluefire
+PASS ERH_D.f09_g16.I1850CN.bluefire.compare.clm3_6_58+datm8
+PASS PET.f19_g16.I8520CN.bluefire.cpl
+PASS PET.f19_g16.I8520CN.bluefire.atm
+ jaguar: All PASS up to 021 smJ05
+ jaguar interactive testing: All FAIL except...
+001 smA74 TSM.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -10 arb_ic ..........PASS
+002 erA74 TER.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -5+-5 arb_ic ........PASS
+003 brA74 TBR.sh _nrsc_ds clm_std^nl_urb_br 20030101:NONE:1800 1x1_brazil navy -5+-5 arb_ic .....PASS
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -10 arb_ic ..........PASS
+005 smAK4 TSM.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold ............PASS
+008 blAK4 TBL.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold ............PASS
+ dublin/lf95: None pass because of bug 1092
+ dublin/pgi: All PASS except...
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+030 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 4
+ dublin/ifort interactive: All PASS up to...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_58+datm8 (ccsm4_0_beta38)
+
+Changes answers relative to baseline: Identical without fpftdyn files
+ with and without finidat files. But, can be
+ different to roundoff or more for transient cases.
+
+===============================================================
+===============================================================
+Tag name: clm3_6_61
+Originator(s): erik (erik)
+Date: Thu Jan 7 00:55:20 MST 2010
+One-line Summary: Comment out endrun on finidat and fsurdat weights being incomptable, and go back to using finidat weights
+
+Purpose of changes:
+
+Most of our finidat files have weights incompatible with our new fsurdat files. Hence, we went back to allowing
+the weights to be different and to using the finidat weights so that answers would be the same as before.
+Also hardwire the logfile for datm and clm so that can run testsuite on jaguar. Also add in cppdef required
+for breeze.
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1098 (Use finidat weights instead of weights from fpftdyn file)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+ scripts to scripts4_100107
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/test_driver.sh -------------------- Add -cppdefs '-DFORTRANUNDERSCORE' for breeze.
+ M models/lnd/clm/bld/build-namelist ---------------------------- For standalone testing hardwire clm and
+ datm output log files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - Add comment remove logfile
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 ---------- Put implicit none in right place, comment
+ out abort if weights too different, and use finidat weights instead of fsurdat weights. Hence
+ this version is identical to clm3_6_58, other than the use of datm8 (which is roundoff different).
+
+Summary of testing:
+
+ bluefire:
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+051 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 4
+052 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 4
+053 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 4
+054 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 4
+ bluefire/CCSM testing:
+PEND SMS_RLA.f45_f45.I.bluefire.200614
+PEND SMS_RLB.f45_f45.I.bluefire.200614
+PASS SMS_ROA.f45_f45.I.bluefire
+PASS ERS_D.f45_g37.I.bluefire
+PASS PET.f45_g37.I1850.bluefire.cpl
+PASS PET.f45_g37.I1850.bluefire.atm
+PASS PET.f45_g37.I1850.bluefire.lnd
+PASS PET.f45_g37.I1850.bluefire.ice
+PASS PET.f45_g37.I1850.bluefire.ocn
+PASS PET.f45_g37.I1850.bluefire.glc
+PASS ERS.f19_g16.I1850.bluefire
+PASS ERB.f09_g16.I_1948-2004.bluefire
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+PASS ERH_D.f10_f10.I1850CN.bluefire
+PASS PET.f10_f10.I8520CN.bluefire.cpl
+PASS PET.f10_f10.I8520CN.bluefire.atm
+PASS PET.f10_f10.I8520CN.bluefire.lnd
+PASS PET.f10_f10.I8520CN.bluefire.ice
+PASS PET.f10_f10.I8520CN.bluefire.ocn
+PASS PET.f10_f10.I8520CN.bluefire.glc
+ jaguar interactive testing:
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -10 arb_ic ..........FAIL! rc= 5
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+008 blAK4 TBL.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold ............FAIL! rc= 5
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+013 smJ74 TSM.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -1100 arb_ic FAIL! rc= 8
+014 erJ74 TER.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -10+-10 arb_ic FAIL! rc= 5
+015 brJ74 TBR.sh 4p_nrcasasc_ds clm_std^nl_urb_br 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_icFAIL! rc= 5
+016 blJ74 TBL.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic FAIL! rc= 4
+ dublin/lf95: All Fail due to bug 1092
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_58 (but with datm8 rather than datm7)
+
+Changes answers relative to baseline: No bit-for-bit except for transient cases which are different
+
+===============================================================
+===============================================================
+Tag name: clm3_6_60
+Originator(s): erik (erik)
+Date: Tue Jan 5 23:59:43 MST 2010
+One-line Summary: Fix clm template
+
+Purpose of changes:
+
+Fix the broken clm template. Update externals for very latest scripts tag.
+
+Bugs fixed (include bugzilla ID): Fix clm template which was broken
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): Update scripts
+
+ scripts to scripts4_100105b
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/test/system/test_driver.sh -- update bl to ccsm4_0_beta38
+M models/lnd/clm/bld/clm.cpl7.template ------- fix so can work
+
+Summary of testing:
+
+ bluefire/CCSM testing:
+PASS SMS_RLA.f45_f45.I.bluefire
+PASS SMS_RLB.f45_f45.I.bluefire
+PASS SMS_ROA.f45_f45.I.bluefire
+FAIL ERS.f19_g16.I_1850.bluefire
+FAIL ERS.f19_g16.I_1850-2000.bluefire
+PASS ERB.f09_g16.I_1948-2004.bluefire
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire
+PASS ERH_D.f10_f10.I_1850_CN.bluefire
+PASS PET.f10_f10.I8520CN.bluefire.cpl
+PASS PET.f10_f10.I8520CN.bluefire.atm
+PASS PET.f10_f10.I8520CN.bluefire.lnd
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_59
+
+Changes answers relative to baseline: no bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_6_59
+Originator(s): erik (erik)
+Date: Tue Jan 5 17:44:48 MST 2010
+One-line Summary: Update to datm8, fix so wts used are from fsurdat file NOT finidat file
+
+Purpose of changes:
+
+Changes needed for beta34 ESMF upgrade. Use new datm8 model which is more flexible and
+has new options as well as parallel IO. Add in 4x5_gx3v7 frac file. Remove use for
+ESMF_mod. Abort if finidat weights are significantly different from surfdata file
+weights. Change name of driver and initializeMod to have a clm_ prefix. Convert UG
+outline from html to DocBook. Make changes to code documentation for high level
+subroutines. Remove documentation of namelist items in controlMod and have it point
+to the documentation in the xml namelist file. Fix "called from" in code documentation
+and remove a lot of the concurrent directives. New files from Tony for esmf interface.
+Alpha release testing will start with this version.
+
+Bugs fixed (include bugzilla ID):
+ 1084 (don't use only for ESMF_Mod)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ 1088 (change name of driver module)
+ 1093 (namelist tweaks) -- partial
+
+Known bugs (include bugzilla ID):
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1092 (Problems running in debug mode on dublin with datm8)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system:
+ Let CCSM build determine FORTRAN mangle definitions
+ Directory structure changes slightly with new ESMF interfaces and new datm8
+
+Describe any changes made to the namelist:
+ Make default for hist_crtinic NONE so clm.i files are NOT made by default.
+ Change names of options to build-namelist that only are for clm stand-alone testing.
+ Add a drv_ or datm_ prefix, and separate how these options are displayed in the help
+ Also add an option to several commands for "list" so that you can list the variables
+ for clm_demand, for resolution, and for use-cases.
+ Also update build-namelist to work with the new datm8
+
+List any changes to the defaults for the boundary datasets: Add in 4x5_gx3v7 dataset
+
+Describe any substantial timing or memory changes: datm8 is approx. 30% faster
+ datm8 also allows you to enable parallel I/O
+
+Code reviewed by: self, oleson, slevis, dlawren review of weights change
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, datm, csm_share, drv, pio
+
++scripts to scripts4_100103b
++drv to vocemis-drydep12_drvseq3_1_11
++datm to datm8_091218
++csm_share to vocemis-drydep13_share3_091217
++pio to pio60_prod
+
+List all files eliminated:
+
+ D models/lnd/clm/test/system/config_files/_mexsc_ds --- Rename with nr in name
+ D models/lnd/clm/test/system/config_files/_vansc_ds --- Rename with nr in name
+ D models/lnd/clm/src/main/driver.F90 ------------------ Rename with clm_ prefix
+ D models/lnd/clm/src/main/initializeMod.F90 ----------- Rename with clm_ prefix
+ D models/lnd/clm/src/biogeophys/DriverInitMod.F90 ----- Rename to clm_driverInitMod
+
+List all files added and what they do:
+
+ A models/lnd/clm/doc/UsersGuide/index.xml ---------------- Users Guide Outline in docbook format
+>>>>>>>>>>> Version with "nr" so that RTM is turned off for non-global tests
+ A models/lnd/clm/test/system/config_files/_nrsc_s
+ A models/lnd/clm/test/system/config_files/17p_nrsc_ds
+ A models/lnd/clm/test/system/config_files/4p_nrcasasc_ds
+ A models/lnd/clm/test/system/config_files/_nrsc_ds
+ A models/lnd/clm/test/system/config_files/_nrmexsc_ds
+ A models/lnd/clm/test/system/config_files/_nrvansc_ds
+ A models/lnd/clm/test/system/config_files/17p_nrcnnsc_ds
+>>>>>>>>>>> New files from Tony for ESMF interfaces
+ A models/lnd/clm/src/main/cpl_esmf/lnd_comp_esmf.F90
+ A models/lnd/clm/src/main/cpl_esmf/lnd_comp_mct.F90
+
+ A models/lnd/clm/src/main/clm_initializeMod.F90 ---------- Rename with clm_ prefix
+ Also change so that dyn pft is always called before reading in the restart
+ file.
+ A models/lnd/clm/src/main/clm_driver.F90 ----------------- Rename with clm_ prefix
+ A models/lnd/clm/src/biogeophys/clm_driverInitMod.F90 ---- Rename from driverInitMod
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/config_files/README -- Note about nr means no-RTM
+ M models/lnd/clm/test/system/test_driver.sh ------- Some tweaks for dublin/intrepid
+ M models/lnd/clm/test/system/mknamelist ----------- Changes for datm namelists, and
+ change for new options names for
+ build-namelist
+ M models/lnd/clm/test/system/nl_files/clm_per ----- Change case and source to
+ drv_case and datm_source
+ M models/lnd/clm/test/system/nl_files/clm_std ----- Change case and source to
+ drv_case and datm_source
+ M models/lnd/clm/test/system/nl_files/clm_ndepdyn - Change case and source to
+ drv_case and datm_source
+ M models/lnd/clm/test/system/nl_files/clm_pftdyn -- Change case and source to
+ drv_case and datm_source
+ M models/lnd/clm/test/system/nl_files/clm_per0 ---- Change case and source to
+ drv_case and datm_source
+ M models/lnd/clm/test/system/nl_files/clm_spin ---- Change case and source to
+ drv_case and datm_source
+ M models/lnd/clm/test/system/nl_files/clm_urb1pt -- Change case and source to
+ drv_case and datm_source
+ M models/lnd/clm/test/system/input_tests_master --- Use nr in test names
+ M models/lnd/clm/test/system/CLM_runcmnd.sh ------- Change name for laptop
+ M models/lnd/clm/test/system/TSM.sh --------------- Change datm restart files
+
+ M models/lnd/clm/bld/clm.cpl7.template ------------ Change template to not put RTM
+ time-step in when rtm is off
+ M models/lnd/clm/bld/configure -------------------- Change to new datm dir structure
+ M models/lnd/clm/bld/listDefaultNamelist.pl ------- Change name of datm namelist
+ M models/lnd/clm/bld/build-namelist --------------- New list options, update for
+ new datm8 namelist.
+
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ---- Document pio options
+ new datm8 namelist items, CASA namelist items, and fget_archdev
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- New 4x5_gx3v7 frac file
+ set default of hist_crtinic to NONE
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml - New datm8 defaults
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xsl ---- Add section for
+ CASA nl items, and a commented out section for the pio items
+
+ M models/lnd/clm/src/biogeochem/CNCStateUpdate2Mod.F90 ---- Fix called from, rm concurrnt directives
+ M models/lnd/clm/src/biogeochem/CASAPhenologyMod.F90 ------ Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNGapMortalityMod.F90 ----- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNC13StateUpdate2Mod.F90 -- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90 ---- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/DGVMEcosystemDynMod.F90 --- Fix called from
+ M models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90 ----- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNNStateUpdate3Mod.F90 ---- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CASAMod.F90 --------------- Doc routine as private, fix called from, rm con dirct.
+ M models/lnd/clm/src/biogeochem/CNPrecisionControlMod.F90 - Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNSummaryMod.F90 ---------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90 ---- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNCStateUpdate3Mod.F90 ---- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNC13StateUpdate1Mod.F90 -- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/DGVMMod.F90 --------------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNC13StateUpdate3Mod.F90 -- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNAnnualUpdateMod.F90 ----- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNNStateUpdate2Mod.F90 ---- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/C13SummaryMod.F90 --------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNNDynamicsMod.F90 -------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNC13FluxMod.F90 ---------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNSetValueMod.F90 --------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90 -- Fix called from, rm con dirt.
+
+ M models/lnd/clm/src/main/clm_comp.F90 ------------- Add clm_ prefix to calls
+ M models/lnd/clm/src/main/pftdynMod.F90 ------------ Fix called from, rm con dirt.
+ M models/lnd/clm/src/main/histFileMod.F90 ---------- Add more documentation, rm con direct.
+ M models/lnd/clm/src/main/clm_atmlnd.F90 ----------- Change documentation of units for nee
+ M models/lnd/clm/src/main/restFileMod.F90 ---------- Change called from documentation
+ M models/lnd/clm/src/main/controlMod.F90 ----------- Remove namelist items documentation
+ point to xml files for documentation
+ Work with code documentation
+ Get rid of notes about aerdep
+ files going away
+ M models/lnd/clm/src/main/clm_time_manager.F90 ----- Fix called from doc
+ M models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 - Add documentation
+ M models/lnd/clm/src/main/domainMod.F90 ------------ Clarify which driver in doc
+ M models/lnd/clm/src/main/clmtype.F90 -------------- Work on code documentation
+ M models/lnd/clm/src/main/histFldsMod.F90 ---------- Work on code documentation and formatting
+
+ M models/lnd/clm/src/riverroute/RtmMod.F90 - Fix called from
+
+ M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90 ------ Remove KO and fix called from in code doc, rm con dirct.
+ M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90 ---- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90 ---- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 ----- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeophys/Hydrology1Mod.F90 -------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 -------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeophys/BiogeophysicsLakeMod.F90 - Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeophys/UrbanMod.F90 ------------- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeophys/HydrologyLakeMod.F90 ----- Fix called from, rm con dirt.
+ M models/lnd/clm/src/biogeophys/SNICARMod.F90 ------------ Fix called from.
+
+ M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 - Check that weights when read
+ in agree reasonably closely with fsurdat weights
+
+Summary of testing:
+
+ bluefire:
+004 blA91 TBL.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 7
+007 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v5 144 cold .................FAIL! rc= 7
+012 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 arb_ic ............FAIL! rc= 7
+017 blF92 TBL.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 7
+021 blEH1 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 7
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+028 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -3+-7 arb_ic .......FAIL! rc= 13
+029 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -5+-5 arb_ic .......FAIL! rc= 11
+030 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 7
+034 blC61 TBL.sh _sc_dh clm_std^nl_urb 20021001:NONE:1800 1.9x2.5 gx1v6 48 cold .................FAIL! rc= 7
+038 blH52 TBL.sh 17p_cnnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 48 cold .........FAIL! rc= 7
+043 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 7
+049 blJ61 TBL.sh 4p_casasc_dh clm_std^nl_urb 20021230:NONE:1800 1.9x2.5 gx1v6 48 cold ...........FAIL! rc= 7
+050 smJ05 TSM.sh 4p_casasc_h clm_std^nl_lfiles 19800101:NONE:1800 0.47x0.63 gx1v6 48 arb_ic .....FAIL! rc= 10
+051 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 4
+052 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 3
+053 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 3
+054 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 3
+ bluefire interactive testing:
+004 blA74 TBL.sh _nrsc_ds clm_std^nl_urb 20030101:NONE:1800 1x1_brazil navy -10 arb_ic ..........FAIL! rc= 5
+009 blCA4 TBL.sh _nrsc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ........FAIL! rc= 5
+013 blNB4 TBL.sh _nrmexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 5
+017 blJ74 TBL.sh 4p_nrcasasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic FAIL! rc= 5
+019 blCA8 TBL.sh _nrsc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic .FAIL! rc= 5
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+023 blAK4 TBL.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -10 cold ............FAIL! rc= 7
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ......FAIL! rc= 6
+028 blL78 TBL.sh _nrsc_s clm_std^nl_urb 20021231:NONE:1800 1x1_brazil navy -10 arb_ic ...........FAIL! rc= 5
+032 blF93 TBL.sh 17p_vodsrsc_do clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 7
+033 smL83 TSM.sh _nrsc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -10 arb_ic ..........FAIL! rc= 4
+034 erL83 TER.sh _nrsc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -5+-5 arb_ic ........FAIL! rc= 5
+035 brL83 TBR.sh _nrsc_do clm_std^nl_urb_br 20020115:NONE:3600 5x5_amazon navy -10+-10 arb_ic ...FAIL! rc= 5
+036 blL83 TBL.sh _nrsc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -10 arb_ic ..........FAIL! rc= 4
+041 bl744 TBLtools.sh mksurfdata tools__s namelist ..............................................FAIL! rc= 7
+043 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 7
+045 bl754 TBLtools.sh mksurfdata tools__s globalirrig ...........................................FAIL! rc= 6
+050 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ dublin/lf95: No testing as all tests fail due to bug 1092
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_58
+
+Changes answers relative to baseline: datm8 causes answers to be roundoff different
+ Change to use of fsurdat instead of finidat weights means answers may change
+ for non-coldstart cases.
+
+===============================================================
+===============================================================
+Tag name: clm3_6_58
+Originator(s): erik (erik)
+Date: Tue Dec 8 12:56:47 MST 2009
+One-line Summary: Fix rpointer, correct units for export of nee, start adding testing for intrepid
+
+Purpose of changes:
+ Only update the rpointer file when restart files are written NOT when clm.i initial
+ files are written. This was causing problems to restart the model when it was
+ aborting before it completed its period to run for.
+ Correct the units for the export of NEE from kg C to kg CO2 (kgCO2/m2/s)
+ Remove some concurrent directives in the code and the unicosmp target_os in
+ configure as we no longer have Phoenix.
+ Add bgp target_os to configure, only set Fortran mangling if NOT using the
+ CCSM build in configure.
+ Add CN atm spinup data source as option to configure and to test_driver.sh.
+ Update version of external to test with to ccsm4_0_beta35
+ Add ability to test on intrepid to test_driver.sh.
+
+Bugs fixed (include bugzilla ID):
+ 1079 (rpointer file updated with clm.i files)
+ 1082 (Add bgp, don't do Fortran mangling for CCSM build)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+
+Known bugs (include bugzilla ID): 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1070 (pftdyn datasets bad for f19, 2.5x3.33)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Have configure write out unresolved filepaths
+ when called from cpl7.template
+
+Describe any changes made to the namelist: drv namelist updated
+
+List any changes to the defaults for the boundary datasets:
+ Add 2.5x3.33 resolution
+ Remove 2x2.5 res files
+ New f09, f19 finidat files
+ New f09, f19, f10, 1x1_tropicAtl fsurdat/fpftdyn files (only f19 change answers)
+ New f05, 5x5_amazon, 1x1_brazil 2000 fsurdat file (b4b)
+ New f03, f09, f19, f03, f10 ndepdyn files (changes 1851-1924, 1996-2004)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): to ccsm4_0_beta33 versions
+
+ scripts to scripts4_091027b
+ drv to vocemis-drydep12_drvseq3_0_37
+ datm7 to datm7_090928
+ socn/sice/sglc to stubs1_2_02
+ csm_share to share3_091013
+ timing to timing_090929
+ mct to MCT2_6_0_090926
+ pio to pio57_prod
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M Quickstart.userdatasets --- Update notes about setting user-defined datasets
+>>>>>>>>>>>>>>> Update jaguar, kraken, and bluefire env vars to latest scripts
+>>>>>>>>>>>>>>> Remove blAK8 test as ocean-only and no clm files to compare
+ M models/lnd/clm/test/system/README.testnames ---------------- Add 2.5x3.33 test resolution
+ M models/lnd/clm/test/system/tests_driver.sh ----------------- Update env vars
+ to whats in scripts4_091015 for jaguar, kraken and bluefire
+ M models/lnd/clm/test/system/tests_pretag_bluefire_nompi ----- Rm blAK8
+ M models/lnd/clm/test/system/input_tests_master -------------- Rm blAK8, add 1x1_tropicAtl@1850,1850-2000, and 2.5x3.33 tests
+ M models/lnd/clm/test/system/tests_posttag_nompi_regression -- Rm blAK8
+
+>>>>>>>>>>>>>>> Allow configure to write out unresolved Filepath, make TopCCSMBld
+>>>>>>>>>>>>>>> Makefile closer to CPL7 version
+ M models/lnd/clm/bld/configure -------------------------- Add clm_root option
+ add ability to set comp_intf to cpl_$COMP, add the ability to check for
+ directories' existence resolving env vars that are set. Create a subroutine
+ is_valid_directory to check for directories instead of "-d".
+ M models/lnd/clm/bld/config_files/config_definition.xml - Allow cpl_$COMP rm lapacklibdir
+ M models/lnd/clm/bld/clm.cpl7.template -- Set COMP based on COMP_INTERFACE,
+ add clm_root to configure, don't resolve CODEROOT and CASEROOT on output
+ M models/lnd/clm/bld/config_files/TopCCSMBldMakefile.in - Changes to make closer to scripts4_091015 version.
+>>>>>>>>>>>>>>> Change drv namelist names,
+ M models/lnd/clm/bld/build-namelist ----- Change drv namelist names: cpl_io_numtasks/cpl_io_typename
+>>>>>>>>>>>>>>> Change drv namelist names, add 2.5x3.33 resolution
+>>>>>>>>>>>>>>> Remove 2x2.5 res files
+>>>>>>>>>>>>>>> New f09, f19 finidat files
+>>>>>>>>>>>>>>> New f09, f19, f10, 1x1_tropicAtl fsurdat/fpftdyn files (only f19 change answers)
+>>>>>>>>>>>>>>> New f05, 5x5_amazon, 1x1_brazil 2000 fsurdat file (b4b)
+>>>>>>>>>>>>>>> New f03, f09, f19, f03, f10 ndepdyn files (changes 1851-1924, 1996-2004)
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml -----Change drv namelist
+ names: cpl_io_numtasks/cpl_io_typename, add 2.5x3.33 resolution
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml -- Add 2.5x3.33 res domainfile
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml --- New f19, f09 finidat files
+ new f09, f19, f10 fsurdat/fpftdyn files
+ new 2.5x3.33: fatmgrid, flndtopo, fatmtopo, fatmlndfrc, faerdep, fndepdat files
+ new f05, 5x5_amazon, 1x1_brazil 2000 fsurdat files
+ new f03, f09, f19, f03, f10 ndepdyn files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml --- Change drv namelist
+ names: cpl_io_numtasks/cpl_io_typename
+
+ M models/lnd/clm/src/main/histFldsMod.F90 -- GC_HEAT2, GC_LIQ2, GC_ICE2 NOT on by default
+>>>>>>>>>>>>>>> Remove dips in 20th Century transient Nitrogen deposition
+>>>>>>>>>>>>>>> for 1855 and 2000.
+ M models/lnd/clm/tools/ncl_scripts/ndeplintInterp.ncl --- Remove 1855-1915 and 2000
+ from list of input files. The 1850 dataset had too high CO2 and hence
+ too high Nitrogen deposition, which gives an unrealistic dip near the
+ beginning.
+
+>>>>>>>>>>>>>>> Documentation changes of ProTex comments to fit the ProTex standard
+M tools/mksurfdata/mkglcmec.F90
+M tools/mksurfdata/mkfmax.F90
+M tools/mksurfdata/ncdio.F90
+M tools/mksurfdata/mklaiMod.F90
+M tools/mksurfdata/mkglacier.F90
+M tools/mksurfdata/mkharvestMod.F90
+M tools/mksurfdata/creategridMod.F90
+M tools/mksurfdata/mkorganic.F90
+M tools/mksurfdata/mklanwat.F90
+M tools/mksurfdata/mksoicol.F90
+M tools/mksurfdata/mkrank.F90
+M tools/mksurfdata/mkelev.F90
+M tools/mksurfdata/mkurban.F90
+M tools/mksurfdata/mkurbanparMod.F90
+M tools/mksurfdata/mksoitex.F90
+M tools/mksurfdata/mkirrig.F90
+M tools/mksurfdata/domainMod.F90
+M tools/mksurfdata/areaMod.F90
+M tools/mksurfdata/mksrfdat.F90
+M tools/mksurfdata/mkpftMod.F90
+M tools/mkgriddata/mkgriddata.F90
+M tools/mkgriddata/creategridMod.F90
+M tools/mkdatadomain/create_domain.F90
+M src/biogeochem/DGVMLightMod.F90
+M src/biogeochem/DGVMReproductionMod.F90
+M src/biogeochem/DGVMAllocationMod.F90
+M src/biogeochem/DGVMEcosystemDynMod.F90
+M src/biogeochem/CASAMod.F90
+M src/biogeochem/DGVMKillMod.F90
+M src/biogeochem/DUSTMod.F90
+M src/biogeochem/DGVMEstablishmentMod.F90
+M src/biogeochem/STATICEcosysDynMod.F90
+M src/biogeochem/DGVMRestMod.F90
+M src/biogeochem/DGVMMod.F90
+M src/biogeochem/CNrestMod.F90
+M src/biogeochem/VOCEmissionMod.F90
+M src/biogeochem/DGVMMortalityMod.F90
+M src/biogeochem/DGVMTurnoverMod.F90
+M src/biogeochem/DGVMFireMod.F90
+M src/biogeochem/CNEcosystemDynMod.F90
+M src/main/inicFileMod.F90
+M src/main/organicFileMod.F90
+M src/main/spmdGathScatMod.F90
+M src/main/clm_varpar.F90
+M src/main/CNiniTimeVar.F90
+M src/main/dynlandMod.F90
+M src/main/accumulMod.F90
+M src/main/clm_comp.F90
+M src/main/driver.F90
+M src/main/decompInitMod.F90
+M src/main/ncdio.F90
+M src/main/getdatetime.F90
+M src/main/subgridRestMod.F90
+M src/main/accFldsMod.F90
+M src/main/subgridMod.F90
+M src/main/fileutils.F90
+M src/main/aerdepMod.F90
+M src/main/initializeMod.F90
+M src/main/pftdynMod.F90
+M src/main/iniTimeConst.F90
+M src/main/histFileMod.F90
+M src/main/pft2colMod.F90
+M src/main/clm_atmlnd.F90
+M src/main/restFileMod.F90
+M src/main/controlMod.F90
+M src/main/initSurfAlbMod.F90
+M src/main/clm_time_manager.F90
+M src/main/cpl_mct/lnd_comp_mct.F90
+M src/main/ndepFileMod.F90
+M src/main/subgridAveMod.F90
+M src/main/initGridCellsMod.F90
+M src/main/CASAiniTimeVarMod.F90
+M src/main/CNiniSpecial.F90
+M src/main/pftvarcon.F90
+M src/main/snowdp2lev.F90
+M src/main/spmdMod.F90
+M src/main/surfrdMod.F90
+M src/main/domainMod.F90
+M src/main/decompMod.F90
+M src/main/areaMod.F90
+M src/main/iobinary.F90
+M src/main/do_close_dispose.F90
+M src/main/mkarbinitMod.F90
+M src/riverroute/RtmMod.F90
+M src/riverroute/RunoffMod.F90
+M src/biogeophys/BalanceCheckMod.F90
+M src/biogeophys/SurfaceRadiationMod.F90
+M src/biogeophys/SoilTemperatureMod.F90
+M src/biogeophys/SnowHydrologyMod.F90
+M src/biogeophys/UrbanInputMod.F90
+M src/biogeophys/Biogeophysics1Mod.F90
+M src/biogeophys/Biogeophysics2Mod.F90
+M src/biogeophys/FracWetMod.F90
+M src/biogeophys/UrbanInitMod.F90
+M src/biogeophys/FrictionVelocityMod.F90
+M src/biogeophys/TridiagonalMod.F90
+M src/biogeophys/SurfaceAlbedoMod.F90
+M src/biogeophys/Hydrology1Mod.F90
+M src/biogeophys/Hydrology2Mod.F90
+M src/biogeophys/BiogeophysicsLakeMod.F90
+M src/biogeophys/BiogeophysRestMod.F90
+M src/biogeophys/SoilHydrologyMod.F90
+M src/biogeophys/UrbanMod.F90
+M src/biogeophys/QSatMod.F90
+M src/biogeophys/HydrologyLakeMod.F90
+M src/biogeophys/SNICARMod.F90
+M src/biogeophys/DriverInitMod.F90
+M src/biogeophys/BareGroundFluxesMod.F90
+M src/biogeophys/CanopyFluxesMod.F90
+
+Summary of testing:
+
+ bluefire:
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic
+........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72
+arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72
+arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic
+........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic
+.........FAIL! rc= 10
+031 smC61 TSM.sh _sc_dh clm_std^nl_urb 20021001:NONE:1800 1.9x2.5 gx1v6 -6 cold
+.................FAIL! rc= 10
+032 erC61 TER.sh _sc_dh clm_std^nl_urb 20021001:NONE:1800 1.9x2.5 gx1v6 10+38 cold
+..............FAIL! rc= 5
+033 brC61 TBR.sh _sc_dh clm_std^nl_urb_br 20021001:NONE:1800 1.9x2.5 gx1v6 -3+-3 cold
+...........FAIL! rc= 5
+034 blC61 TBL.sh _sc_dh clm_std^nl_urb 20021001:NONE:1800 1.9x2.5 gx1v6 48 cold
+.................FAIL! rc= 4
+035 smH52 TSM.sh 17p_cnnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 96 cold
+.........FAIL! rc= 8
+036 erH52 TER.sh 17p_cnnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 10+38
+cold ......FAIL! rc= 5
+037 brH52 TBR.sh 17p_cnnsc_dm clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS@2000 72+72
+cold ...FAIL! rc= 5
+038 blH52 TBL.sh 17p_cnnsc_dm clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 48 cold
+.........FAIL! rc= 4
+039 smI59 TSMcnspinup.sh 17p_cnadspinupsc_dm 17p_cnexitspinupsc_dm 17p_cnsc_dm clm_std
+20020115:NONEFAIL! rc= 5
+040 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic
+........................FAIL! rc= 10
+041 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic
+......................FAIL! rc= 5
+ bluefire interactive testing: All PASS up to...
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic........FAIL! rc= 10
+ bluefire/CCSM testing:
+PASS SMS_RLA.f45_f45.I.bluefire
+PASS SMS_RLB.f45_f45.I.bluefire
+PASS SMS_ROA.f45_f45.I.bluefire
+PASS ERS_D.f45_g35.I_2000.bluefire
+PASS PET.f45_g35.I_1850.bluefire.cpl
+PASS PET.f45_g35.I_1850.bluefire.atm
+PASS PET.f45_g35.I_1850.bluefire.lnd
+PASS PET.f45_g35.I_1850.bluefire.ice
+PASS PET.f45_g35.I_1850.bluefire.ocn
+PASS PET.f45_g35.I_1850.bluefire.glc
+PASS ERS.f19_g16.I_1850.bluefire
+FAIL ERS.f19_g16.I_1850-2000.bluefire <-- script fails, but cpl log same
+PASS ERB.f09_g16.I_1948_2004.bluefire
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire <-- recv lnd Sl_t different
+PASS ERH_D.f10_f10.I_1850_CN.bluefire
+PASS PET.f10_f10.ICN8520.bluefire.cpl
+PASS PET.f10_f10.ICN8520.bluefire.atm
+PASS PET.f10_f10.ICN8520.bluefire.lnd
+PASS PET.f10_f10.ICN8520.bluefire.ice
+PASS PET.f10_f10.ICN8520.bluefire.ocn
+PASS PET.f10_f10.ICN8520.bluefire.glc
+ jaguar: All PASS
+ jaguar interactive testing: All PASS except
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+007 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 6
+009 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+011 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+013 smJ74 TSM.sh 4p_casasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -1100 arb_ic .FAIL! rc= 8
+014 erJ74 TER.sh 4p_casasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -10+-10 arb_ic FAIL! rc= 5
+015 brJ74 TBR.sh 4p_casasc_ds clm_std^nl_urb_br 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_ic FAIL! rc= 5
+ dublin/lf95 interactive testing:
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+007 brAL4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 6
+026 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+027 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+030 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ dublin/lf95:
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+030 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 8
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to..
+019 blR53 TBL.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@1850 48 cold ......FAIL! rc= 7
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_53
+
+Changes answers relative to baseline: Only f19 due to new fsurdat file (off by roundoff)
+ and startup for f09 and f19 with CN change due
+ to new finidat files. 20th Century simulations
+ with CN change because the ndep data set is different
+ from 1851-1924, and 2001-2004.
+
+===============================================================
+===============================================================
+Tag name: clm3_6_53
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Tue Sep 22 16:15:39 MDT 2009
+One-line Summary: Fix so that T31_gx3v7 file is actually included
+
+Purpose of changes:
+
+Add new optics file from Mark Flanner. Fix so T31_gx3v7 file included. Change testing
+for 48x96 to gx3v7. Update datm so that pt1_pt1 res works. Fix clm template so
+that RTM is turned off for pt1_pt1 resolution.
+
+Bugs fixed (include bugzilla ID):
+ 1042 (Bug with domain directory name in datm for pt1_pt1 resolution)
+ 789 -- change so that RTM is off should make single-point mode faster
+
+Known bugs (include bugzilla ID): 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: In template turn off RTM if grid=pt1
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: Add in T31_gx3v7 frac file, update snicar optics file
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): datm7
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M Quickstart.userdatasets
+M models/lnd/clm/test/system/input_tests_master
+M models/lnd/clm/bld/clm.cpl7.template
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+
+Summary of testing: Limited
+
+ bluefire:
+ bluefire interactive testing:
+ bluefire/CCSM testing:
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_53
+
+Changes answers relative to baseline: None -- bit for bit
+
+===============================================================
+===============================================================
+Tag name: clm3_6_52
+Originator(s): erik (erik)
+Date: Thu Sep 17 11:07:19 MDT 2009
+One-line Summary: Add T31_gx3v7 support, remove forganic, read from fsurdat, add script to extract regional datasets, work with CN output, add more urban/rural fields
+
+Purpose of changes:
+
+ Add T31_gx3v7 files needed. Read organic fields from fsurdat file, remove forganic file.
+ Add in script to extract regional datasets. Change CN output fields list, add NBP (Net
+ Biome Production field). New Urban/Rural fields from Keith. Update bluefire compiler
+ to XLF12 (causes some restart issues listed below).
+
+ This tag includes new scripts to extract regional datasets from the global datasets
+ in order to run for a specific region of interest. The scripts are available in the
+ models/lnd/clm/tools/ncl_scripts directory, the main script is the
+ getregional_datasets.pl perl script and it has a command line interface and help with
+ the "-help" option. There's also a README file in the directory containing the scripts,
+ and more information in the Quickstart.userdatasets file at the top level.
+
+ Quickstart to use of regional extraction scripts:
+
+ # Run the script to create an area to put your files (assume CSMDATA set to standard inputdata)
+ cd scripts
+ setenv MYCSMDATA $HOME/myinputdata
+ link_dirtree $CSMDATA $MYCSMDATA
+
+ # Run the extraction for data from 52-73 North latitude, 190-220 longitude
+ # that creates 13x12 gridcell region from the f19 (1.9x2.5) global resolution over
+ # Alaska
+ cd ../models/lnd/clm/tools/ncl_scripts
+ setenv MYID 13x12pt_f19_alaskaUSA
+ getregional_datasets.pl -sw 52,190 -ne 73,220 -id $MYID -mycsmdata $MYCSMDATA
+
+ # Now create a case that uses these datasets
+ cd ../../../../../scripts
+ create_newcase -case testregional -compset I -mach bluefire -res pt1_pt1 -skip_rundb
+ cd testregional
+ $EDITOR env_conf.xml # change CLM_BLDNML_OPTS to include "-clm_usr_name $MYID" (expand $MYID)
+ $EDITOR env_mach_pes.xml # Change tasks/threads as appropriate (defaults to serial)
+ xmlchange -file env_run.xml -id DIN_LOC_ROOT_CSMDATA -val $MYCSMDATA
+  $EDITOR Tools/Templates/datm.cpl7.template.csh # Add the following line before the check on DOMAINFILE (expand $MYID)
+
+if ( $DOMAINFILE == "unset" ) set DOMAINFILE = "domain.lnd.$MYID.nc"
+
+ # Do other changes to xml files as appropriate
+ # configure as normal, then edit the datm namelist
+
+ configure -case
+
+ # Then build and run the case as normal
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1042 (Bug with domain directory name in datm for pt1_pt1 resolution)
+ 1063 (Problem in restarts for CCSM spinup data)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Filepath, bluefire compiler to XLF12
+
+ Filepath for stub-components changes
+ In scripts and in test_driver.sh update compiler for bluefire to XLF12
+ (this causes the restart issue for certain cases below).
+
+Describe any changes made to the namelist: Remove forganic (read organic from fsurdat file)
+
+List any changes to the defaults for the boundary datasets: Add 48x96_gx3v7 fracdata
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, oleson(urban/rural), slevis (CN fields, new NBP field)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm7, stub-comps, csm_share
+
+ scripts to scripts4_090916
+ drv to vocemis-drydep12_drvseq3_0_29
+ datm7 to datm7_090915
+ socn/sice/sglc to stubs1_2_01
+ csm_share to share3_090902
+
+List all files eliminated: Remove noOrganicSoilDataset use case
+
+ Remove the use case that removed the requirement for the forganic dataset.
+
+ D models/lnd/clm/bld/namelist_files/use_cases/noOrganicSoilDataset.xml
+
+List all files added and what they do:
+
+ Add stylesheet for namelist defaults files.
+
+ A models/lnd/clm/bld/namelist_files/namelist_defaults.xsl
+
+ Scripts to extract regions of interest from global grids and put them into the place
+ expected by build-namelist with the clm_usr_name option.
+
+ A models/lnd/clm/tools/ncl_scripts/getregional_datasets.pl --- Main script to extract regional datasets.
+ This one has a command line interface.
+ A models/lnd/clm/tools/ncl_scripts/getregional_datasets.ncl -- Support script to do the actual work.
+ This one works based on settings of a bunch of environment variables.
+
+List all existing files that have been modified, and describe the changes:
+
+ M Quickstart.userdatasets - Add notes about using getregional_datasets.pl
+ M Quickstart.GUIDE -------- Fix typo
+
+ M models/lnd/clm/test/system/test_driver.sh ---- Update seqccsm version to beta26
+ Also update bluefire to XLF12.
+
+ M models/lnd/clm/tools/ncl_scripts/README ----- Add note about new getregional_datasets scripts
+
+>>>>>>>>>>>>>>> Get configure working with new scripts/stub-components
+ M models/lnd/clm/bld/configure ------- Change Filepath for stub components, remove
+ write_filepath_ccsm use ccsmbld version
+
+>>>>>>>>>>>>>>> Remove forganic, add T31_gx3v7, have query NOT return user filenames
+>>>>>>>>>>>>>>> for transient files when sim_year_range=constant.
+>>>>>>>>>>>>>>> Add style sheets for namelist_defaults files.
+ M models/lnd/clm/bld/queryDefaultXML.pm -- Skip filenames set to "null"
+ M models/lnd/clm/bld/build-namelist ------ Remove forganic
+
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml ------------ Remove forganic,
+ add gx3v7
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml ------ Remove forganic,
+ add stylesheet.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml --------- Add T31_gx3v7
+ domainfile, add stylesheet.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml ---------- Add stylesheet.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ---------- Make
+ gx3v7 default for T31, remove reference to forganic, add stylesheet.
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml ---- Set transient
+ files to null for sim_year_range=constant, add stylesheet.
+
+>>>>>>>>>>>>>>> Add NBP, change which CN fields active/inactive, add new Urban/Rural
+>>>>>>>>>>>>>>> fields, remove forganic read organic soil from fsurdat
+ M models/lnd/clm/src/biogeochem/CNSummaryMod.F90 --------- Add nbp, update doc for nee, nep,
+ work with formatting
+ M models/lnd/clm/src/biogeochem/C13SummaryMod.F90 -------- Add nbp, update doc for nee, nep,
+ work with formatting
+ M models/lnd/clm/src/biogeochem/CNSetValueMod.F90 -------- Set nbp and work with formatting
+ M models/lnd/clm/src/main/organicFileMod.F90 ------------- Remove forganic use fsurdat
+ M models/lnd/clm/src/main/clmtypeInitMod.F90 ------------- New urban/rural fields and nbp,
+ work with formatting
+ M models/lnd/clm/src/main/controlMod.F90 ----------------- Remove forganic
+ M models/lnd/clm/src/main/clm_varctl.F90 ----------------- Remove forganic
+ M models/lnd/clm/src/main/clmtype.F90 -------------------- Add urban/rural (oleson) and nbp,
+ and update doc on nep, nee
+ M models/lnd/clm/src/main/histFldsMod.F90 ---------------- Add urban/rural fields (oleson),
+ Change which CN fields on/off, add NBP
+ M models/lnd/clm/src/biogeophys/SurfaceRadiationMod.F90 -- Add urban/rural (oleson)
+ M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 --- Add urban/rural (oleson)
+ M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90 ---- Add urban/rural (oleson)
+ M models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90 ---- Add urban/rural (oleson)
+ M models/lnd/clm/src/biogeophys/UrbanInitMod.F90 --------- Add urban/rural (oleson)
+ M models/lnd/clm/src/biogeophys/UrbanMod.F90 ------------- Add urban/rural (oleson)
+
+Summary of testing:
+
+ bluefire: All PASS except...
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+051 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+052 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 6
+054 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 6
+ bluefire interactive testing: All PASS up to...
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ bluefire/CCSM testing: All PASS except for branch tests that fail due to XLF12
+PASS ERS_D.f45_g35.I_2000.bluefire
+PASS PET.f45_g35.I_1850.bluefire.cpl
+PASS PET.f45_g35.I_1850.bluefire.atm
+PASS PET.f45_g35.I_1850.bluefire.lnd
+PASS PET.f45_g35.I_1850.bluefire.ice
+PASS PET.f45_g35.I_1850.bluefire.ocn
+PASS PET.f45_g35.I_1850.bluefire.glc
+PASS ERS.f19_g16.I_1850.bluefire
+PASS ERS.f19_g16.I_1850-2000.bluefire
+FAIL ERB.f09_g16.I_1948_2004.bluefire <<<< FAIL's due to compiler upgrade to XLF12
+FAIL ERB.f09_g16.I1850SPINUPCN.bluefire <<<< FAIL's due to compiler upgrade to XLF12
+>>>>>>>>>>>>> NOTE This same problem exists in clm3_6_51 if you update the compiler to
+>>>>>>>>>>>>> XLF12.
+PASS ERH_D.f10_f10.I_1850_CN.bluefire
+PASS PET.f10_f10.ICN8520.bluefire.cpl
+PASS PET.f10_f10.ICN8520.bluefire.atm
+PASS PET.f10_f10.ICN8520.bluefire.lnd
+PASS PET.f10_f10.ICN8520.bluefire.ice
+PASS PET.f10_f10.ICN8520.bluefire.ocn
+PASS PET.f10_f10.ICN8520.bluefire.glc
+ jaguar: All PASS
+ jaguar interactive testing: All PASS up to...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ dublin/lf95 interactive testing: All PASS up to...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+ dublin/lf95: All PASS except (up to...)
+008 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+009 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+010 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+011 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+012 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+ change path for cprnc on jaguar
+M models/lnd/clm/test/system/input_tests_master -------------- single-column tests are cold-starts
+M models/lnd/clm/test/system/tests_pretag_dublin_nompi ------- Add single-column tests
+M models/lnd/clm/test/system/tests_posttag_nompi_regression -- Add single-column tests
+M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl ---------- Remove assumption about order of dimensions
+M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl ------------ Remove assumption about order of dimensions
+M models/lnd/clm/tools/ncl_scripts/pftdyntest2raw.ncl -------- Remove assumption about order of dimensions
+M models/lnd/clm/bld/config_files/Makefile.in ---------------- For ifort only add -132 to FIXEDFLAGS
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml Go back to old fndepdyn files from clm3_6_47
+M models/lnd/clm/src/main/clm_time_manager.F90 --------------- Label sub as "clm::" and change data to
+ intent(inout) to comply with ESMF3
+ (From Dani Bundy-Coleman)
+
+Summary of testing:
+
+ bluefire: All PASS except
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+051 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+052 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 6
+054 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 6
+ bluefire interactive testing: All PASS except
+021 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 arb_ic ........FAIL! rc= 5
+022 brAK4 TBR.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 arb_ic ........FAIL! rc= 5
+026 brAK8 TBR.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 arb_ic ....FAIL! rc= 6
+027 blAK8 TBL.sh _sc_ds clm_std^nl_ptsmode_ocn 20030101:NONE:1800 1.9x2.5 gx1v6 -10 arb_ic ......FAIL! rc= 6
+051 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ bluefire/CCSM testing: All PASS
+PASS ERS_D.f45_g35.I_2000.bluefire
+PASS PET.f45_g35.I_1850.bluefire.cpl
+PASS PET.f45_g35.I_1850.bluefire.atm
+PASS PET.f45_g35.I_1850.bluefire.lnd
+PASS PET.f45_g35.I_1850.bluefire.ice
+PASS PET.f45_g35.I_1850.bluefire.ocn
+PASS PET.f45_g35.I_1850.bluefire.glc
+PASS ERS.f19_g16.I_1850.bluefire
+PASS ERS.f19_g16.I_1850-2000.bluefire
+PASS ERB.f09_g16.I_1948_2004.bluefire
+PASS ERB.f09_g16.I1850SPINUPCN.bluefire
+PASS ERH_D.f10_f10.I_1850_CN.bluefire
+PASS PET.f10_f10.ICN8520.bluefire.cpl
+PASS PET.f10_f10.ICN8520.bluefire.atm
+PASS PET.f10_f10.ICN8520.bluefire.lnd
+PASS PET.f10_f10.ICN8520.bluefire.ice
+PASS PET.f10_f10.ICN8520.bluefire.ocn
+PASS PET.f10_f10.ICN8520.bluefire.glc
+ jaguar interactive testing: All PASS up to...
+006 erAK4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 1.9x2.5 gx1v6 -5+-5 cold ..........FAIL! rc= 7
+ lightning/ifort interactive testing: All PASS
+ dublin/lf95 interactive testing: All PASS up to...
+006 erAL4 TER.sh _sc_ds clm_std^nl_ptsmode 20030101:NONE:1800 10x15 USGS -5+-5 cold .............FAIL! rc= 7
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_48
+
+Changes answers relative to baseline: No (bit-for-bit)
+
+===============================================================
+===============================================================
+Tag name: clm3_6_48
+Originator(s): erik (erik)
+Date: Wed Aug 12 19:22:59 MDT 2009
+One-line Summary: New aerosol/nitrogen deposition datasets, mksurfdata work, scm work, clm_usr_name option to build-namelist
+
+Purpose of changes:
+
+Add in 0.47x0.63, 0.9x1.25 finidat file for CN and 1850, and 0.47x0.63 surface dataset.
+Add in datasets at f09, f10, f05, f02 for aerosol (excepting f02 and f05) and nitrogen
+deposition from J-F. Work with mksurfdata so that 0.23x0.31 dataset will work (Forrest).
+Update csm_share, and get scam working. Add scam tests in. Add clm_usr_name option for
+personal datasets to build-namelist. Add a noOrganicSoilDataset use-case so it won't add
+in forganic file. Work on using ccsm build files for stand-alone testing. Get testing
+going on dublin.
+
+Bugs fixed (include bugzilla ID):
+ 813 (use CCSM build files in testing -- partial)
+ 1010 (error in mksurfdata for qtr degree)
+ 1014 (shr_scam checkSurface can NOT run an I case)
+ 1023 (SCM mode check for lnd_present)
+
+Known bugs (include bugzilla ID): 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 966 (Hybrid restart problem on bluefire)
+ 971 (abort on lahey with MPI)
+ 972 (abort on intel with MPI)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1007 (interpinic error with Linux/lahey)
+ 1017 (SCM mode can NOT restart)
+ 1025 (SCM mode can NOT use a global finidat file)
+ 1029 (ifort compilation error in pio)
+ 1031 (Can't run SMS_D.f09_g16.ICN8520)
+ 1032 (Problem running SCM mode on Lahey)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Get ccsm_bld option working
+
+Describe any changes made to the namelist: Add -clm_usr_name option for user-datasets,
+ add noOrganicSoilDataset use case (leaves forganic file off)
+
+ Add the clm_usr_name option to build-namelist, document how this is done in the
+ Quickstart.userdataset README file. In short...
+
+ if -clm_usr_name is set to ${MYDATAID} then do the following...
+
+ surfdata: copy files into:
+ $MYCSMDATA/lnd/clm2/surfdata/surfdata_${MYDATAID}_simyr${SIM_YEAR}.nc
+ fatmgrid: copy files into:
+ $MYCSMDATA/lnd/clm2/griddata/griddata_${MYDATAID}.nc
+ fatmlndfrc: copy files into:
+ $MYCSMDATA/lnd/clm2/griddata/fracdata_${MYDATAID}_${MASK}.nc
+ faerdep: copy files into:
+ $MYCSMDATA/lnd/clm2/snicardata/aerosoldep_monthly_${SIM_YEAR}_${MYDATAID}.nc
+
+ Then set CLM_BLDNML_OPTS="-clm_usr_name $MYDATAID" in your env_conf.xml. You
+   may have to set DIN_LOC_ROOT_CSMDATA in env_run.xml to $MYCSMDATA if this isn't
+   the standard location as well (use scripts/link_dirtree $CSMDATA $MYCSMDATA to
+   link standard datasets to your location).
+
+List any changes to the defaults for the boundary datasets: New datasets
+ New aerosol and nitrogen deposition datasets from Jean-Francois Lamarque
+ New interpolated finidat: for 0.9x1.25, and 0.47x0.63
+ New fsurdat: for 0.47x0.63
+ New faerdep, 1849-2006: for 0.9x1.25 strung together by David Bailey
+ New faerdep, 1849-2006: for 1.9x2.5, 10x15 (interpolated)
+ New fndepdat, decadal avgs: for 1.9x2.5 (raw data from J-F)
+   New fndepdyn, 1850-2006: for 1.9x2.5 strung together
+ New fndepdyn, 1849-2006: for 0.9x1.25, 0.47x0.63, 10x15 (interpolated)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, csm_share, datm7
+
+ scripts to scripts4_090806
+ csm_share to share3_090811
+ datm7 to datm7_090812
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+ A Quickstart.userdatasets ---------------------------- Documentation on using own datasets
+ A models/lnd/clm/test/system/nl_files/nl_ptsmode_ocn - Test SCM mode over ocean
+ A models/lnd/clm/test/system/nl_files/nl_ptsmode ----- Test SCM mode
+ A models/lnd/clm/bld/namelist_files/namelist_defaults_usr_files.xml --- Template for
+ user defined input datasets
+ A models/lnd/clm/bld/namelist_files/use_cases/noOrganicSoilDataset.xml- Use case to
+ turn off organic soil dataset
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>>> Add scm tests, new variable to turn on use of CCSM build
+>>>>>>>>>>>>>>>> Turn off -test in nl_files, already in mknamelist
+>>>>>>>>>>>>>>>> Update dublin build/run to what used by cam.
+ M models/lnd/clm/test/system/TCB.sh --------------- Test for CLM_CCSMBLD
+ M models/lnd/clm/test/system/README.testnames ----- Add K and L single point mode cases
+ M models/lnd/clm/test/system/test_driver.sh ------- Update dublin, add INTEL for dublin
+ reconcile jaguar module with CCSM build
+ also set PNETCDF dirs.
+ M models/lnd/clm/test/system/nl_files/clm_per ----- Remove -test
+ M models/lnd/clm/test/system/nl_files/clm_std ----- Remove -test
+ M models/lnd/clm/test/system/nl_files/clm_ndepdyn - Remove -test
+ M models/lnd/clm/test/system/nl_files/clm_pftdyn -- Remove -test
+ M models/lnd/clm/test/system/nl_files/clm_per0 ---- Remove -test
+ M models/lnd/clm/test/system/nl_files/clm_urb1pt -- Remove -test
+ M models/lnd/clm/test/system/input_tests_master --- Add single point tests AK4/AK8,AL4
+ M models/lnd/clm/test/system/README --------------- Add note about CLM_CCSMBLD env var
+ M models/lnd/clm/test/system/CLM_runcmnd.sh ------- Update dublin
+>>>>>>>>>>>>>>>> Changes from Forrest Hoffman so that 0.23x0.31 case will work
+>>>>>>>>>>>>>>>> I had started this work, but didn't complete it. Forrest checked
+>>>>>>>>>>>>>>>> the following changes in.
+>>>> 1. Changed the FFLAGS for debug mode on AIX
+>>>> 2. Added calls to areaave(), gridmap_clean(), and areaini() in mksoicol.F90 and mksoitex.F90
+>>>> 3. Changed "stop" to "call abort()" in mksrfdat.F90
+>>>> 4. Added roundoff error fixes for gridcells containing only special landunits not
+>>>> totalling 100% twice in mksrfdat.F90
+>>>> 5. Added error checking after landunit adjustment to detect gridcells whose
+>>>> components do not total 100% twice in mksrfdat.F90
+ M models/lnd/clm/tools/mksurfdata/mksoicol.F90 ---- Add regrid for mask
+ M models/lnd/clm/tools/mksurfdata/Makefile -------- On IBM optimized remove -C, non-opt remove -O0
+ M models/lnd/clm/tools/mksurfdata/mksoitex.F90 ---- Regrid mask
+ M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ---- Roundoff error fix and test
+>>>>>>>>>>>>>>>> Handle sim_year_range for datasets, loosen the tolerance for area sum
+>>>>>>>>>>>>>>>> Allow time variable to be on ndep files.
+ M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl - Handle sim_year_range
+ M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl --- Handle sim_year_range, loosen
+ tolerance, and allow time variable
+>>>>>>>>>>>>>>>> Get ccsm_bld option working, change ifort a bit, add clm_usr_name
+>>>>>>>>>>>>>>>> option and noOrganicSoilDataset use case to build-namelist
+>>>>>>>>>>>>>>>> New datasets
+ Fix hybrid bug for dynpft case, update externals. Require get_clump_bounds to be called
+ in threaded regions and get_proc_bounds to be called in non-threaded regions. Remove unneeded get_proc_bounds
+ calls, and pass down begg stuff as needed. Make loop in initSurfAlb Open-MP. Begin adding
+ testing for dublin, and add lightning_pgi testing. Add new pftdyn test datasets for 1x1_tropicAtl.
+ Update testing to beta20, default for lightning is ifort, add lightning_pgi testing. Change hist varnames
+ of 3D_Time_constants_vars* to Time_constant_3Dvars*. Remove use of LSMLAT/LSMLON cpp tokens, by default
+ set lsmlat/lsmlon to 1.
+
+Bugs fixed (include bugzilla ID): 1011 (PGI build problem in driver)
+ 1016 (Problem with PTS_MODE build)
+ 1019 (hybrid/OpenMP reproducibility bug for pftdyn mode)
+
+Known bugs (include bugzilla ID): 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 966 (Hybrid restart problem on bluefire)
+ 971 (abort on lahey with MPI)
+ 972 (abort on intel with MPI)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1007 (interpinic error with Linux/lahey)
+ 1010 (error in mksurfdata for qtr degree)
+ 1014 (shr_scam checkSurface can NOT run an I case)
+ 1023 (SCM mode check for lnd_present)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ 1102 (OpenMP problem with pftdyn mode)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: pftdyn test datasets for 1x1_tropicAtl
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self,mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm7, csm_share
+
+scripts to scripts4_090801 ---------------- Begin adding PTS_MODE settings, update clm testlists
+drv to vocemis-drydep12_drvseq3_0_27 -- Add PTS_MODE settings to template
+datm7 to datm7_090729 ------------------- Add single_column support
+csm_share to share3_090729 ------------------ Add dshr support for scmlat/scmlon in domain
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+ A models/lnd/clm/test/system/tests_pretag_dublin ------- Add test list for dublin
+ A models/lnd/clm/test/system/tests_pretag_dublin_nompi - Add interactive test list for dublin
+
+List all existing files that have been modified, and describe the changes:
+
+ M models/lnd/clm/test/system/test_driver.sh ------- Seq testing to beta20, begin adding dublin, default
+ for lightning is ifort, add lightning_pgi,
+ M models/lnd/clm/test/system/input_tests_master -- Add openMP 4x5 test
+ M models/lnd/clm/test/system/CLM_runcmnd.sh ------ Add dublin remove bangkok
+
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Update pftdyn surfdata for 1000-1004
+ tests for 1x1_tropicAtl
+
+ M models/lnd/clm/src/biogeochem/CNWoodProductsMod.F90 - Remove call to get_proc_bounds -- NOT needed
+ M models/lnd/clm/src/main/clm_varpar.F90 -------------- By default set lsmlat/lsmlon to 1
+ M models/lnd/clm/src/main/dynlandMod.F90 -------------- Remove get_proc_bounds pass begg stuff in
+ M models/lnd/clm/src/main/driver.F90 ------------------ Pass begg stuff down to pft_interp
+ M models/lnd/clm/src/main/initializeMod.F90 ----------- Pass begg stuff down to pft_interp
+ M models/lnd/clm/src/main/pftdynMod.F90 --------------- Pass begg stuff down, remove get_proc_bounds calls
+ M models/lnd/clm/src/main/histFileMod.F90 ------------- Change var names of 3D_Time_constants_vars* to
+ Time_constant_3Dvars*
+ M models/lnd/clm/src/main/initSurfAlbMod.F90 ---------- Make loop OpenMP parallel
+ M models/lnd/clm/src/main/decompMod.F90 --------------- Make sure get_clumpbounds is called from threaded
+ regions and get_proc_bounds is NOT.
+
+Summary of testing:
+
+ bluefire: All PASS except
+002 erA91 TER.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 7
+003 brA91 TBR.sh _sc_dh clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 6
+006 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v5 72+72 cold ...............FAIL! rc= 7
+010 erE91 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 10+38 arb_ic .........FAIL! rc= 7
+011 brE91 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v5 72+72 arb_ic ......FAIL! rc= 6
+019 erEH1 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 10+38 arb_icFAIL! rc= 7
+020 brEH1 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 24+24 arbFAIL! rc= 6
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+041 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 7
+042 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 6
+048 brJ61 TBR.sh 4p_casasc_dh clm_std^nl_urb_br 20021230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 6
+051 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+052 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 6
+054 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 6
+ bluefire interactive testing: All PASS!
+ bluefire/CCSM testing: All PASS!
+PASS ERS_D.f45_g35.I_2000.bluefire
+PASS PET.f45_g35.I_1850.bluefire.cpl
+PASS PET.f45_g35.I_1850.bluefire.atm
+PASS PET.f45_g35.I_1850.bluefire.lnd
+PASS PET.f45_g35.I_1850.bluefire.ice
+PASS PET.f45_g35.I_1850.bluefire.ocn
+PASS PET.f45_g35.I_1850.bluefire.glc
+PASS ERS.f19_g16.I_1850.bluefire
+PASS ERS.f19_g16.I_1850-2000.bluefire
+PASS ERB.f09_g16.I_1948_2004.bluefire
+PASS ERB.f09_g16.I1850SPINUPCN.bluefire
+PASS ERH_D.f10_f10.I_1850_CN.bluefire
+PASS PET.f10_f10.ICN8520.bluefire.cpl
+PASS PET.f10_f10.ICN8520.bluefire.atm
+PASS PET.f10_f10.ICN8520.bluefire.lnd
+PASS PET.f10_f10.ICN8520.bluefire.ice
+PASS PET.f10_f10.ICN8520.bluefire.ocn
+PASS PET.f10_f10.ICN8520.bluefire.glc
+ jaguar: All PASS!
+ lightning/ifort interactive testing: All PASS!
+ breeze,gale,hail,gust/ifort interactive testing: All PASS up to...
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_46
+
+Changes answers relative to baseline: No bit-for-bit (except dynpft which was irreproducible before)
+
+===============================================================
+===============================================================
+Tag name: clm3_6_46
+Originator(s): erik (erik)
+Date: Wed Jul 22 15:50:43 MDT 2009
+One-line Summary: Get more tests to work/document them, add use cases for 1850_control,
+ 2000_control, and 20thC_transient, straighten out single-point grids, Listen to
+ LND_CDF64 env variable from template, remove CLM_ARB_IC.
+
+Purpose of changes:
+
+Work with build-namelist to make 20th-Century a use-case so that ndepdyn files will be
+included if found, but can still work without them (20thC_transient, 2000_control, and
+1850_control use cases). Fix more bugs and tests, report on testing status for each
+machine. Add files needed for 1.9x2.5_tx1v1 grid and new 10x15 surface dataset. Reconcile
+grids for single-point datasets so they are consistent (lon within 0-360 rather than -180-180).
+Get new single-point datasets for aerosol and nitrogen-deposition. Work with
+pftdyntest2raw.ncl so will work. Work with mksurfdata.pl script so will append needed
+grid data on urban point datasets. Add in CLM1PT mode for datm7 and use datm7 streams
+template for testing. Listen to LND_CDF64 env variable from template, remove CLM_ARB_IC.
+
+Bugs fixed (include bugzilla ID): 1002 (remove CLM_ARB_IC)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 896 (T62 mode does not work)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 966 (Hybrid restart problem on bluefire)
+ 971 (abort on lahey with MPI)
+ 972 (abort on intel with MPI)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1007 (interpinic error with Linux/lahey)
+ 1019 (hybrid/OpenMP reproducibility bug for pftdyn mode)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system:
+
+ Add CLM1PT option to DATM_MODE in env_conf.xml
+
+Describe any changes made to the namelist:
+
+ New use-cases for:
+ 2000_control
+ 1850_control
+ 20thC_transient
+
+List any changes to the defaults for the boundary datasets:
+ 1.9x2.5_tx1v1 datasets, new single-point/regional datasets, new 10x15 surface dataset
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, datm7, pio
+
+scripts to scripts4_090720
+datm7 to datm7_090721
+pio to pio51_prod
+
+List all files eliminated:
+
+D models/lnd/clm/bld/namelist_files/streams.txt.readme ------- Use datm7 version
+D models/lnd/clm/bld/namelist_files/datm.streams.template.xml- Use datm7 version
+D models/lnd/clm/test/system/nl_files/clm_organic ------------ organic files included anyway
+
+List all files added and what they do:
+
+>>>>>>>>>>>> Add new use cases
+A models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml ---- 2000 control
+A models/lnd/clm/bld/namelist_files/use_cases/20thC_transient.xml - 20th Century transient
+A models/lnd/clm/bld/namelist_files/use_cases/1850_control.xml ---- 1850 control
+A models/lnd/clm/bld/namelist_files/use_cases/2000_control.xml ---- 2000 control
+
+>>>>>>>>>>>> Add regression tests list for without MPI
+A models/lnd/clm/test/system/tests_posttag_nompi_regression ------- no mpi tests
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>>>>> Get more tests working, or at least closer to working
+>>>>>>>>>>>>>>>>>>
+>>>>>>>>>>>>>>>>>> models/lnd/clm/test/system directory changes
+M TCB.sh -------------------------------- Put -mach arg here
+M tests_pretag_bluefire ----------------- Change some hybrid tests to MPI
+M config_files/ext_ccsm_seq_10x15_dh ---- Set cice decomp / remove silent mode
+M config_files/ext_ccsm_seq_4x5_dh ------ Set cice decomp / remove silent mode
+M config_files/ext_ccsm_seq_1.9x2.5_dh -- Set cice decomp / remove silent mode
+M config_files/ext_ccsm_seq_64x128_s ---- Set cice decomp / remove silent mode
+M config_files/ext_ccsm_seq_0.9x1.25_dh - Set cice decomp / remove silent mode
+M test_driver.sh ------------------------ Set threads/tasks, move -mach to TCB,
+ set DIN_LOC_ROOT, change needed for latest jaguar build
+M tests_posttag_hybrid_regression ------- Remove bad tests, move pure-mpi, serial/open-mp out
+M tests_posttag_purempi_regression ------ Remove bad tests, move pure-mpi, serial/open-mp out
+M nl_files/nl_urb ----------------------- Remove urban fields already included
+M nl_files/nl_urb_br -------------------- Remove urban fields already included
+M input_tests_master -------------------- Changes so tests will work
+M TCBext_ccsmseq_cam.sh ----------------- Add main/cpl_mct to clm list of dirs
+
+>>>>>>>>>>>>>>>>>> Update filenames, append grid/frac files to urban single-pt in script
+M models/lnd/clm/tools/mksurfdata/mksurfdata.regional ---- New griddata, fix filepath
+M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept ---- New griddata, fix filepath
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pl ---------- Fix, append grid/frac data
+ to urban single-point datasets
+
+>>>>>>>>>>>>>>>>>> Get the pftdyntest2raw script working (will update datasets later)
+M models/lnd/clm/tools/ncl_scripts/pftdyntest2raw.ncl - Fix so will work, add grazing on
+M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl ----- Add sim_yr to out filenames
+
+>>>>>>>>>>>>>>>>>> Validate grid
+M models/lnd/clm/tools/mkgriddata/creategridMod.F90 - Check for valid grid values
+
+M models/lnd/clm/src/main/pftdynMod.F90 - Shorten some long lines
+
+>>>>>>>>>>>>>>>>>> Change to build: add use-cases, remove CLM_ARB_IC, listen to LND_CDF64
+>>>>>>>>>>>>>>>>>> Add new 1.9x2.5_tx1v1 frac dataset, new datasets for single-point,
+>>>>>>>>>>>>>>>>>> new 10x15 datasets, separate out sim_yr and sim_year_range
+>>>>>>>>>>>>>>>>>>
+>>>>>>>>>>>>>>>>>> models/lnd/clm/bld directory changes
+M listDefaultNamelist.pl ----------------------- Add csmdata arg
+M build-namelist ------------------------------- Add -list_use_cases option
+                                                 load use cases before other defaults, add $ccsm_tools var,
+ separate sim_yr and sim_year_range, put case_desc for use-cases
+M clm.cpl7.template ---------------------------- Remove CLM_ARB_IC, use LND_CDF64
+M namelist_files/checkdatmfiles.ncl ------------ Add tx1v1 mask
+M namelist_files/namelist_definition.xml ------- Add tx1v1 mask, make sim_year integer
+ add sim_year_range, use_case_desc, and clm_demand
+M namelist_files/namelist_defaults_overall.xml - default sim_year_range is constant
+ and default clm_demand is null
+M namelist_files/namelist_defaults_datm.xml ---- Use datm7 streams template,
+ and update domain files
+M namelist_files/use_cases/pergro.xml ---------- Add use_case_desc
+M namelist_files/use_cases/pergro0.xml --------- Add use_case_desc
+M namelist_files/namelist_defaults_clm.xml ----- Move co2_ppmv defaults to use_cases
+ new surf/frac/aer/ndep/grid data: 5x5_amazon, 1x1_brazil, 1x1_urbanc_alpha,
+ 1x1_mexicocityMEX, 1x1_vancouverCAN
+ new frac data: 1.9x2.5_tx1v1
+ new aerdep/ndep data: 1x1_camdenNJ, 1x1_tropicAtl, 1x1_asphaltjungleNJ
+ new surfdata/pftdyn: 10x15
+ (new finidat file for f09 CN, 1850 -- commented out -- so answers same as last tag)
+
+Summary of testing:
+
+ bluefire: All PASS except
+019 erEH1 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 10+38 arb_icFAIL! rc= 7
+020 brEH1 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 24+24 arbFAIL! rc= 6
+022 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+023 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+024 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+025 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+026 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+027 smC45 TSM.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 8
+028 erC45 TER.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -3+-7 arb_ic .......FAIL! rc= 5
+029 brC45 TBR.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -5+-5 arb_ic .......FAIL! rc= 5
+030 blC45 TBL.sh 17p_sc_m clm_pftdyn 18501230:NONE:3600 10x15 USGS@1850-2000 -10 arb_ic .........FAIL! rc= 4
+033 brC61 TBR.sh _sc_dh clm_std^nl_urb_br 20021001:NONE:1800 1.9x2.5 gx1v6 -3+-3 cold ...........FAIL! rc= 6
+041 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 7
+042 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 6
+051 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 4
+052 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 6
+054 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 6
+ bluefire interactive testing: All PASS except
+004 blA74 TBL.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -10 arb_ic ............FAIL! rc= 7
+009 blCA4 TBL.sh _sc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 7
+013 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 7
+017 blJ74 TBL.sh 4p_casasc_ds clm_std^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic ..FAIL! rc= 7
+019 blCA8 TBL.sh _sc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 7
+021 blL78 TBL.sh _sc_s clm_std^nl_urb 20021231:NONE:1800 1x1_brazil navy -10 arb_ic .............FAIL! rc= 7
+027 erL83 TER.sh _sc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -5+-5 arb_ic ..........FAIL! rc= 7
+028 brL83 TBR.sh _sc_do clm_std^nl_urb_br 20020115:NONE:3600 5x5_amazon navy -10+-10 arb_ic .....FAIL! rc= 6
+029 blL83 TBL.sh _sc_do clm_std^nl_urb 20020115:NONE:3600 5x5_amazon navy -10 arb_ic ............FAIL! rc= 5
+034 bl744 TBLtools.sh mksurfdata tools__s namelist ..............................................FAIL! rc= 4
+036 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 4
+043 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ bluefire/CCSM testing: All PASS
+PASS ERS_D.f45_g35.I_2000.bluefire
+PASS PET.f45_g35.I_1850.bluefire.cpl
+PASS PET.f45_g35.I_1850.bluefire.atm
+PASS PET.f45_g35.I_1850.bluefire.lnd
+PASS PET.f45_g35.I_1850.bluefire.ice
+PASS PET.f45_g35.I_1850.bluefire.ocn
+PASS PET.f45_g35.I_1850.bluefire.glc
+PASS ERS.f19_g16.I_1850.bluefire
+PASS ERS.f19_g16.I_1850-2000.bluefire
+PASS ERB.f09_g16.I_1948_2004.bluefire
+PASS ERB.f09_g16.I1850SPINUPCN.bluefire
+PASS ERH_D.f10_f10.I_1850_CN.bluefire
+ lightning/ifort: All PASS except -- up to test 18
+002 erA91 TER.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 7
+003 brA91 TBR.sh _sc_dh clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 6
+005 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+006 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+007 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+008 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+009 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+011 erJ42 TER.sh 4p_casasc_dm clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 10+38 cold ...........FAIL! rc= 7
+012 brJ42 TBR.sh 4p_casasc_dm clm_std^nl_urb_br 20021230:NONE:1800 10x15 USGS 72+72 cold ........FAIL! rc= 6
+015 erL51 TER.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 7
+016 brL51 TBR.sh _sc_dh clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 6
+ lightning/ifort interactive testing: up to test 004
+004 blCA4 TBL.sh _sc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 7
+ calgary/lf95: All PASS except...
+004 blA74 TBL.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -10 arb_ic ............FAIL! rc= 7
+015 blCA4 TBL.sh _sc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 7
+019 blOC4 TBL.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 7
+023 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 7
+024 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+025 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+026 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+027 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+028 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+049 blL74 TBL.sh _sc_s clm_std^nl_urb 20020101:NONE:1800 1x1_brazil navy -10 arb_ic .............FAIL! rc= 7
+052 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+053 smM94 TSMncl_tools.sh ndepregrid ............................................................FAIL! rc= 6
+055 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 8
+056 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ breeze,gale,hail,gust/ifort: All PASS except...
+004 blA74 TBL.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -10 arb_ic ............FAIL! rc= 7
+009 blCA4 TBL.sh _sc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 7
+011 blCA8 TBL.sh _sc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 7
+019 blR53 TBL.sh 17p_cnc13sc_do clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@1850 48 cold .......FAIL! rc= 7
+020 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+021 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+022 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+023 blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_45
+
+Changes answers relative to baseline: Bit-for-bit EXCEPT for (as datasets change)
+ 5x5_amazon, 1x1_brazil, 1x1_urbanc_alpha, 1x1_mexicocityMEX, 1x1_vancouverCAN
+ 1x1_camdenNJ, 1x1_tropicAtl, 1x1_asphaltjungleNJ, 10x15
+
+===============================================================
+===============================================================
+Tag name: clm3_6_45
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Fri Jul 10 14:00:27 MDT 2009
+One-line Summary: Remove inconsistent finidat file in clm3_6_44
+
+Purpose of changes: A few simple bug fixes from clm3_6_44, with minimal testing
+
+ Remove finidat inconsistent with the surface datasets for f19_g16, bgc=cn, sim_yr=1850
+ Fix typo in test list, and fix thread settings for bluefire tests
+ Remove -ftz from CFLAGS for ifort for mkdatadomain
+ Change csh run scripts so: use CCSM env_machopts settings, set defaults, fix so can run serial
+ Update datm7 so that CPLHIST3HrWxHfHrSol mode has iradsw=-1 so mimics running with CAM
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 966 (Hybrid restart problem on bluefire)
+ 971 (abort on lahey with MPI)
+ 972 (abort on intel with MPI)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1019 (hybrid/OpenMP reproducibility bug for pftdyn mode)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: Remove inconsistent
+ finidat file for 0.9x1.25, gx1v6, BGC=cn, sim_yr=1850
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): datm7
+
+ datm7 to datm7_090709
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/test/system/test_driver.sh ------------------- Fix threads settings for bluefire
+M models/lnd/clm/test/system/input_tests_master --------------- Fix typo
+M models/lnd/clm/tools/interpinic/runinit_ibm.csh ------------- Use CCSM env_machopts settings
+M models/lnd/clm/tools/mkdatadomain/Makefile ------------------ Remove -ftz from CFLAGS for ifort
+M models/lnd/clm/bld/run-ibm.csh ------------------------------ Use CCSM env_machopts settings, set defaults,
+ fix so can run serial
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Remove 0.9x1.25, BGC=cn, sim_year=1850, mask=gx1v6
+ finidat file as was inconsistent with new surface dataset
+
+Summary of testing: Limited
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_44
+
+Changes answers relative to baseline: bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_6_44
+Originator(s): erik (erik)
+Date: Thu Jul 9 11:47:40 MDT 2009
+One-line Summary: Fix C13 bug, update scripts, drv, datm. Add domain files for idmap
+atm-ocn grids for datm. Remove SEQ_MCT, add new ESMF env vars to template. Work with
+ndeplintInterp, fix SCAM
+
+Purpose of changes:
+
+Fix C13 nflds bug, update scripts, drv, datm. Add domain files for idmap atm-ocn grids
+for datm. Remove SEQ_MCT, add new ESMF env vars to template. Work with ndeplintInterp to
+enable using J-F's new Nitrogen deposition files for transient 20th Century simulations.
+SCAM fixes from John Truesdale. Add indices for PFT types.
+
+Bugs fixed (include bugzilla ID): 981 (ccsm domain files for atm=ocn grid)
+ 987 (remove SEQ_MCT)
+ 991 (C13 nfields cause model to blowup on jaguar)
+ 997 (interpolated finidat files cause fully coupled cases to fail)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 966 (Hybrid restart problem on bluefire)
+ 971 (abort on lahey with MPI)
+ 972 (abort on intel with MPI)
+ 990 (CN transient blowup)
+ 994 (finidat files on jaguar for pftdyn fail)
+ 1019 (hybrid/OpenMP reproducibility bug for pftdyn mode)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Remove SEQ_MCT
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ New surface datasets for 0.9x1.25 and 1.9x2.5, and new finidat for 1850 for 0.9x1.25
+    The 2000 10x15 dataset is set to the 1850 version so that testing will work.
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, oleson (pftvarcon changes)
+ SCAM changes from John Truesdale
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm7,
+ csm_share and pio
+
+ scripts to scripts4_090707b
+ drv to vocemis-drydep11_drvseq3_0_23
+ datm7 to datm7_090708
+ csm_share to share3_090706b
+ pio to pio50_prod
+
+List all files eliminated:
+
+ D models/lnd/clm/test/system/tests_pretag_bangkok -- Rename to calgary
+
+List all files added and what they do:
+
+ A models/lnd/clm/test/system/tests_pretag_calgary ----------- Rename from bangkok
+ A models/lnd/clm/test/system/tests_pretag_bluefire_nompi ---- serial/open-MP tests
+ A models/lnd/clm/test/system/tests_pretag_jaguar_nompi ------ serial/open-MP tests
+ A models/lnd/clm/test/system/tests_posttag_lightning_nompi -- serial/open-MP tests
+
+List all existing files that have been modified, and describe the changes:
+
+ M Quickstart.GUIDE --- fix minor error in name of directory as scripts changed.
+
+ >>>>>>>>>>>> Separate out non-mpi tests for bluefire, jaguar, and lightning
+ Test list is different if run interactive or submitted to batch que.
+ Serial, open-mp only tests are run interactive, MPI and hybrid tests
+ are run when submitted to the batch queue. This prevents waste of resources
+ for serial and open-mp only tests.
+ Remove bangkok, replace with calgary only. Default threads depends
+ on if interactive or not.
+ M models/lnd/clm/test/system/test_driver.sh -----------
+ M models/lnd/clm/test/system/tests_pretag_bluefire ----
+ M models/lnd/clm/test/system/tests_pretag_jaguar ------
+ M models/lnd/clm/test/system/tests_posttag_lightning --
+ M models/lnd/clm/test/system/README ------------------- Add note about CLM_SOFF
+
+ >>>>>>>>>>>>
+ M models/lnd/clm/tools/mksurfdata/Makefile ------------------- For ifort remove -ftz option to CFLAGS
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.globalirrig ----- New input PFT datasets from Peter L.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.regional -------- New input PFT datasets from Peter L.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pftdyn ---------- New input PFT datasets from Peter L.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept -------- New input PFT datasets from Peter L.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist -------- New input PFT datasets from Peter L.
+ M models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850-2005.txt -- New input PFT datasets from Peter L.
+ M models/lnd/clm/tools/mksurfdata/pftdyn_simyr2000.txt ------- New input PFT datasets from Peter L.
+ M models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850.txt ------- New input PFT datasets from Peter L.
+ M models/lnd/clm/tools/mksurfdata/mksurfdata.pl -------------- New input PFT datasets from Peter L.
+
+ >>>>>>>>>>>> Work on linear interpolation of Nitrogen deposition so that add in mid-decades
+ M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl ------- Check if interpolation should be cyclic
+ M models/lnd/clm/tools/ncl_scripts/ndeplintInterp.ncl ----- Also loop over mid decades as well
+ M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl --------- Check if interpolation should be cyclic
+
+ >>>>>>>>>>>> Fix from Sam so that don't have negative ice flow
+ M models/lnd/clm/tools/interpinic/interpinic.F90 --- Change from Sam so that fully coupled cases don't trap negative ice flow
+ M models/lnd/clm/tools/interpinic/Srcfiles --------- Don't repeat filenames so can build with lahey
+
+ >>>>>>>>>>>> Change so that document that files should have longs between 0 and 360 rather than -180 to 180
+ M models/lnd/clm/tools/mkgriddata/mkgriddata.regional --- Use longs 0-360
+ M models/lnd/clm/tools/mkgriddata/mkgriddata.singlept --- Use longs 0-360
+ M models/lnd/clm/tools/mkgriddata/Makefile -------------- For ifort remove -ftz option to CFLAGS
+ M models/lnd/clm/tools/mkgriddata/README ---------------- Make note that regional/single-pt grid files should have longs: 0 <= longs <= 360
+
+ >>>>>>>>>>>> Remove SEQ_MCT and handle COMP_INTERFACE from ccsm cpl7 scripts, new surface datasets
+ M models/lnd/clm/bld/configure ----------- Remove SEQ_MCT, handle cpl_esmf
+ M models/lnd/clm/bld/clm.cpl7.template --- Handle $COMP_INTERFACE
+ M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Allow mid-decadal
+ sim_years so can process ndepdyn files
+ M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml --- New 0.9 finidat,
+ 0.9x1.25, and 1.9x2.5 surfdata, fndepdat files for mid-decadal sim_years.
+
+ >>>>>>>>>>>> Add indices for PFTs. Fixes for SCAM. Break up long lines > 132chars
+ M models/lnd/clm/src/biogeochem/CASAMod.F90 -------------- noveg, nc3_nonarctic_grass
+ M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90 --- noveg, ncorn, nbrdlf_dcd_brl_shrub
+ M models/lnd/clm/src/biogeochem/VOCEmissionMod.F90 ------- Add PFT indices
+ M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90 - Add PFT indices
+ M models/lnd/clm/src/main/organicFileMod.F90 ------------- SCAM fix (from jet)
+ M models/lnd/clm/src/main/ncdio.F90 ---------------------- Break up long lines
+ M models/lnd/clm/src/main/pftdynMod.F90 ------------------ Break up long lines, add
+ noveg, nbrdlf_evr_shrub
+ M models/lnd/clm/src/main/clm_atmlnd.F90 ----------------- C13 bug fix for number of fields
+ (found by Jon Wolfe)
+ M models/lnd/clm/src/main/pftvarcon.F90 ------------------ Add PFT indices, make sure
+ pftnames from pftcon file is
+ as expected.
+ M models/lnd/clm/src/biogeophys/UrbanInputMod.F90 -------- SCAM fix (from jet)
+
+Summary of testing:
+
+ bluefire: All FAIL except...
+008 smB91 TSMruncase.sh .........................................................................PASS
+053 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................PASS
+ bluefire/CCSM testing: All PASS
+PASS ERS_D.f45_g35.I_2000.bluefire
+PASS PET.f45_g35.I_1850.bluefire.cpl
+PASS PET.f45_g35.I_1850.bluefire.atm
+PASS PET.f45_g35.I_1850.bluefire.lnd
+PASS PET.f45_g35.I_1850.bluefire.ice
+PASS PET.f45_g35.I_1850.bluefire.ocn
+PASS PET.f45_g35.I_1850.bluefire.glc
+PASS ERS.f19_g16.I_1850.bluefire
+PASS ERS.f19_g16.I_1850-2000.bluefire
+PASS ERB.f09_g16.I_1948_2004.bluefire
+PASS ERB.f09_g16.I1850SPINUPCN.bluefire.001802
+PASS ERH_D.f10_f10.I_1850_CN.bluefire
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_43
+
+Changes answers relative to baseline: No -- bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_6_43
+Originator(s): erik (erik)
+Date: Wed Jun 10 11:41:57 MDT 2009
+One-line Summary: Fix pftdyn bug, enable 1D primary hist files, fix time-const3D output, fix template bug, enable cpl_esmf/cpl_mct
+
+Purpose of changes:
+
+Add src/main/cpl_esmf,src/main/cpl_mct directories, change configure to build either way,
+add -comp_intf option. Remove SEQ_ #ifdef's, simplify some of the logic associated with
+the old options (cpl6 and program_off). Brian K -- fix nans, enable openMP again. Allow
+first history tape to be 1D (Sean Swenson). Fix template co2_ppmv error. Remove SPMD
+#ifdef from RTM. Fix driver pftdyn bug. Fix bug on writing out 3D time-constant fields.
+
+Bugs fixed (include bugzilla ID):
+ 929 (bug in co2ppmv value in template)
+ 969 (allow primary tapes to be 1D)
+ 974 (bug in pftdyn mode)
+ 977 (bug writing out 3D time-const data)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 966 (Hybrid restart problem on bluefire)
+ 971 (abort on lahey with MPI)
+ 972 (abort on intel with MPI)
+ 991 (C13 nfields cause model to blowup on jaguar)
+ 1019 (hybrid/OpenMP reproducibility bug for pftdyn mode)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Add choice between ESMF/MCT compilation
+ NOTE: ESMF option does NOT work as files do NOT exist yet!
+ Add -comp_intf option to configure
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, swensosc (1D and history changes),
+ kauff (reenable OpenMP, some vars spval instead of nan)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, pio
+
+ scripts to scripts4_090605b
+ drv to vocemis-drydep08_drvseq3_0_18
+ pio back to pio45_prod (to eliminate compilation problem with pathscale)
+
+List all files eliminated:
+
+D models/lnd/clm/src/main/lnd_comp_mct.F90 --- Move to cpl_mct
+
+List all files added and what they do:
+
+A models/lnd/clm/src/main/cpl_mct ---- Directory for MCT interface
+A models/lnd/clm/src/main/cpl_esmf --- Directory for ESMF interface
+A models/lnd/clm/src/main/cpl_mct/lnd_comp_mct.F90 - Moved from main directory
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>> Add -comp_intf option, fix template bug, closing input namelist
+M models/lnd/clm/bld/configure ----------- Add -comp_intf option
+M models/lnd/clm/bld/config_files/config_definition.xml -- Add comp_interface
+M models/lnd/clm/bld/clm.cpl7.template --- Close input namelist with ending "/"
+
+>>>>>>>>>>>>>> Remove SEQ_ CPP #if's, require some arguments
+ (needed to be optional for cpl6/offline), allow primary hist files 1D
+ Fix so that 3D time-constant data does get written out.
+M models/lnd/clm/src/main/clm_comp.F90 --------- Make rstwr, nlend, rdate required
+M models/lnd/clm/src/main/driver.F90 ----------- Remove doalb if's, PFTDYNWBAL CPP
+ (for pftdyn bug). Require rstwr,
+ nlend, and rdate
+M models/lnd/clm/src/main/clmtypeInitMod.F90 --- Some vars init to spval (kauff)
+ certain cell & pft level variables are initialized to spval
+ instead of nan so eliminate the appearance of nans on restart files.
+ (not all cell & pfts were used and given non-nan values)
+M models/lnd/clm/src/main/histFileMod.F90 ------ Write out 3D time-constant vars,
+ fix so can write primary 1D files
+ (Sean Swenson)
+M models/lnd/clm/src/main/restFileMod.F90 ------ nlend required
+M models/lnd/clm/src/main/controlMod.F90 ------- Remove SEQ_ CPP #if's, allow 1D primary
+ ability to run threaded is re-enabled (kauff)
+M models/lnd/clm/src/main/do_close_dispose.F90 - Require rstwr, nlend
+M models/lnd/clm/src/riverroute/RtmMod.F90 ----- Remove SPMD #ifdef
+
+>>>>>>>>>>>>>> Move testing to calgary from bangkok
+M models/lnd/clm/test/system/test_driver.sh ---- Add LD_LIBRARY_PATH for calgary/lf95
+
+Summary of testing:
+
+ bluefire: All PASS except (up to test 35)
+007 erA91 TER.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 7
+008 brA91 TBR.sh _sc_dh clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 6
+009 blA91 TBL.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 5
+012 blD91 TBL.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v5 144 cold .................FAIL! rc= 5
+013 smH74 TSM.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test@1000-1004 -1100 cold FAIL! rc= 8
+014 blH74 TBL.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test@1000-1004 -100 cold FAIL! rc= 4
+019 blE91 TBL.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 arb_ic ............FAIL! rc= 5
+028 blF93 TBL.sh 17p_vodsrsc_do clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 5
+034 erEH1 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 10+38 arb_icFAIL! rc= 7
+
+ bluefire/CCSM testing:
+PASS ERS.f45_g35.I_2000.bluefire
+PASS ERS.f19_g16.I_1850.bluefire
+PASS ERS.f19_g16.I_1850-2000.bluefire
+PASS ERB.f09_g16.I_1948_2004.bluefire
+SFAIL ERH.f10_f10.I_1850_CN.bluefire.235943 <<< f10_f10 doesn't work for datm7 right now
+
+TBL hybrid/openMP tests fail since previous version had OpenMP disabled.
+
+ breeze/gale/hail/gust/ifort: All PASS up to test 12 (10x15, smL51 test)
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_43
+
+Changes answers relative to baseline: Only pftdyn mode
+
+===============================================================
+===============================================================
+Tag name: clm3_6_42
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Tue Jun 2 11:58:25 MDT 2009
+One-line Summary: Bring CN Harvest branch (cnhrv02_clm3_6_41) to trunk
+
+Purpose of changes:
+
+ Add in Peter Thornton's code to read in harvesting variables on surface datasets
+ and apply harvesting to carbon and nitrogen pools.
+ Add in surface datasets from clm3_6_40 that have harvesting fields on them for
+ 0.9x1.25, 1.9x2.5, and 10x15 (as well as aerdep, ndepdat, and ndepdyn datasets).
+ Remove urban test list as urban on by default, and remove top level doc directory.
+ Add C13 CPP token for C13 extension of CN add -c13 option to configure.
+ Add C13/10x15@1850-2000 testing.
+ Let sum of percent types match to 100 within small value rather than an exact match.
+ Increase wasteheat limit from 40 to 100 W/m2.
+ Change default masks to USGS for 4x5,T31,T42, and T85 resolutions so same as cice
+ Update drv to latest version (drvseq3_0_17 -- on voc branch).
+ Update ccsm comparison version used in test suite.
+
+Bugs fixed (include bugzilla ID):
+ 977 (bug writing out 3D time-const data)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 966 (Hybrid restart problem on bluefire)
+ 971 (abort on lahey with MPI)
+ 972 (abort on intel with MPI)
+ 974 (bug in pftdyn mode)
+ 977 (bug writing out 3D time-const data)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system:
+ Add C13 #ifdef for CN
+ Add -c13 option to configure
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ Make USGS mask default for 4x5,T31,T42, and T85
+ surface datasets with harvesting for: 0.9x1.25, 1.9x2.5, 10x15 (1850,2000)
+ pftdyn datasets with harvesting for: 0.9x1.25, 10x15 (1850-2005)
+ faerdep datasets for: 0.9x1.25, 10x15 (1850, 2000, 1850-2000)
+ fndepdat datasets for: 0.9x1.25, 10x15 (1850,2000)
+ fndepdyn datasets for: 0.9x1.25, 1.9x2.5, 10x15 (1850-2000)
+
+Describe any substantial timing or memory changes: Minor for CN
+
+Code reviewed by: thornton, erik
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ Remove top level doc directory as out of date and won't be updated. Howto is in
+ the scripts directory
+
+List all files eliminated:
+
+D models/lnd/clm/test/system/tests_posttag_urban - Urban on by default so doesn't
+                                                   need its own tests
+>>>>>>>>>>>>>>>>>> Remove as can NOT easily recreate source from them and code
+ has changed since the creation of the scripts. Would take work
+ to get the two in sync and be able to use these scripts as source.
+D models/lnd/clm/src/main/gen_ncdio_global_subs.csh
+D models/lnd/clm/src/main/gen_ncdio_local_subs.csh
+D models/lnd/clm/src/main/gen_spmdgs_subs.csh
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>>>>>> Add new configurations to test C13 config
+A + models/lnd/clm/test/system/config_files/17p_cnc13sc_dh
+A + models/lnd/clm/test/system/config_files/17p_cnc13sc_dm
+A + models/lnd/clm/test/system/config_files/17p_cnc13sc_do
+>>>>>>>>>>>>>>>>>> New module to handle wood harvesting
+A + models/lnd/clm/src/biogeochem/CNWoodProductsMod.F90 Calculate loss fluxes from wood
+ products pools, and update
+ product pool state variables
+
+List all existing files that have been modified, and describe the changes:
+
+M Quickstart.GUIDE --- Update documentation
+M README ------------- Update documentation
+>>>>>>>>>>>>>>>>>> Add C13 and 10x15@1850-2000 tests
+M models/lnd/clm/test/system/tests_pretag_bluefire --- Add 10x15@1850-2000 tests
+M models/lnd/clm/test/system/config_files/README ----- Add note on new C13 config
+M models/lnd/clm/test/system/tests_posttag_breeze ---- Add openmp C13 test
+M models/lnd/clm/test/system/README.testnames -------- Add R configuration for C13 config
+M models/lnd/clm/test/system/tests_posttag_hybrid_regression -- Add C45 and R51 tests
+M models/lnd/clm/test/system/tests_posttag_purempi_regression - Add C45 and R52 tests
+M models/lnd/clm/test/system/input_tests_master ------ Add C45 (10x15@1850-2000, pure-mpi) and
+ R51-R53 (C13) tests
M models/lnd/clm/test/system/test_driver.sh ---------- Update ccsm4 comparison version
+ to beta17
+>>>>>>>>>>>>>>>>>> Add C13 configuration option, and new datasets
+M models/lnd/clm/bld/configure -------------------------------- Add -c13 option
+M models/lnd/clm/bld/config_files/config_definition.xml ------- Add c13 entry
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -
+ Change default masks for 4x5,T31,T42,T85 to USGS so agree with cice defaults
+ surface datasets with harvesting for: 0.9x1.25, 1.9x2.5, 10x15 (1850,2000)
+ pftdyn datasets with harvesting for: 0.9x1.25, 10x15 (1850-2005)
+ faerdep datasets for: 0.9x1.25, 10x15 (1850, 2000, 1850-2000)
+ fndepdat datasets for: 0.9x1.25, 10x15 (1850,2000)
+ fndepdyn datasets for: 0.9x1.25, 1.9x2.5, 10x15 (1850-2000)
+>>>>>>>>>>>>>>>>>> C13/DGVM #ifdefs, add harvest vars and calculations
+M models/lnd/clm/src/biogeochem/CNCStateUpdate2Mod.F90 ---- Add CStateUpdate2h method for
+ harvest mortality fluxes
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate2Mod.F90 -- Add C13 cpp and add C13StateUpdate2h
+ method for harvesting
+M models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90 ---- Remove 10n and 100n variables
+M models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90 ----- Add in harvesting terms
+M models/lnd/clm/src/biogeochem/CNPrecisionControlMod.F90 - Add C13 #ifdef's
+M models/lnd/clm/src/biogeochem/CNSummaryMod.F90 ---------- Add harvesting fields
+M models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90 ---- Remove 10c, 100c variables,
+ formatting changes
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate1Mod.F90 -- Add C13 #ifdef, remove 10c, 100c vars
+M models/lnd/clm/src/biogeochem/CNrestMod.F90 ------------- Add C13 #ifdef
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate3Mod.F90 -- Add C13 #ifdef
+M models/lnd/clm/src/biogeochem/CNNStateUpdate2Mod.F90 ---- Add NStateUpdate2h Nitrogen
+ harvesting method
+M models/lnd/clm/src/biogeochem/C13SummaryMod.F90 --------- Add C13 #ifdef
+M models/lnd/clm/src/biogeochem/CNAllocationMod.F90 ------- Add C13 #ifdef and harvesting variables
+M models/lnd/clm/src/biogeochem/CNC13FluxMod.F90 ---------- Add C13 #ifdef and C13Flux2h harvest
+ method, and CNC13HarvestPftToColumn
+ private method
+M models/lnd/clm/src/biogeochem/CNEcosystemDynMod.F90 ----- Add harvesting method calls
+ filters by lbc,ubc
+ Add C13 #ifdef, add CNHarvest call if
+ fpftdyn file is set.
+M models/lnd/clm/src/biogeochem/CNSetValueMod.F90 --------- Add C13 #ifdef, remove 10c, 100c loss
+ vars, add harvest vars
+M models/lnd/clm/src/main/clm_varcon.F90 ------------------ Add C13 #ifdef
+ Increase wasteheat limit to 100 W/m2
+M models/lnd/clm/src/main/CNiniTimeVar.F90 ---------------- Add C13 #ifdef
+ begc, endc to methods
+M models/lnd/clm/src/main/accFldsMod.F90 ------------------ Put frmf and other vars (t10, t_mo,
+ ... agdd) in DGVM #ifdef
+M models/lnd/clm/src/main/clmtypeInitMod.F90 -------------- Add C13 and DGVM #ifdef and new
+ harvesting vars
+M models/lnd/clm/src/main/pftdynMod.F90 ------------------- Add CNHarvest and CNHarvestPftToColumn
+ as public methods,
+ add pftdyn_getharvest private methods,
+ check that land fractions sum to 100
+ within 1e-15 rather than exactly 100,
+ change pftdyn_get_data to pftdyn_getdata
+ Add C13 #ifdef, remove 10c, 100c loss
+ calculation
+M models/lnd/clm/src/main/iniTimeConst.F90 ---------------- Add DGVM #ifdef
+M models/lnd/clm/src/main/clm_atmlnd.F90 ------------------ Add C13 #ifdef
+M models/lnd/clm/src/main/lnd_comp_mct.F90 ---------------- Add C13 #ifdef
+M models/lnd/clm/src/main/CNiniSpecial.F90 ---------------- Add C13 #ifdef
+M models/lnd/clm/src/main/clmtype.F90 --------------------- Add DGVM, C13 #ifdef,
+ harvest vars
+M models/lnd/clm/src/main/histFldsMod.F90 ----------------- Add C13 #ifdef, correct SEEDN,
+ Add: WOOD_HARVESTC, PRODUCT_CLOSS, C13_PRODUCT_CLOSS, WOOD_HARVESTN, PRODUCT_NLOSS
+ Change long_name: DWT_PROD10C_GAIN, DWT_PROD100C_GAIN, DWT_CLOSS, DWT_NLOSS
+M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90 --- Add C13 #ifdef
+M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90 ------- Add C13 #ifdef
+
+Summary of testing:
+
+ bluefire: hybrid and open-mp tests FAIL, pftdyn 1000 tests fail, most TBL tests FAIL as answers change
+001 smA74 TSM.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -10 arb_ic ............PASS
+002 erA74 TER.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -5+-5 arb_ic ..........PASS
+003 brA74 TBR.sh _sc_ds clm_std^nl_urb_br 20030101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .......PASS
+004 blA74 TBL.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -10 arb_ic ............PASS
+005 smL74 TSM.sh _sc_s clm_std^nl_urb 20020101:NONE:1800 1x1_brazil navy -10 arb_ic .............PASS
+015 smB91 TSMruncase.sh .........................................................................PASS
+021 smF92 TSM.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 96 cold .............PASS
+022 erF92 TER.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 10+38 cold ..........PASS
+023 brF92 TBR.sh 17p_vodsrsc_dm clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v5 72+72 cold .......PASS
+029 smCA4 TSM.sh _sc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........PASS
+030 erCA4 TER.sh _sc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ......PASS
+031 brCA4 TBR.sh _sc_ds clm_std^nl_urb_br 20021001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic ...PASS
+032 blCA4 TBL.sh _sc_ds clm_std^nl_urb 20021001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........PASS
+046 smCA8 TSM.sh _sc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...PASS
+047 blCA8 TBL.sh _sc_ds clm_std^nl_urb 20021230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...PASS
+048 smNB4 TSM.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic PASS
+049 erNB4 TER.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 59+100 arb_icPASS
+050 brNB4 TBR.sh _mexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arb_PASS
+051 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic PASS
+065 smL78 TSM.sh _sc_s clm_std^nl_urb 20021231:NONE:1800 1x1_brazil navy -366 arb_ic ............PASS
+066 blL78 TBL.sh _sc_s clm_std^nl_urb 20021231:NONE:1800 1x1_brazil navy -10 arb_ic .............PASS
+PASS ERS.f45_g35.I_2000.bluefire
+PASS ERS.f19_g16.I_1850.bluefire
+PASS ERS.f19_g16.I_1850-2000.bluefire
+PASS ERB.f09_g16.I_1948_2004.bluefire
+SFAIL ERH.f10_f10.I_1850_CN.bluefire.014926 <<< f10_f10 doesn't work for datm7 right now
+PASS ERP.f19_g16.I_CN_1850-2000.bluefire
+ bangkok/lf95: Up to test 6 as follows
+001 smA74 TSM.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -10 arb_ic ............PASS
+002 erA74 TER.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -5+-5 arb_ic ..........PASS
+003 brA74 TBR.sh _sc_ds clm_std^nl_urb_br 20030101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .......PASS
+004 blA74 TBL.sh _sc_ds clm_std^nl_urb 20030101:NONE:3600 1x1_brazil navy -10 arb_ic ............PASS
+005 smA92 TSM.sh _sc_dm clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 10
+006 erA92 TER.sh _sc_dm clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 5
+ breeze/gale/hail/gust/ifort:
+001 smA74 TSM.sh _sc_ds clm_std^nl_urb 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ............PASS
+002 erA74 TER.sh _sc_ds clm_std^nl_urb 19990101:NONE:3600 1x1_brazil navy -5+-5 arb_ic ..........PASS
+003 brA74 TBR.sh _sc_ds clm_std^nl_urb_br 19990101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .......PASS
+004 blA74 TBL.sh _sc_ds clm_std^nl_urb 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ............SKIPPED*
+005 smD94 TSM.sh _persc_ds clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 144 cold .................PASS
+006 erD94 TER.sh _persc_ds clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 72+72 cold ...............PASS
+007 blD94 TBL.sh _persc_ds clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 144 cold .................SKIPPED*
+008 smCA4 TSM.sh _sc_ds clm_std^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........PASS
+009 blCA4 TBL.sh _sc_ds clm_std^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........SKIPPED*
+010 smCA8 TSM.sh _sc_ds clm_std^nl_urb 19971230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...PASS
+011 blCA8 TBL.sh _sc_ds clm_std^nl_urb 19971230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...SKIPPED*
+012 smL54 TSM.sh _sc_ds clm_std^nl_urb 19980115:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_41
+
+Changes answers relative to baseline: Yes, urban wasteheat limit increased to 100 W/m2
+ and CN changes due to harvesting
+
+===============================================================
+Tag name: clm3_6_41
+Originator(s): kauff,erik
+Date: Fri May 29 14:15:38 MDT 2009
+One-line Summary: shrub mods, abort if nthreads > 1 (temporary, wrt bugz #965)
+
+Purpose of changes: fix shrub height, disable threading (due to inexact restart)
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist:
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by:
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ - update externals for scripts and pio.
+
+List all files eliminated:
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+ - abort if num threads > 1
+ main/controlMod.F90
+
+ - Change CNVegStructUpdateMod.F90 according to Keith Oleson for shrubs.
+ * First change tsai_min to be multiplied by 0.5 instead of 0.65, and
+ * second to lower the tapering for shrubs (types 9 and 11) to 10 with 200 for other woody plants.
+
+ main/aerdepMod.F90 ./aerdepMod.F90
+ main/clmtype.F90 ./clmtype.F90
+ main/clmtypeInitMod.F90 ./clmtypeInitMod.F90
+ main/decompInitMod.F90 ./decompInitMod.F90
+ main/driver.F90 ./driver.F90
+ main/filterMod.F90 ./filterMod.F90
+ main/histFileMod.F90 ./histFileMod.F90
+ main/histFldsMod.F90 ./histFldsMod.F90
+ main/initializeMod.F90 ./initializeMod.F90
+ main/pftdynMod.F90 ./pftdynMod.F90
+ main/subgridRestMod.F90
+
+ biogeochem/CNAnnualUpdateMod.F90 ./CNAnnualUpdateMod.F90
+ biogeochem/CNBalanceCheckMod.F90 ./CNBalanceCheckMod.F90
+ biogeochem/CNEcosystemDynMod.F90 ./CNEcosystemDynMod.F90
+ biogeochem/CNVegStructUpdateMod.F90 ./CNVegStructUpdateMod.F90
+
+ biogeophys//BalanceCheckMod.F90 ./BalanceCheckMod.F90
+ biogeophys//SurfaceAlbedoMod.F90 ./SurfaceAlbedoMod.F90
+ biogeophys//UrbanInputMod.F90
+
+Summary of testing:
+
+ bluefire:
+ PASS ERS.f45_g35.I_2000.bluefire
+ PASS ERS.f19_g16.I_1850.bluefire
+ PASS ERS.f19_g16.I_1850-2000.bluefire
+ PASS ERB.f09_g16.I_1948_2004.bluefire
+ SFAIL ERH.f10_f10.I_1850_CN.bluefire.b16+pretag
+ * code exact restarts when threaded but using only 1 thread
+ jaguar:
+ kraken:
+ lightning/pathscale:
+ bangkok/lf95:
+ breeze/gale/hail/gust/ifort:
+
+CLM tag used for the baseline comparison tests if applicable:
+
+Changes answers relative to baseline:
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+Tag name: clm3_6_40
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Thu May 28 15:17:11 MDT 2009
+One-line Summary: Fix openMP bug, add fndepdyn ncl script, fix interpinic for urban, add mkharvest to mksurfdata, new spinups, turn CLAMP on for CASA or CN
+
+Purpose of changes:
+
+Fix hybrid/open-MP mode bug, and testing for hybrid/open-MP. Add ncl script to
+time-interpolate between 1850 and 2000 for fndepdat dataset, for fndepdyn version. Fix
+interpinic for urban and cndv (jet/oleson/slevis). Update aerdepregrid.ncl and
+ndepregrid.ncl scripts. Add mkharvest fields to mksurfdata. Remove furbinp and just use
+fsurdat (leave forganic, so can remove to turn off). Begin to add an option to build
+with ccsm makefiles, for test-suite. Remove archiving, branching and resub from last run
+script in models/lnd/clm/bld. New spin-up files for 1850 and 2000 for 1.9x2.5 and 1850
+for CN. Make sure CLAMP is turned on for either CASA or CN. Change testing years to
+2002-2003 so same as for ccsm tests.
+
+Bugs fixed (include bugzilla ID): 954 (hybrid problem)
+ 959 (test suite NOT testing hybrid)
+ 965 (hybrid problem for high-proc count)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 966 (Hybrid restart problem on bluefire)
+ 967 (PIO bounds problem on jaguar)
+ 974 (bug in pftdyn mode)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Start adding option to build with ccsm Makefiles
+
+ By default turn on CLAMP when either CN or CASA is enabled
+
+Describe any changes made to the namelist: Remove furbinp (use fsurdat)
+
+List any changes to the defaults for the boundary datasets: New spinup files
+
+ clmi.IQmp17_1950-01-01_1.9x2.5_gx1v6_simyr1850_c090509.nc
+ clmi.IQmp17_2000-01-01_1.9x2.5_gx1v6_simyr2000_c090509.nc
+ clmi.BCN_0093-01-01_1.9x2.5_gx1v6_simyr1850_c090527.nc
+
+Describe any substantial timing or memory changes: Faster because of a fix to a I/O
+ write bug in datm7
+
+Code reviewed by: self, forrest, mvertens, oleson, jet (relevant portions from them)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, datm7, csm_share
+
+ scripts to scripts4_090527
+ datm7 to datm7_090518
+ csm_share to share3_090512
+
+List all files eliminated:
+
+D models/lnd/clm/bld/build-streams -- Remove phasing out old run scripts, another
+ version exists in scripts/ccsm_utils/Tools/build_streams
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>>>>>>>> Add harvest fields to surface datasets
+A models/lnd/clm/tools/mksurfdata/mkharvestMod.F90 ------------- New module to handle harvest fields
+
+>>>>>>>>>>>>>>>>>>>> New files for ccsm_bld option
+A models/lnd/clm/bld/config_files/Macros.yong_g95 -------------- Macro's file for my Darwin Mac-OSX laptop
+A models/lnd/clm/bld/config_files/Macros.breeze_intel ---------- Macro's file for intel on breeze.
+A models/lnd/clm/bld/config_files/TopCCSMBldMakefile.in -------- Top level makefile for a ccsm_bld
+
+>>>>>>>>>>>>>>>>>>>> New scripts to regrid all aerosol/nitrogen deposition resolutions and create
+ transient Nitrogen-Deposition
+A models/lnd/clm/tools/ncl_scripts/runDepositionRegrid.pl ------ Run regrid for many resolutions for
+ aerosol and nitrogen deposition
+A models/lnd/clm/tools/ncl_scripts/ndeplintInterp.ncl ---------- Linearly interpolate nitrogen-deposition
+ between 1850 and 2000 to get
+ transient nitrogen deposition.
+
+>>>>>>>>>>>>>>>>>>>> New serial and open-MP tests
+A models/lnd/clm/test/system/config_files/17p_cnexitspinupsc_do
+A models/lnd/clm/test/system/config_files/17p_cnadspinupsc_do
+A models/lnd/clm/test/system/config_files/17p_vodsrsc_ds
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>>>>>>> Fix hybrid bug (959), some tweaks, change dates to 2002/2003 to correspond with
+ data checked in
+M models/lnd/clm/test/system/tests-driver.sh -------------------- Add -mach option to configure,
+ able to set CLM_THREADS as input
+M models/lnd/clm/test/system/tests_pretag_bluefire -------------- Move f19_g16 test closer to
+ beginning of list
+M models/lnd/clm/test/system/config_files/17p_cnsc_dh ----------- Turn supln off
+M models/lnd/clm/test/system/config_files/17p_cnsc_dm ----------- Turn supln off
+M models/lnd/clm/test/system/config_files/17p_cnsc_do ----------- Turn supln off
+M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dh --- Turn supln on
+M models/lnd/clm/test/system/config_files/17p_cnadspinupsc_dm --- Turn supln on
+M models/lnd/clm/test/system/mknamelist ------------------------- Set number of threads by input CLM_THREADS
+M models/lnd/clm/test/system/input_tests_master ----------------- Change all start dates to 2002/2003 to
+ correspond with data checked in
+M models/lnd/clm/test/system/README ----------------------------- Document that can set CLM_THREADS
+M models/lnd/clm/test/system/TSM.sh ----------------------------- Set number of threads by input
+ CLM_THREADS / handle cold start
+
+>>>>>>>>>>>>>>>>>>>> Add harvest fields
+M models/lnd/clm/tools/mksurfdata/ncdio.F90 --------------------- Add nf_get_att_text
+M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 ----------------- Write out harvest fields
+M models/lnd/clm/tools/mksurfdata/mkvarpar.F90 ------------------ Formatting change
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ------------------ Call mkharvest_init, mkharvest,
+ and add harvest fields to file
+M models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850.txt ---------- Point to new landuse files
+M models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850-2005.txt ----- Point to new landuse files
+M models/lnd/clm/tools/mksurfdata/pftdyn_simyr2000.txt ---------- Point to new landuse files
+M models/lnd/clm/tools/mksurfdata/Srcfiles ---------------------- Add mkharvestMod.F90 file to list
+
+>>>>>>>>>>>>>>>>>>>> Fix interpinic for urban
+M models/lnd/clm/tools/interpinic/interpinic.F90 ---- Changes from Keith Oleson/John Truesdale to
+ handle urban
+M models/lnd/clm/tools/interpinic/runinit_ibm.csh --- Tweak sim_years, maxpft, and start times
+
+>>>>>>>>>>>>>>>>>>>> Fix regrid scripts for new sim_yr
+M models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl --- Add sim_yr, document better, add time coord.
+ variable
+M models/lnd/clm/tools/ncl_scripts/ndepregrid.ncl ----- Add sim_yr, figure out file-type from it,
+ document better, transient files use
+ lowercase lat, lon instead of LAT, LON.
+
+>>>>>>>>>>>>>>>>>>>> Fix hybrid bug (959), deprecate old scripts
+M models/lnd/clm/bld/configure ----------------- By default turn on CLAMP when either CN or CASA is enabled,
+ start adding ccsm_bld option, and -mach option.
+M models/lnd/clm/bld/mkSrcfiles ---------------- Get it to match scripts version of same thing
+M models/lnd/clm/bld/mkDepends ----------------- Get it to match scripts version of same thing,
+ remove Darwin kludge for assert.h (as has been renamed)
+M models/lnd/clm/bld/queryDefaultXML.pm -------- Handle return characters in values
+M models/lnd/clm/bld/config_files/Makefile.in -- Get rid of SGI, Nec-SX6, ES, Cray-X1 build options,
+ tweak Linux build
+M models/lnd/clm/bld/listDefaultNamelist.pl ---- Add option to do all resolutions, correct prints
+M models/lnd/clm/bld/build-namelist ------------ Add drv_in namelist "ccsm_pes" setting threads to
+ OMP_NUM_THREADS value, remove furbinp file
+M models/lnd/clm/bld/create_newcase ------------ Document that this script is deprecated
+M models/lnd/clm/bld/run-ibm.csh --------------- Remove archiving, change defaults,
+ add notes that script is deprecated
+M models/lnd/clm/bld/README -------------------- Remove files taken out
+M models/lnd/clm/bld/config_files/config_sys_defaults.xml ----- Add default mach settings
+M models/lnd/clm/bld/config_files/config_definition.xml ------- Add mach and ccsm_bld settings
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Remove furbinp, add task thread layouts
+ for ccsm_pe namelist
+M models/lnd/clm/bld/namelist_files/datm.streams.template.xml - Make same as datm7 version
+ (except using %p instead of DIN_LOC_ROOT)
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - New finidat spinup files for 1.9x2.5,
+ 1850, 2000 and 1850-CN
+
+>>>>>>>>>>>>>>>>>>>> Changes from Forrest Hoffman to fix hybrid issues on jaguar (bug 954 and more)
+M models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90 -- Add lbc,ubc
+M models/lnd/clm/src/biogeochem/CNAnnualUpdateMod.F90 -- Add lbc, ubc, lbp, ubp
+M models/lnd/clm/src/biogeochem/CNEcosystemDynMod.F90 -- Explicitly dimension filters to "ubp-lbp+1"
+ rather than ":"
+
+>>>>>>>>>>>>>>>>>>>> Fix hybrid issues (bug 954), add #ifdefs to clmtype so can use CLAMP with CASA,
+ remove CSD and USE_OMP junk
+M models/lnd/clm/src/main/driver.F90 ------------------- Remove CSD directives and USE_OMP.
+ Add more variables to private for OMP loops
+ (forrest) (bug 954)
+ Pass array bounds to dynland_hwcontent
+ (mvertens) (bug 954).
+ Pass array bounds needed by Forrest's
+ biogeochem changes above.
+M models/lnd/clm/src/main/decompInitMod.F90 ------------ Make a line shorter (with continue lines)
+M models/lnd/clm/src/main/subgridRestMod.F90 ----------- Make a line shorter (with continue lines)
+M models/lnd/clm/src/main/aerdepMod.F90 ---------------- Remove generic save statement, add save for
+ each data instantiation
+M models/lnd/clm/src/main/clmtypeInitMod.F90 ----------- Add #ifdefs from casafire branch to limit
+ clmtype size
+M models/lnd/clm/src/main/initializeMod.F90 ------------ Remove USE_OMP and CSD directives
+M models/lnd/clm/src/main/pftdynMod.F90 ---------------- #ifdef pftdyn_cnbal
+M models/lnd/clm/src/main/histFileMod.F90 -------------- Remove CSD directives
+M models/lnd/clm/src/main/controlMod.F90 --------------- Remove furbinp, remove UNICOSMP and SSP complexity
+M models/lnd/clm/src/main/filterMod.F90 ---------------- Remove CSD directives
+M models/lnd/clm/src/main/clmtype.F90 ------------------ Add #ifdefs from casafire branch to limit
+ clmtype size
+M models/lnd/clm/src/main/histFldsMod.F90 -------------- Remove KO comments
+
+>>>>>>>>>>>>>>>>>>>> Fix hybrid issues (bug 954 and 965), use fsurdat instead of furbinp file for urban input
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90 ---- Pass in array bounds (mvertens) (bug 954)
+M models/lnd/clm/src/biogeophys/UrbanInputMod.F90 ------ Use fsurdat instead of separate furbinp file
+M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 --- Remove num_solar logic that caused an early exit
+ (bug 965)
+
+Summary of testing:
+
+ bluefire: All PASS except
+007 erA91 TER.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 7
+008 brA91 TBR.sh _sc_dh clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 6
+011 erD91 TER.sh _persc_dh clm_per^nl_urb 20021231:NONE:1200 4x5 gx3v5 72+72 cold ...............FAIL! rc= 7
+013 smH74 TSM.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test@1000-1004 -1100 cold FAIL! rc= 8
+014 blH74 TBL.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test@1000-1004 -100 cold FAIL! rc= 4
+017 erE91 TER.sh 4p_vodsrsc_dh clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 10+38 arb_ic .........FAIL! rc= 7
+018 brE91 TBR.sh 4p_vodsrsc_dh clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v5 72+72 arb_ic ......FAIL! rc= 6
+022 erF92 TER.sh 17p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 10+38 cold ..........FAIL! rc= 7
+023 brF92 TBR.sh 17p_vodsrsc_dm clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v5 72+72 cold .......FAIL! rc= 6
+009 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+010 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+011 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+012 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+013 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+021 erC61 TER.sh _sc_dh clm_std^nl_urb 20021001:NONE:1800 1.9x2.5 gx1v6 10+38 cold ..............FAIL! rc= 7
+022 brC61 TBR.sh _sc_dh clm_std^nl_urb_br 20021001:NONE:1800 1.9x2.5 gx1v6 -3+-3 cold ...........FAIL! rc= 6
+025 erH51 TER.sh 17p_cnnsc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 10+38 cold ......FAIL! rc= 7
+027 blH51 TBL.sh 17p_cnnsc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS@2000 48 cold .........FAIL! rc= 5
+029 smLI1 TSM.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+030 erLI1 TER.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+031 brLI1 TBR.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+032 blLI1 TBL.sh _sc_dh clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+036 erLD1 TER.sh _sc_dh clm_std^nl_urb 20030101:NONE:1800 2.65x3.33 USGS -5+-5 arb_ic ...........FAIL! rc= 7
+007 brJ61 TBR.sh 4p_casasc_dh clm_std^nl_urb_br 20021230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 6
+ jaguar: All PASS except
+005 smA91 TSM.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 10 <<< bug 967
+006 erA91 TER.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 5
+007 brA91 TBR.sh _sc_dh clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 5
+009 smE92 TSM.sh 4p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 96 arb_ic ............FAIL! rc= 10 <<< bug 967
+010 erE92 TER.sh 4p_vodsrsc_dm clm_std^nl_urb 20021230:NONE:1800 4x5 gx3v5 10+38 arb_ic .........FAIL! rc= 5
+011 brE92 TBR.sh 4p_vodsrsc_dm clm_std^nl_urb_br 20021230:NONE:1800 4x5 gx3v5 72+72 arb_ic ......FAIL! rc= 5
+013 smEH2 TSM.sh 4p_vodsrsc_dm clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 10 <<< bug 967
+014 erEH2 TER.sh 4p_vodsrsc_dm clm_std^nl_urb 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 10+38 arb_icFAIL! rc= 5
+015 brEH2 TBR.sh 4p_vodsrsc_dm clm_std^nl_urb_br 20021231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 24+24 arbFAIL! rc= 5
+017 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+018 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 10+38 arb_ic ......FAIL! rc= 5
+019 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+021 smH92 TSM.sh 17p_cnnsc_dm clm_ndepdyn 20020101:NONE:1800 4x5 gx3v5@2000 96 cold .............FAIL! rc= 8
+022 erH92 TER.sh 17p_cnnsc_dm clm_ndepdyn 20020101:NONE:1800 4x5 gx3v5@2000 10+38 cold ..........FAIL! rc= 5
+023 brH92 TBR.sh 17p_cnnsc_dm clm_ndepdyn 20020101:NONE:1800 4x5 gx3v5@2000 72+72 cold ..........FAIL! rc= 5
+025 smJ62 TSM.sh 4p_casasc_dm clm_std^nl_urb 20021230:NONE:1800 1.9x2.5 gx1v6 96 cold ...........FAIL! rc= 10 <<< bug 967
+026 erJ62 TER.sh 4p_casasc_dm clm_std^nl_urb 20021230:NONE:1800 1.9x2.5 gx1v6 10+38 cold ........FAIL! rc= 5
+027 brJ62 TBR.sh 4p_casasc_dm clm_std^nl_urb_br 20021230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 5
+034 smLI2 TSM.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+035 erLI2 TER.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+036 brLI2 TBR.sh _sc_dm clm_std 20020101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+ lightning/pathscale: All PASS except
+009 smA91 TSM.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 10
+010 erA91 TER.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 5
+011 brA91 TBR.sh _sc_dh clm_std^nl_urb_br 20030101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 5
+012 blA91 TBL.sh _sc_dh clm_std^nl_urb 20030101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 4
+017 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+018 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+019 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+020 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 4
+021 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+023 erJ42 TER.sh 4p_casasc_dm clm_std^nl_urb 20021230:NONE:1800 10x15 USGS 10+38 cold ...........FAIL! rc= 7
+024 brJ42 TBR.sh 4p_casasc_dm clm_std^nl_urb_br 20021230:NONE:1800 10x15 USGS 72+72 cold ........FAIL! rc= 6
+026 smL51 TSM.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+027 erL51 TER.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+028 brL51 TBR.sh _sc_dh clm_std^nl_urb_br 20020115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+029 blL51 TBL.sh _sc_dh clm_std^nl_urb 20020115:NONE:1800 10x15 USGS 48 arb_ic ..................FAIL! rc= 4
+032 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 6
+035 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+036 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+037 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 5
+ bangkok/lf95:
+024 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+025 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+026 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 72+72 arb_ic ......FAIL! rc= 5
+028 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic .........FAIL! rc= 10
+051 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+052 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+055 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 4
+056 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ breeze/gale/hail/gust/ifort: All PASS up to the pftdyn test
+016 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS@1000-1002 144 arb_ic ........FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_39
+
+Changes answers relative to baseline: No bit-for-bit (unless compare cases using the new vs old spin-up files)
+
+===============================================================
+===============================================================
+Tag name: clm3_6_39
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Thu May 7 14:18:08 MDT 2009
+One-line Summary: Bug fix for script version and maxpatchpft back to numpft+1
+
+Purpose of changes: Bug fixes for two issues, script version to set CLM_DEMAND="null" instead of none
+ And reset default maxpatch_pft=numpft+1 instead of 4 which crept in on clm3_6_38
+
+Bugs fixed (include bugzilla ID): 943 (CLM_DEMAND="null")
+ 946 (default maxpatchpft)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 974 (bug in pftdyn mode)
+ 977 (bug writing out 3D time-const data)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Change default for maxpatch_pft back to numpft+1
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+
+ scripts to scripts4_090506 (default CLM_DEMAND is null rather than none)
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/config_files/config_definition.xml ----- maxpft=numpft+1
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml - Allow sim_year=1000 for test datasets
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml Add co2_ppmv for test_yr=1000
+M models/lnd/clm/src/main/driver.F90 ------------------------ Don't write out message about dynamic pft every time-step
+M models/lnd/clm/src/main/lnd_comp_mct.F90 ------------------ Set iulog for non-masterproc processors
+M models/lnd/clm/test/system/input_tests_master ------------- Put year-range for pftdyn 10x15 tests
+
+Summary of testing: Limited
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_38
+
+Changes answers relative to baseline: Default number of PFT's is numpft+1 instead of 4.
+
+===============================================================
+===============================================================
+Tag name: clm3_6_38
+Originator(s): erik (erik)
+Date: Wed May 6 00:20:37 MDT 2009
+One-line Summary: New fsurdat for other resolutions, bug-fixes, deep wetlands to bedrock, new spinups for 1.9x2.5 1850, 2000
+
+Purpose of changes:
+
+New surfdata for all resolutions, and new pftdyn test datasets (1x1 and 10x15). Make sure
+furbinp/forganic/fsurdata consistent. New 1850 and 2000 spin-up for 1.9x2.5. Add in field
+to restart files needed for urban interpinic. Change deep wetlands to bedrock. Remove
+some output for urban and aerdep. fcov changes from Sean. Bring in history change from
+Dave (so only output static 3D fields on first h0 file). Bug fix for RTM bug from Keith
+O.
+
+Bugs fixed (include bugzilla ID): 941 (RTM output 6X too low)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 943 (CLM_DEMAND="null")
+ 946 (default maxpatchpft)
+ 974 (bug in pftdyn mode)
+ 977 (bug writing out 3D time-const data)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist: Remove step2init, add irad from datm
+ factorfn now null instead of unused.
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: swensosc, oleson, dlawren
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, csm_share,
+drv, datm7, mct, pio
+
+ scripts to scripts4_090505c
+ drv to vocemis-drydep08_drvseq3_0_16
+ datm7 to datm7_090505b
+ csm_share to share3_090429
+ pio to pio40_prod
+
+List all files eliminated: Remove old run scripts, pt-urban input data
+
+D models/lnd/clm/test/system/nl_files/clm_urb -- remove since urban is default
+D models/lnd/clm/tools/ncl_scripts/addgrid2spointurban.ncl -- only needed to create
+ urban pt surface datasets
+D models/lnd/clm/tools/ncl_scripts/clmi_increasesoillayer.ncl - only needed to go
+ from 10 layer to 15 layer finidat files.
+D models/lnd/clm/bld/urban_input
+D models/lnd/clm/bld/urban_input/vancouverCAN_fluxes.nc
+D models/lnd/clm/bld/urban_input/metropolis_fluxes.nc
+D models/lnd/clm/bld/urban_input/urbanc_alpha_fluxes.nc
+D models/lnd/clm/bld/urban_input/asphaltjungle_fluxes.nc
+D models/lnd/clm/bld/urban_input/surfdata_1x1_tropicAtl_urb3den_simyr2000_c090320.nc
+D models/lnd/clm/bld/urban_input/surfdata_1x1_brazil_urb3den_simyr2000_c090320.nc
+D models/lnd/clm/bld/urban_input/mexicocityMEX_fluxes.nc
+D models/lnd/clm/bld/urban_input/surfdata_1x1_brazil_urb3den_simyr1850_c090317.nc
+D models/lnd/clm/bld/run-pc.csh
+D models/lnd/clm/bld/run-lightning.csh
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>>> configure defaults for everything on, update for new datm7
+ new sim_year for pftdyn test datasets. New fsurdat for all
+ resolutions, furbinp and forganic=fsurdat.
+M models/lnd/clm/bld/configure -------------------------------- document defaults
+correctly
+M models/lnd/clm/bld/config_files/config_definition.xml ------- defaults for:
+ dust: on, maxpft:numpft+1, progsslt:on, rtm:on
+M models/lnd/clm/bld/listDefaultNamelist.pl ------------------- all -res all option
+M models/lnd/clm/bld/clm.cpl7.template ------------------------ use defaults for
+ dust, progsslt, and rtm. Don't demand furbinp, or forganic
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml --- Remove step2init,
+change defaults for factorfn, and sim_year (for test ranges 1000-1002, and 1000-1004)
+M models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml Factorfn=null, rm step2init
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - New 1.9x2.5 finidat,
+ new fsurdat for all resolutions,
+M models/lnd/clm/bld/build-namelist --------------------------- furbinp,forganic no
+longer clm_demand
+ furbinp = fsurdat, forganic = fsurdat, fsurdat no longer need
+ furbinp and fpftdyn, finidat doesn't need furbinp, remove step2init
+>>>>>>>>>>>>>>>>
+M models/lnd/clm/src/main/aerdepMod.F90 ---------------- log output only to masterproc
+M models/lnd/clm/src/main/iniTimeConst.F90 ------------- remove urban log output
+M models/lnd/clm/src/main/subgridRestMod.F90 ----------- add cols1d_ityp
+M models/lnd/clm/src/main/clmtypeInitMod.F90 ----------- add fsat
+M models/lnd/clm/src/main/iniTimeConst.F90 ------------- make deep wetlands bedrock
+M models/lnd/clm/src/main/histFileMod.F90 -------------- only write out static fields
+ to h0 tapes on nstep=0
+M models/lnd/clm/src/main/clmtype.F90 ------------------ add fcov and fsat
+M models/lnd/clm/src/main/histFldsMod.F90 -------------- add fsat to history files
+M models/lnd/clm/src/main/mkarbinitMod.F90 ------------- make wetlands bedrock
+M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 - make wetlands bedrock
+M models/lnd/clm/src/biogeophys/UrbanInitMod.F90 ------- add fcov/fsat
+M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 ------ add fcov/fsat
+M models/lnd/clm/src/biogeophys/SoilHydrologyMod.F90 --- add fcov/fsat
+M models/lnd/clm/src/biogeophys/HydrologyLakeMod.F90 --- add fcov/fsat
+M models/lnd/clm/src/riverroute/RtmMod.F90 ------------- Fix RTM bug so accumulate
+ during RTM intervals
+>>>>>>>>>>>>>>>> Remove run-pc/run-lightning tests, remove CLMNCEP, update to beta15
+ lightning no parallel gmake, no clm_demand for furbinp, change clm_urb
+ to clm_std, add sim_year for pftdyn tests, add serial vodsrsc tests
+M models/lnd/clm/test/system/tests_pretag_bangkok
+M models/lnd/clm/test/system/tests_posttag_lightning
+M models/lnd/clm/test/system/test_driver.sh --------- update to beta15, lightning gmake no parallel
+M models/lnd/clm/test/system/mknamelist ------------- remove CLMNCEP option
+M models/lnd/clm/test/system/TCSruncase.sh ---------- remove lightning, pc option
+M models/lnd/clm/test/system/nl_files/clm_per ------- no clm_demand on furbinp
+M models/lnd/clm/test/system/nl_files/clm_per0 ------ no clm_demand on furbinp
+M models/lnd/clm/test/system/nl_files/clm_urb1pt ---- no clm_demand on furbinp
+M models/lnd/clm/test/system/input_tests_master ----- change clm_urb to clm_std
+ add sim_year for pftdyn tests, add serial vodsrsc
+ tests
+>>>>>>>>>>>>>>>> Change urban pt datasets from 1850 to 2000 sim_year.
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pl
+
+Summary of testing: limited testing on breeze, lightning, and bangkok
+
+ bluefire: All PASS except TBL up to test 27
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_37
+
+Changes answers relative to baseline: Yes, RTM 6X higher, surface datasets different
+ deep wetlands now bedrock
+
+===============================================================
+===============================================================
+Tag name: clm3_6_37
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Mon Apr 27 23:27:26 MDT 2009
+One-line Summary: Update faerdep dataset for 1.9x2.5 to point to version cice is using for 1850 and 2000
+
+Purpose of changes: Point to same version of faerdep datasets used by cice for 1.9x2.5 1850/2000
+ This was needed for the ccsm4_0_beta15 tag.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 941 (RTM output 6X too low)
+ 974 (bug in pftdyn mode)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ http://bugs.cgd.ucar.edu/
+
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets:
+ New faerdep datasets for 1.9x2.5 used by cice (only difference is time coord)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts
+ scripts to scripts4_090427b
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+
+Summary of testing: None, other than build-namelist for 1.9x2.5 sim_year=1850/2000
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_36
+
+Changes answers relative to baseline: Should be identical
+
+===============================================================
+===============================================================
+Tag name: clm3_6_36
+Originator(s): erik (erik)
+Date: Mon Apr 27 14:10:13 MDT 2009
+One-line Summary: Handle transient aerosol, make maxpatchpft=numpft+1 default, new datasets for 1.9x2.5 and 0.9x1.25, change doalb
+
+Purpose of changes:
+
+Changes so can do aerosol transient time-series (1850-2000) (kauff). New surfdata
+datasets for 1.9x2.5 and 0.9x1.25 (1850 and 2000). New 1850-2000 pftdyn dataset for
+1.9x2.5. New aerosol and ndep for 1.9x2.5 (1850 and 2000). Change to doalb from Mariana.
+Make maxpatchpft=numpft+1 the default and remove all finidat files
+
+Bugs fixed (include bugzilla ID): 936 (create_test bug)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 935 (RTM warning NOT an error)
+ 937 (undef value on bangkok for maxpatchpft=numpft+1 case)
+ 941 (RTM output 6X too low)
+ 974 (bug in pftdyn mode)
+ 1068 (Problems interpolated deposition datasets to high res)
+ 1069 (Nitrogen Deposition datasets have wrong units)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: maxpatchpft default is now numpft+1
+
+Describe any changes made to the namelist: build-namelist now allows 1850-2000 for sim_year for transient datasets
+ aerdep now chooses a transient dataset for this case as well
+
+List any changes to the defaults for the boundary datasets:
+ New aerosol deposition and nitrogen deposition datasets for 1.9x2.5 and transient
+ New pftdyn dataset for 1.9x2.5 for 1850-2000
+ New surfdata for 1.9x2.5 and 0.9x1.25
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, kauff (aer transient), mvertens (doalb changes)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm7, and pio
+ scripts to scripts4_090424
+ drv to vocemis-drydep08_drvseq3_0_14
+ datm7 to datm7_090406
+ pio to pio38_prod
+
+List all files eliminated: None
+
+List all files added and what they do: Add file for generic settings NOT used by a specific model component
+
+A models/lnd/clm/bld/namelist_files/namelist_defaults_overall.xml
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>>>>>>>>>>> Add cice decomp info, use xml input file for PE change
+M models/lnd/clm/test/system/config_files/ext_ccsm_seq_10x15_dh
+M models/lnd/clm/test/system/config_files/ext_ccsm_seq_4x5_dh
+M models/lnd/clm/test/system/config_files/ext_ccsm_seq_1.9x2.5_dh
+M models/lnd/clm/test/system/config_files/ext_ccsm_seq_64x128_s
+M models/lnd/clm/test/system/config_files/ext_ccsm_seq_0.9x1.25_dh
+M models/lnd/clm/test/system/TCT_ccsmseq.sh --- use xml file format for PE change
+>>>>>>>>>>>>>>>>>>>>>>>> Base LANDMASK on SUM(PCT_PFT) rather than LANDFRAC_PFT
+M models/lnd/clm/tools/ncl_scripts/pftdyntest2raw.ncl
+>>>>>>>>>>>>>>>>>>>>>>>> Add 1850-2000 simyr option
+M models/lnd/clm/bld/config_files/config_definition.xml ------ maxpatchpft default
+is numpft+1
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml -- Add 1850-2000 to valid sim_year values
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml - Remove finidat files,
+ remove data in namelist_defaults_overall,
+ new fsurdat,forganic,furbinp for 1.9x2.5 and 0.9x1.25
+ 1850-2000 PFT dataset for 1.9x2.5
+ new aerdep and ndep datasets for 1.9x2.5 and 0.9x1.25,
+ and transient 1850-2000 aerdep datasets
+M models/lnd/clm/bld/build-namelist ----------- Be careful if datasets are picked
+based on full sim_year (which could be 1850-2000) or the first year (1850 finidat,
+fsurdat files)
+M models/lnd/clm/bld/listDefaultNamelist.pl --- Use list of defaults files
+M models/lnd/clm/bld/queryDefaultNamelist.pl -- Use list of defaults files, remove scpto option
+M models/lnd/clm/bld/queryDefaultXML.pm ------- Use list of defaults files
+>>>>>>>>>>>>>>>>>>>>>>>> doalb changes from Mariana Vertenstein (branches/new_doalb)
+>>>>>>>>>>>>>>>>>>>>>>>> remove caldayp1 use next_swcday sent from atm
+M models/lnd/clm/src/biogeochem/DGVMMod.F90 ---------- remove caldayp1, send nextsw_cday
+M models/lnd/clm/src/main/clm_comp.F90 --------------- don't calculate caldayp1, calc declinp1 based on nextsw_cday
+M models/lnd/clm/src/main/driver.F90 ----------------- Pass nextsw_cday instead of caldayp1
+M models/lnd/clm/src/main/initSurfAlbMod.F90 --------- Don't pass calday and declin
+M models/lnd/clm/src/main/lnd_comp_mct.F90 ----------- Remove never_doAlb logic, pass nextsw_cday down
+M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 - cosz based on nextsw_cday NOT caldayp1
+M models/lnd/clm/src/biogeophys/UrbanMod.F90 --------- Do NOT pass calday, declin
+>>>>>>>>>>>>>>>>>>>>>>>> aerdep changes from Brian Kauffman (cbgcdev05_clm3_6_35)
+M models/lnd/clm/src/main/aerdepMod.F90 -- Time-interpolation done each time-step (rather than just each day) method slightly different.
+ Also allows transient file where uses first year
+ until reaches middle years, then after last year continues to use last year.
+
+Summary of testing:
+
+ bluefire: All PASS except TBL and... up to test 62
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+038 smCA4 TSM.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 10
+039 erCA4 TER.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ......FAIL! rc= 5
+040 brCA4 TBR.sh _sc_ds clm_urb^nl_urb_br 19981001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic ...FAIL! rc= 5
+042 smCA8 TSM.sh _sc_ds clm_urb^nl_urb 19971230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 10
+056 smI58 TSMcnspinup.sh 17p_cnadspinupsc_dh 17p_cnexitspinupsc_dh 17p_cnsc_dh clm_std 19980115:NONEFAIL! rc= 5
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_35
+
+Changes answers relative to baseline: Yes -- default for maxpatchpft is numpft+1
+ rather than 4
+                                          And the aerosol time-interpolation method is
+                                          different and also does interpolation
+                                          for every time-step NOT held constant each day.
+
+===============================================================
+===============================================================
+Tag name: clm3_6_35
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Mon Apr 20 15:19:17 MDT 2009
+One-line Summary: Fix major logic bug in mksurfdata
+
+Purpose of changes: Fix major logic bug in mksurfdata (bug 934) which requires us to recreate any surface datasets
+ created with clm3_6_32. The bug zero'd out LAI for some PFT's and biased the LAI values
+ (LAI, SAI, veg-bot, veg-top).
+
+ This is a note from dlawren
+ "I have taken a look at the new surface files and they look correct to me. LAI
+ is defined everywhere. I did a quick test using this surface dataset with a
+ spunup file from Keith's prior 1850 simulation and it worked fine (no errors).
+ I also confirmed that indeed the gridbox mean LAI is different by up to about
+ +-0.5. In most places the difference is below +-0.1."
+
+
+Bugs fixed (include bugzilla ID): 934 (pftdyn logic bug)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 941 (RTM output 6X too low)
+ 974 (bug in pftdyn mode)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: Self (but new 1.9x2.5 surface dataset checked by dlawren and lawrence)
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+A models/lnd/clm/tools/ncl_scripts/pftdyntest2raw.ncl --- Create raw pftdyn test datasets, so can create new ones.
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/tools/mksurfdata/mklaiMod.F90 ---------------- Fix pftdyn logic error.
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ---------------- Move soil-text calc higher up, allow more space for filenames
+M models/lnd/clm/tools/mksurfdata/mksurfdata.globalirrig ------ Make output in double precision
+M models/lnd/clm/tools/mksurfdata/mksurfdata.regional --------- Make output in double precision
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pftdyn ----------- Make output in double precision
+M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept --------- Make output in double precision
+M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist --------- Make output in double precision
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pl --------------- Get T62, 2x2.5 and qtr deg res's, only do 2000 for urban single-point
+M models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850-2005.txt --- Allow larger size for filenames
+M models/lnd/clm/tools/mksurfdata/pftdyn_simyr2000.txt -------- Allow larger size for filenames
+M models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850.txt -------- Allow larger size for filenames
+
+MM models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl ----------- Add svn keywords
+MM models/lnd/clm/tools/ncl_scripts/clmi_increasesoillayer.ncl - Add svn keywords
+M models/lnd/clm/tools/ncl_scripts/README --------------------- Update doc on files
+
+Summary of testing: No testing except for mksurfdata on bluefire
+
+001 sm774 TSMtools.sh mksurfdata tools__ds singlept .............................................PASS
+002 sm754 TSMtools.sh mksurfdata tools__s globalirrig ...........................................PASS
+003 sm756 TSMtools.sh mksurfdata tools__s pftdyn ................................................PASS
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_34
+
+Changes answers relative to baseline: no bit-for-bit (other than mksurfdata)
+
+===============================================================
+===============================================================
+Tag name: clm3_6_34
+Originator(s): oleson (Oleson Keith 1332 CGD)
+Date: Sun Apr 19 09:34:43 MDT 2009
+One-line Summary: Fix bangkok urban bug
+
+Purpose of changes: Fix urban bug found from bangkok testing (#927) and eliminate potential water balance error
+
+Bugs fixed (include bugzilla ID): 927
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 934 (pftdyn logic bug)
+ 941 (RTM output 6X too low)
+ 974 (bug in pftdyn mode)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: myself, Erik Kluzek, Sean Swenson
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 .... Change loop over all columns to filter_nourbanc
+M models/lnd/clm/src/biogeophys/UrbanMod.F90 .... Change some net_solar fields from intent(out) to intent(inout).
+ add soilalpha_u restriction on soil evaporation/transpiration selection for pervious road (this second
+ change is bit for bit for all bluefire/bangkok testing, but will prevent small water balance errors in
+      special situations (e.g., perpetual January simulations))
+
+Summary of testing:
+
+ bluefire: All PASS except:
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 4
+056 smI58 TSMcnspinup.sh 17p_cnadspinupsc_dh 17p_cnexitspinupsc_dh 17p_cnsc_dh clm_std 19980115:NONEFAIL! rc= 5
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+060 blLI1 TBL.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+069 smJ61 TSM.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 96 cold ...........FAIL! rc= 10
+070 erJ61 TER.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 10+38 cold ........FAIL! rc= 5
+071 brJ61 TBR.sh 4p_casasc_dh clm_urb^nl_urb_br 19981230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 5
+072 blJ61 TBL.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 48 cold ...........FAIL! rc= 4
+082 bl744 TBLtools.sh mksurfdata tools__s namelist ..............................................FAIL! rc= 7
+084 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 6
+086 bl754 TBLtools.sh mksurfdata tools__s globalirrig ...........................................FAIL! rc= 6
+088 bl756 TBLtools.sh mksurfdata tools__s pftdyn ................................................FAIL! rc= 7
+093 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+094 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+095 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 4
+096 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 4
+097 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 4
+ jaguar:
+ kraken:
+ lightning/pathscale:
+ bangkok/lf95: All PASS except:
+008 blA92 TBL.sh _sc_dm clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 5
+011 blD91 TBL.sh _persc_dh clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 144 cold .................FAIL! rc= 5
+033 blH52 TBL.sh 17p_cnnsc_dm clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS@2000 48 cold .........FAIL! rc= 5
+034 smJ92 TSM.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 96 cold ...............FAIL! rc= 10
+035 erJ92 TER.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 10+38 cold ............FAIL! rc= 5
+036 brJ92 TBR.sh 4p_casasc_dm clm_urb^nl_urb_br 19981230:NONE:1800 4x5 gx3v5 72+72 cold .........FAIL! rc= 5
+037 blJ92 TBL.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 48 cold ...............FAIL! rc= 4
+041 blL51 TBL.sh _sc_dh clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS 48 arb_ic ..................FAIL! rc= 5
+048 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+049 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+052 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 8
+053 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ breeze/gale/hail/gust/ifort:
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_33
+
+Changes answers relative to baseline: No, bit for bit
+
+===============================================================
+===============================================================
+Tag name: clm3_6_33
+Originator(s): erik (erik)
+Date: Thu Apr 16 14:45:23 MDT 2009
+One-line Summary: Bring in dynpft changes from cbgc branch
+
+Purpose of changes: New method for dealing with dynamic land-use changes
+
+    morph routine casa() into casa_ecosystemDyn(), so casa is more similar to CN & DGVM,
+ and prepares casa code for adding additional carbon flux functionality.
+ Larger plan is to duplicate these and other mods from casafire branch on this branch.
+ Add new method for conserving heat & water wrt dynamic land use.
+    Conserves heat & water for any change in the land-unit, column, or pft arrangement.
+ when pftdyn is activated, "normalize" sum of new pft weights in a column
+ to be the same as the sum of the old pft weights
+ otherwise BalanceCheck will generate water/heat balance errors.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 927 (problem with urban on bangkok/lahey)
+ 934 (pftdyn logic bug)
+ 941 (RTM output 6X too low)
+ 974 (bug in pftdyn mode)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by:
+ B. Kauffman, D. Lawrence, G. Bonan, K. Oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated:
+
+ D biogeochem/CASASummary.F90 ................ code relocated inside CASAMod.F90
+
+List all files added and what they do:
+
+ A main/dynlandMod.F90 ............... new routine is here
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>>> models/lnd/clm/src
+ M biogeochem/CASAMod.F90 .......... Add in CASASummary.F90, add casa_ecosystemDyn subroutine
+ M main/pftdynMod.F90 .............. when pftdyn is active, "normalize" pft weights in a column
+ M biogeophys/BalanceCheckMod.F90 .. improved imbalance write statement
+ M main/driver.F90 ................. CASAsummary, CASAPhenology now called in
+                                     casa_ecosystemDyn()
+ M main/initSurfAlbMod.F90 ......... casa() renamed casa_ecosystemDyn()
+ M main/driver.F90 ......... call new routine here
+ M main/clmtype.F90 ......... define new fields
+ M main/clmtypeInitMod.F90 ......... init new fields
+ M main/histFldsMod.F90 ......... put new fields on hist file
+ M main/clm_atmlnd.F90 ......... heat imbalance is applied here
+ (to latent heat flux)
+ M riverroute/RtmMod.F90 ......... water imbalance is applied here (to runoff)
+
+Summary of testing:
+
+ bluefire: All PASS except pftdyn TBL tests and ...
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+056 smI58 TSMcnspinup.sh 17p_cnadspinupsc_dh 17p_cnexitspinupsc_dh 17p_cnsc_dh clm_std 19980115:NONEFAIL! rc= 5
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+069 smJ61 TSM.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 96 cold ...........FAIL! rc= 10
+070 erJ61 TER.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 10+38 cold ........FAIL! rc= 5
+071 brJ61 TBR.sh 4p_casasc_dh clm_urb^nl_urb_br 19981230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 5
+093 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+094 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+095 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 4
+096 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 4
+097 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 4
+ jaguar: All PASS except
+005 smA91 TSM.sh _sc_dh clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 10
+006 erA91 TER.sh _sc_dh clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 5
+007 brA91 TBR.sh _sc_dh clm_urb^nl_urb_br 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 5
+009 smE92 TSM.sh 4p_vodsrsc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 96 arb_ic ............FAIL! rc= 10
+010 erE92 TER.sh 4p_vodsrsc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 10+38 arb_ic .........FAIL! rc= 5
+011 brE92 TBR.sh 4p_vodsrsc_dm clm_urb^nl_urb_br 19981230:NONE:1800 4x5 gx3v5 72+72 arb_ic ......FAIL! rc= 5
+013 smEH2 TSM.sh 4p_vodsrsc_dm clm_urb^nl_urb 19981231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 10
+014 erEH2 TER.sh 4p_vodsrsc_dm clm_urb^nl_urb 19981231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 10+38 arb_icFAIL! rc= 5
+015 brEH2 TBR.sh 4p_vodsrsc_dm clm_urb^nl_urb_br 19981231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 24+24 arbFAIL! rc= 5
+021 smH92 TSM.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@2000 96 cold .............FAIL! rc= 10
+022 erH92 TER.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@2000 10+38 cold ..........FAIL! rc= 5
+023 brH92 TBR.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@2000 72+72 cold ..........FAIL! rc= 5
+025 smJ62 TSM.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 96 cold ...........FAIL! rc= 10
+026 erJ62 TER.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 10+38 cold ........FAIL! rc= 5
+027 brJ62 TBR.sh 4p_casasc_dm clm_urb^nl_urb_br 19981230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 5
+034 smLI2 TSM.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+035 erLI2 TER.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+036 brLI2 TBR.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+038 erP65 TSM_ccsmseq.sh ERS f19_g15 I ..........................................................FAIL! rc= 4
+039 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 4
+ lightning/pathscale: All PASS except pftdyn TBL tests and ...
+011 erA91 TER.sh _sc_dh clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 7
+012 brA91 TBR.sh _sc_dh clm_urb^nl_urb_br 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 5
+018 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+019 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+020 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+022 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...................FAIL! rc= 10
+023 smJ42 TSM.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 10x15 USGS 96 cold ..............FAIL! rc= 10
+024 erJ42 TER.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 10x15 USGS 10+38 cold ...........FAIL! rc= 5
+025 brJ42 TBR.sh 4p_casasc_dm clm_urb^nl_urb_br 19981230:NONE:1800 10x15 USGS 72+72 cold ........FAIL! rc= 5
+027 smL51 TSM.sh _sc_dh clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+028 erL51 TER.sh _sc_dh clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+029 brL51 TBR.sh _sc_dh clm_urb^nl_urb_br 19980115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+036 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+037 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+038 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 4
+ bangkok/lf95: All PASS except pftdyn TBL tests and ...
+005 smA92 TSM.sh _sc_dm clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 10
+006 erA92 TER.sh _sc_dm clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 5
+007 brA92 TBR.sh _sc_dm clm_urb^nl_urb_br 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 5
+009 smD91 TSM.sh _persc_dh clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 144 cold .................FAIL! rc= 10
+010 erD91 TER.sh _persc_dh clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 72+72 cold ...............FAIL! rc= 5
+030 smH52 TSM.sh 17p_cnnsc_dm clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS@2000 96 cold .........FAIL! rc= 10
+031 erH52 TER.sh 17p_cnnsc_dm clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS@2000 10+38 cold ......FAIL! rc= 5
+032 brH52 TBR.sh 17p_cnnsc_dm clm_urb^nl_urb_br 19980115:NONE:1800 10x15 USGS@2000 72+72 cold ...FAIL! rc= 5
+034 smJ92 TSM.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 96 cold ...............FAIL! rc= 10
+035 erJ92 TER.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 10+38 cold ............FAIL! rc= 5
+036 brJ92 TBR.sh 4p_casasc_dm clm_urb^nl_urb_br 19981230:NONE:1800 4x5 gx3v5 72+72 cold .........FAIL! rc= 5
+038 smL51 TSM.sh _sc_dh clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+039 erL51 TER.sh _sc_dh clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+040 brL51 TBR.sh _sc_dh clm_urb^nl_urb_br 19980115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+048 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+049 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+052 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 8
+053 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ breeze/gale/hail/gust/ifort: All PASS
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_32
+
+Changes answers relative to baseline: Only for pftdyn mode
+
+===============================================================
+================================================================================
+Tag name: clm3_6_32
+Originator(s): dlawren, erik, jet
+Date: Fri Apr 10 14:38:52 MDT 2009
+One-line Summary: Add irrigation area to mksrfdata, fix high-res and pftdyn problems
+
+Purpose of changes: Add irrigation area to mksrfdat tool, for irrigated area copy PFT=15 LAI and heights
+ into PFT=16, PFT=15 is unirrigated crop, PFT=16 is irrigated crop
+ fix pftdyn mode for mksurfdata (erik), bug fixes to mksurfdata from John Truesdale
+ script changes to make gx1v6 default.
+
+Bugs fixed (include bugzilla ID): 919 (pftdyn mode in mksurfdata)
+ 821 (problems running mksurfdata at high-res)
+ 357 (codes replicated in tools)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 920 (glacier_mec problems in mksurfdata)
+ 926 (pftdyn code needs to be shared in mksurfdata)
+ 927 (problem with urban on bangkok/lahey)
+ 934 (pftdyn logic bug)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Building with PIO is on by default
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: gx1v6 and 0.47 res
+ fix 5x5_amazon surface dataset.
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, jet, dlawren
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, drv, datm7,
+csm_share, esmf_wrf_timemgr
+
+ scripts to scripts4_090406
+ drv to vocemis-drydep08_drvseq3_0_13
+ datm7 to datm7_090403
+ csm_share to share3_090407
+ timemgr to esmf_wrf_timemgr_090402
+
+List all files eliminated: Remove text urban input files, globalurban mksurf namelist --
+as urban is default.
+
+D models/lnd/clm/bld/urban_input/metropolis_fluxes.txt
+D models/lnd/clm/bld/urban_input/urbanc_alpha_fluxes.txt
+D models/lnd/clm/bld/urban_input/asphaltjungle_fluxes.txt
+D models/lnd/clm/bld/urban_input/mexicocityMEX_fluxes.txt
+D models/lnd/clm/bld/urban_input/vancouverCAN_fluxes.txt
+D models/lnd/clm/tools/mksurfdata/mksurfdata.globalurban
+
+List all files added and what they do:
+
+A models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850-2005.txt . List of dynamic PFT files from 1850 to 2005
+A models/lnd/clm/tools/mksurfdata/pftdyn_simyr1850.txt ...... Dynamic PFT file for 1850
+A models/lnd/clm/tools/mksurfdata/pftdyn_simyr2000.txt ...... Dynamic PFT file for 2000
+A models/lnd/clm/tools/mksurfdata/mkirrig.F90 ............... calculates irrigated area from irrigated area on input dataset
+A models/lnd/clm/tools/mksurfdata/mksurfdata.globalirrig .... namelist file pointing to irrigated area source file
+A models/lnd/clm/bld/namelist_files/namelist_defaults_datm.xml datm namelist info
+A models/lnd/clm/bld/namelist_files/namelist_defaults_drv.xml drv namelist info
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>> Add firrig option, fix bugs, fix pftdyn mode
+M models/lnd/clm/tools/mksurfdata/mkvarctl.F90 .............. Add mksrf_firrig
+M models/lnd/clm/tools/mksurfdata/mklaiMod.F90 .............. Copy LAI in PFT=15 into PFT=16 if mksrf_irrig /= ''
+ use standard averaging for pftdyn
+M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 ............. Add mksrf_firrig
+M models/lnd/clm/tools/mksurfdata/README
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 .............. Add pct_irr to surface dataset if mksrf_irrig /= ''
+M models/lnd/clm/tools/mksurfdata/Srcfiles
+M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 .............. Adjust PCT_PFT for non-irrigated (PFT=15) and irrigated (PFT=16) crops
+M models/lnd/clm/tools/mksurfdata/mkglcmec.F90 .............. Check for divide by zero (JT)
+M models/lnd/clm/tools/mksurfdata/mkvarctl.F90 .............. Initialize files to blank
+M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 ............. Only output data needed for pftdyn files
+M models/lnd/clm/tools/mksurfdata/mkorganic.F90 ............. Allocate bug-fix (JT)
+M models/lnd/clm/tools/mksurfdata/mkurban.F90 ............... bug-fix (JT)
+M models/lnd/clm/tools/mksurfdata/areaMod.F90 ............... bug-fix (JT)
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 .............. add mkirrig, changes for pftdyn
+M models/lnd/clm/tools/mksurfdata/Srcfiles .................. add mkirrig.F90
+M models/lnd/clm/tools/mksurfdata/mkpftMod.F90 .............. if firrig => irrig/non-irrig crops
+>>>>>>>>>>>>>>> Always create files using the transient input raw datasets
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pl
+M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist
+M models/lnd/clm/tools/mksurfdata/mksurfdata.regional
+M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept
+M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist
+>>>>>>>>>>>>>>> Add needed fields (mask, LANDMASK) to urban datasets
+M models/lnd/clm/tools/ncl_scripts/addgrid2spointurban.ncl
+>>>>>>>>>>>>>>> Turn pio on, work with defaults
+M models/lnd/clm/bld/configure
+M models/lnd/clm/bld/config_files/config_definition.xml
+M models/lnd/clm/bld/run-ibm.csh
+M models/lnd/clm/bld/clm.cpl7.template
+M models/lnd/clm/bld/build-namelist
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+>>>>>>>>>>>>>>> Add mask,PCT_URBAN and LANDMASK to urban point input files
+M models/lnd/clm/bld/urban_input/vancouverCAN_fluxes.nc ..... Add mask
+M models/lnd/clm/bld/urban_input/metropolis_fluxes.nc ....... Add mask
+M models/lnd/clm/bld/urban_input/urbanc_alpha_fluxes.nc ..... Add mask
+M models/lnd/clm/bld/urban_input/asphaltjungle_fluxes.nc .... Add mask
+M models/lnd/clm/bld/urban_input/mexicocityMEX_fluxes.nc .... Changes from Keith, add mask
+>>>>>>>>>>>>>>>
+M models/lnd/clm/test/system/tests_pretag_bluefire - add pftdyn test
+M models/lnd/clm/test/system/test_driver.sh -------- use beta14
+M models/lnd/clm/test/system/input_tests_master ---- fix TBR tests,
+M models/lnd/clm/test/system/TCBext_ccsmseq_cam.sh - remove eshr
+M models/lnd/clm/test/system/TSM.sh ---------------- fix
+M models/lnd/clm/test/system/tests_pretag_bangkok -- put some serial tests first
+M models/lnd/clm/test/system/CLM_runcmnd.sh -------- remove bluesky
+>>>>>>>>>>>>>>> Always use T_REF2M NOT t_ref2m
+M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90
+
+Summary of testing:
+
+ bluefire: All PASS except
+004 blA74 TBL.sh _sc_ds clm_urb^nl_urb 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ............FAIL! rc= 5
+009 blA91 TBL.sh _sc_dh clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 5
+012 blD91 TBL.sh _persc_dh clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 144 cold .................FAIL! rc= 7
+014 blH74 TBL.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -100 cold ........FAIL! rc= 5
+019 blE91 TBL.sh 4p_vodsrsc_dh clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 48 arb_ic ............FAIL! rc= 5
+024 blF92 TBL.sh 17p_vodsrsc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 5
+028 blF93 TBL.sh 17p_vodsrsc_do clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 48 cold .............FAIL! rc= 5
+032 blEH1 TBL.sh 4p_vodsrsc_dh clm_urb^nl_urb 19981231:NONE:3600 1.9x2.5^0.9x1.25 gx1v6 48 arb_ic FAIL! rc= 5
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 4
+041 blCA4 TBL.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 7
+043 blCA8 TBL.sh _sc_ds clm_urb^nl_urb 19971230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 7
+047 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 7
+051 blC61 TBL.sh _sc_dh clm_urb^nl_urb 19981001:NONE:1800 1.9x2.5 gx1v6 48 cold .................FAIL! rc= 7
+055 blH51 TBL.sh 17p_cnnsc_dh clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS@2000 48 cold .........FAIL! rc= 5
+056 smI58 TSMcnspinup.sh 17p_cnadspinupsc_dh 17p_cnexitspinupsc_dh 17p_cnsc_dh clm_std 19980115:NONEFAIL! rc= 5
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+060 blLI1 TBL.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+062 blL78 TBL.sh _sc_s clm_urb^nl_urb 19971231:NONE:1800 1x1_brazil navy -10 arb_ic .............FAIL! rc= 5
+065 smL83 TSM.sh _sc_do clm_urb^nl_urb 19980115:NONE:3600 5x5_amazon navy -10 arb_ic ............FAIL! rc= 10
+066 erL83 TER.sh _sc_do clm_urb^nl_urb 19980115:NONE:3600 5x5_amazon navy -5+-5 arb_ic ..........FAIL! rc= 5
+067 brL83 TBR.sh _sc_do clm_urb^nl_urb_br 19980115:NONE:3600 5x5_amazon navy -10+-10 arb_ic .....FAIL! rc= 5
+068 blL83 TBL.sh _sc_do clm_urb^nl_urb 19980115:NONE:3600 5x5_amazon navy -10 arb_ic ............FAIL! rc= 4
+069 smJ61 TSM.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 96 cold ...........FAIL! rc= 10
+070 erJ61 TER.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 10+38 cold ........FAIL! rc= 5
+071 brJ61 TBR.sh 4p_casasc_dh clm_urb^nl_urb_br 19981230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 5
+072 blJ61 TBL.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 48 cold ...........FAIL! rc= 4
+073 smJ05 TSM.sh 4p_casasc_h clm_std^nl_lfiles 19800101:NONE:1800 0.47x0.63 gx1v6 48 arb_ic .....FAIL! rc= 10
+077 blJ74 TBL.sh 4p_casasc_ds clm_urb^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic ..FAIL! rc= 5
+084 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 6
+086 bl754 TBLtools.sh mksurfdata tools__s globalirrig ...........................................FAIL! rc= 6
+088 bl756 TBLtools.sh mksurfdata tools__s pftdyn ................................................FAIL! rc= 7
+093 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+094 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+095 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 4
+096 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 4
+097 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 4
+ jaguar: ALL FAIL except
+029 smJ05 TSM.sh 4p_casasc_h clm_std^nl_lfiles 19800101:NONE:1800 0.47x0.63 gx1v6 48 arb_ic .....PASS
+ breeze/pathscale: All PASS
+ bangkok/lahey: All PASS except
+005 smA92 TSM.sh _sc_dm clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 10
+006 erA92 TER.sh _sc_dm clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 5
+007 brA92 TBR.sh _sc_dm clm_urb^nl_urb_br 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 5
+009 smD91 TSM.sh _persc_dh clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 144 cold .................FAIL! rc= 10
+010 erD91 TER.sh _persc_dh clm_per^nl_urb 19981231:NONE:1200 4x5 gx3v5 72+72 cold ...............FAIL! rc= 5
+014 erCA4 TER.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ......FAIL! rc= 7
+030 smH52 TSM.sh 17p_cnnsc_dm clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS@2000 96 cold .........FAIL! rc= 10
+031 erH52 TER.sh 17p_cnnsc_dm clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS@2000 10+38 cold ......FAIL! rc= 5
+032 brH52 TBR.sh 17p_cnnsc_dm clm_urb^nl_urb_br 19980115:NONE:1800 10x15 USGS@2000 72+72 cold ...FAIL! rc= 5
+034 smJ92 TSM.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 96 cold ...............FAIL! rc= 10
+035 erJ92 TER.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 10+38 cold ............FAIL! rc= 5
+036 brJ92 TBR.sh 4p_casasc_dm clm_urb^nl_urb_br 19981230:NONE:1800 4x5 gx3v5 72+72 cold .........FAIL! rc= 5
+038 smL51 TSM.sh _sc_dh clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+039 erL51 TER.sh _sc_dh clm_urb^nl_urb 19980115:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+040 brL51 TBR.sh _sc_dh clm_urb^nl_urb_br 19980115:NONE:1800 10x15 USGS 72+72 arb_ic ............FAIL! rc= 5
+052 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 8
+053 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_30
+
+Changes answers relative to baseline: No bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_6_31
+Originator(s): erik (erik)
+Date: Wed Apr 1 00:58:15 MDT 2009
+One-line Summary: New surface datasets for 1850,2000, support for 0.9x1.25_gx1v6, urban always on. New pft-physiology file. Update scripts so remove some CLM_ env_conf vars. Fix CN for urban/pftdyn.
+
+Purpose of changes:
+
+New surface datasets for 1850,2000. sim_year can be 1850 or 2000
+(1870 no longer supported), support for 0.9x1.25_gx1v6. Demand furbinp (urban always on),
+wasteheat='ON_WASTEHEAT' by default. Change cpl7 template so can either do a cold start
+or require a finidat file (cold or startup). New pft-physiology file for CN used by
+everything. Update scripts so remove some CLM_ env_conf vars: CLM_BGC, CLM_DYNNDEP,
+CLM_DYNPFT, CLM_CO2_TYPE, remove CLMNCEP from scripts/datm (keeping CLM_QIAN mode).
+Change final CN loop to go over soil filter -- so CN,CASA,DGVM can work with urban.
+Remove traffic_flux array as its subscript was out of bounds on breeze. lnd_comp_mct
+changed so that check for spval allows for rounding of spval.
+
+Bugs fixed (include bugzilla ID): 904 (I cases start in 2003 rather than 1948)
+ 897 (string comparison in scripts)
+ 357 (remove duplicated files in tools)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 821 (mksurfdata for qtr deg)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 919 (problems in mksurfdata for pftdyn mode)
+ 920 (glacier_mec problems in mksurfdata)
+ 990 (illegal instruction)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Add -pio option to configure
+ Will set the BUILDPIO CPP token as well as adding pio source to Filepath.
+
+Describe any changes made to the namelist: Add pio_inparm namelist when -pio
+ was set in configure
+
+List any changes to the defaults for the boundary datasets:
+ New 1850 and 2000 surface datasets with urban enabled for most resolutions
+ Also new 1850 and 2000 finidat files for 1.9x2.5 resolution (other finidat files removed)
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: Peter Thornton, Keith Oleson, Forrest Hoffman
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, datm, csm_share, mct, pio
+
+ scripts4_090325
+ datm7_090325
+ vocemis-drydep11_share3_090112
+ CT2_6_0_090308
+ pio28_prod
+
+List all files eliminated:
+
+>>>>>>>>>>>>>> Remove DGVM namelist tests.
+D models/lnd/clm/test/system/config_files/10p_dgvmsc_h
+D models/lnd/clm/test/system/config_files/10p_dgvmsc_m
+D models/lnd/clm/test/system/config_files/10p_dgvmsc_o
+D models/lnd/clm/test/system/config_files/10p_dgvmsc_s
+D models/lnd/clm/test/system/config_files/10p_dgvmsc_dh
+D models/lnd/clm/test/system/config_files/10p_dgvmsc_dm
+D models/lnd/clm/test/system/config_files/10p_dgvmsc_do
+
+>>>>>>>>>>>>>> Remove script that creates ASCII global data for urban.
+D models/lnd/clm/tools/ncl_scripts/generate_ascii_avg_urbanparam_file_p7.ncl
+
+>>>>>>>>>>>>>> Remove modules replicated in mkgriddata by mksurfdata modules
+>>>>>>>>>>>>>> use the versions in mksurfdata.
+D models/lnd/clm/tools/mkgriddata/ncdio.F90
+D models/lnd/clm/tools/mkgriddata/domainMod.F90
+D models/lnd/clm/tools/mkgriddata/areaMod.F90
+
+List all files added and what they do:
+
+>>>>>>>>>>>>>> Add point datasets that now have urban information in them.
+A models/lnd/clm/bld/urban_input/surfdata_1x1_tropicAtl_urb3den_simyr2000_c090320.nc
+A models/lnd/clm/bld/urban_input/surfdata_1x1_brazil_urb3den_simyr2000_c090320.nc
+A models/lnd/clm/bld/urban_input/surfdata_1x1_brazil_urb3den_simyr1850_c090317.nc
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>> Make most tests with urban, remove dgvm tests
+M models/lnd/clm/test/system/tests_pretag_bluefire
+M models/lnd/clm/test/system/config_files/README
+M models/lnd/clm/test/system/tests_pretag_jaguar
+M models/lnd/clm/test/system/tests_pretag_bangkok
+M models/lnd/clm/test/system/test_driver.sh
+M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+M models/lnd/clm/test/system/tests_posttag_purempi_regression
+M models/lnd/clm/test/system/nl_files/clm_per
+M models/lnd/clm/test/system/nl_files/clm_per0
+M models/lnd/clm/test/system/input_tests_master
+M models/lnd/clm/test/system/tests_posttag_lightning
+
+>>>>>>>>>>>>>> Add all_urban mode for single-point mode
+M models/lnd/clm/tools/mksurfdata/mkvarctl.F90
+M models/lnd/clm/tools/mksurfdata/ncdio.F90
+M models/lnd/clm/tools/mksurfdata/mkglacier.F90
+M models/lnd/clm/tools/mksurfdata/mkfileMod.F90
+M models/lnd/clm/tools/mksurfdata/mklanwat.F90
+M models/lnd/clm/tools/mksurfdata/mkurban.F90
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90
+M models/lnd/clm/tools/mksurfdata/mksurfdata.pl
+M models/lnd/clm/tools/mksurfdata/mkpftMod.F90
+
+>>>>>>>>>>>>>> Add pio option to configure, and if set add pio_inparm namelist
+>>>>>>>>>>>>>> Datasets to 1850/2000 and most with urban.
+M models/lnd/clm/bld/configure ------------- Add pio option
+M models/lnd/clm/bld/config_files/config_definition.xml - Add pio to config_cache.xml
+M models/lnd/clm/bld/clm.cpl7.template ----- require furbinp, remove CLM_ env vars
+M models/lnd/clm/bld/build-namelist -------- make sure sim_year sent in, change
+ some names etc.
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml ---- Add pio_inparm
+M models/lnd/clm/bld/namelist_files/datm.streams.template.xml -- Remove CLM_NCEP
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- urban datasets
+ for most resolutions, datasets for 1850 and 2000, remove most finidat
+
+>>>>>>>>>>>>>> Add data from grid files as well as LANDMASK and PCT_URBAN.
+M models/lnd/clm/bld/urban_input/asphaltjungle_fluxes.nc
+M models/lnd/clm/bld/urban_input/metropolis_fluxes.nc
+M models/lnd/clm/bld/urban_input/mexicocityMEX_fluxes.nc
+M models/lnd/clm/bld/urban_input/urbanc_alpha_fluxes.nc
+M models/lnd/clm/bld/urban_input/vancouverCAN_fluxes.nc
+
+>>>>>>>>>>>>>> Changes from Forrest H./Peter T. to fix some CN problems (single-point, pftdyn)
+M models/lnd/clm/src/biogeochem/CNSetValueMod.F90
+M models/lnd/clm/src/main/driver.F90
+M models/lnd/clm/src/main/pftdynMod.F90
+M models/lnd/clm/src/main/lnd_comp_mct.F90 --------- Change from Mark Flanner
+ to fix roundoff issues for aerosols.
+M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/biogeophys/UrbanMod.F90 ------- Remove traffic_flux as subscript
+ bounds was being exceeded on breeze.
+
+Summary of testing:
+
+ bluefire: All PASS except TBL and...
+023 brF92 TBR.sh 17p_vodsrsc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 72+72 cold ..........FAIL! rc= 13
+027 brF93 TBR.sh 17p_vodsrsc_do clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 72+72 cold ..........FAIL! rc= 13
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+056 smI58 TSMcnspinup.sh 17p_cnadspinupsc_dh 17p_cnexitspinupsc_dh 17p_cnsc_dh clm_std 19980115:NONEFAIL! rc= 5
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+065 smL83 TSM.sh _sc_do clm_urb^nl_urb 19980115:NONE:3600 5x5_amazon navy -10 arb_ic ............FAIL! rc= 10
+066 erL83 TER.sh _sc_do clm_urb^nl_urb 19980115:NONE:3600 5x5_amazon navy -5+-5 arb_ic ..........FAIL! rc= 5
+067 brL83 TBR.sh _sc_do clm_urb^nl_urb_br 19980115:NONE:3600 5x5_amazon navy -10+-10 arb_ic .....FAIL! rc= 5
+069 smJ61 TSM.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 96 cold ...........FAIL! rc= 10
+070 erJ61 TER.sh 4p_casasc_dh clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 10+38 cold ........FAIL! rc= 5
+071 brJ61 TBR.sh 4p_casasc_dh clm_urb^nl_urb_br 19981230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 5
+076 brJ74 TBR.sh 4p_casasc_ds clm_urb^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_ic .FAIL! rc= 13
+ jaguar: All PASS except TBL and...
+005 smA91 TSM.sh _sc_dh clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ...................FAIL! rc= 10
+006 erA91 TER.sh _sc_dh clm_urb^nl_urb 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic ................FAIL! rc= 5
+007 brA91 TBR.sh _sc_dh clm_urb^nl_urb_br 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .............FAIL! rc= 5
+009 smE92 TSM.sh 4p_vodsrsc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 96 arb_ic ............FAIL! rc= 10
+010 erE92 TER.sh 4p_vodsrsc_dm clm_urb^nl_urb 19981230:NONE:1800 4x5 gx3v5 10+38 arb_ic .........FAIL! rc= 5
+011 brE92 TBR.sh 4p_vodsrsc_dm clm_urb^nl_urb_br 19981230:NONE:1800 4x5 gx3v5 72+72 arb_ic ......FAIL! rc= 5
+017 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+018 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 10+38 arb_ic ................FAIL! rc= 5
+019 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+025 smJ62 TSM.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 96 cold ...........FAIL! rc= 10
+026 erJ62 TER.sh 4p_casasc_dm clm_urb^nl_urb 19981230:NONE:1800 1.9x2.5 gx1v6 10+38 cold ........FAIL! rc= 5
+027 brJ62 TBR.sh 4p_casasc_dm clm_urb^nl_urb_br 19981230:NONE:1800 1.9x2.5 gx1v6 72+72 cold .....FAIL! rc= 5
+029 smJ05 TSM.sh 4p_casasc_h clm_std^nl_lfiles 19800101:NONE:1800 0.47x0.63 gx1v5 48 startup ....FAIL! rc= 10
+030 smJ74 TSM.sh 4p_casasc_ds clm_urb^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -1100 arb_ic .FAIL! rc= 10
+031 erJ74 TER.sh 4p_casasc_ds clm_urb^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -10+-10 arb_ic FAIL! rc= 5
+032 brJ74 TBR.sh 4p_casasc_ds clm_urb^nl_urb 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_ic .FAIL! rc= 5
+034 smLI2 TSM.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+035 erLI2 TER.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+036 brLI2 TBR.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+039 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 7
+ bangkok/lf95: All PASS except TBL
+ breeze/gale/hail/gust/ifort: All PASS
+
+TBL tests are different since most tests are now with urban.
+
+Most of the fails are due to missing files.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_30
+
+Changes answers relative to baseline: Yes for CN -- new pft-physiology file
+
+===============================================================
+===============================================================
+Tag name: clm3_6_30
+Originator(s): oleson (Oleson Keith 1332 CGD)
+Date: Thu Mar 19 20:44:33 MDT 2009
+One-line Summary: Fix urban roof/wall layers
+
+Purpose of changes: Fix urban roof/wall layers
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 821 (mksurfdata for qtr deg)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 990 (illegal instruction)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: Me
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+M models/lnd/clm/src/main/iniTimeConst.F90 ---- divide roof/wall thickness by nlevurb instead of nlevsoi
+
+Summary of testing:
+
+ bluefire: All urban testing passed except TBL
+ jaguar:
+ kraken:
+ lightning/pathscale:
+ bangkok/lf95:
+ breeze/gale/hail/gust/ifort:
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_29
+
+Changes answers relative to baseline: Urban only
+
+===============================================================
+===============================================================
+Tag name: clm3_6_29
+Originator(s): oleson (Oleson Keith 1332 CGD)
+Date: Thu Mar 19 07:16:05 MDT 2009
+One-line Summary: CN SAI, CN testing fix, rad step size fix
+
+Purpose of changes: Add SAI decay for CN mode.
+ Fix CN for tests SmI58, smH51, erH51, brH51
+ Add new get_rad_step_size function used by SAI decay function. This is the "simple fix"
+ and yields correct radiation time step size for all time steps except one for the I and F
+ cases. The "complete fix" involves changes to other component models and will be available soon.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 821 (mksurfdata for qtr deg)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 990 (illegal instruction)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: K. Oleson, G. Bonan, F. Hoffman, M. Vertenstein, J. Truesdale
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+M models/lnd/clm/src/biogeochem/CNrestMod.F90 --- add seven CN fields deleted previously for restart
+M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90 ---- add SAI decay function (calls get_rad_step_size)
+M models/lnd/clm/src/main/clm_time_manager.F90 ---- changes to fix get_rad_step_size function
+M models/lnd/clm/src/main/lnd_comp_mct.F90 ---- changes to fix get_rad_step_size function
+
+Summary of testing:
+
+ bluefire: All PASS except:
+013 smH74 TSM.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -1100 cold .......FAIL! rc= 10
+014 blH74 TBL.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -100 cold ........FAIL! rc= 4
+020 smE95 TSM.sh 4p_vodsrsc_h clm_std 19981231:NONE:1800 4x5 gx3v5 48 arb_ic ....................FAIL! rc= 10
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 4
+055 blH51 TBL.sh 17p_cnnsc_dh clm_std 19980115:NONE:1800 10x15 USGS@2000 48 cold ................FAIL! rc= 7
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+060 blLI1 TBL.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+096 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+097 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+098 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 6
+099 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 6
+100 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 6
+
+smH74 and blH74 failures are being investigated.
+blH51 fails because it fails in clm3_6_28 (fixed in this commit).
+Other failures are known.
+
+ jaguar:
+ kraken:
+ lightning/pathscale:
+ bangkok/lf95:
+ breeze/gale/hail/gust/ifort:
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_28
+
+Changes answers relative to baseline: CN mode only due to SAI decay factor
+
+===============================================================
+===============================================================
+Tag name: clm3_6_28
+Originator(s): oleson (Oleson Keith 1332 CGD)
+Date: Tue Mar 17 07:03:12 MDT 2009
+One-line Summary: Fix permission denied error when reading surface dataset
+
+Purpose of changes: Change nf_open statement in UrbanInputMod.F90
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 821 (mksurfdata for qtr deg)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 990 (illegal instruction)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: Erik K.
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+M models/lnd/clm/src/biogeophys/UrbanInputMod.F90 ---- don't use getavu for nf_open
+
+Summary of testing:
+
+ bluefire: All PASS except for:
+013 smH74 TSM.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -1100 cold .......FAIL! rc= 10
+014 blH74 TBL.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -100 cold ........FAIL! rc= 4
+020 smE95 TSM.sh 4p_vodsrsc_h clm_std 19981231:NONE:1800 4x5 gx3v5 48 arb_ic ....................FAIL! rc= 10
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 4
+051 blC61 TBL.sh _sc_dh clm_urb^nl_urb 19981001:NONE:1800 1.9x2.5 gx1v5 48 cold .................FAIL! rc= 5
+053 erH51 TER.sh 17p_cnnsc_dh clm_std 19980115:NONE:1800 10x15 USGS@2000 10+38 cold .............FAIL! rc= 7
+054 brH51 TBR.sh 17p_cnnsc_dh clm_std 19980115:NONE:1800 10x15 USGS@2000 72+72 cold .............FAIL! rc= 6
+056 smI58 TSMcnspinup.sh 17p_cnadspinupsc_dh 17p_cnexitspinupsc_dh 17p_cnsc_dh clm_std 19980115:NONEFAIL! rc= 5
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+060 blLI1 TBL.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+096 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+097 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+098 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 6
+099 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 6
+100 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 6
+
+smH74, blH74, smI58 are known failures related to CN and are being investigated.
+blC61 fails because clm3_6_27 fails (fixed in this tag).
+Other failures are known.
+
+ jaguar:
+ kraken:
+ lightning/pathscale:
+ bangkok/lf95:
+ breeze/gale/hail/gust/ifort:
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_27
+
+Changes answers relative to baseline: bfb
+
+===============================================================
+===============================================================
+Tag name: clm3_6_27
+Originator(s): oleson (Oleson Keith 1332 CGD)
+Date: Mon Mar 16 10:52:05 MDT 2009
+One-line Summary: Urban model changes and FGR12 fix
+
+Purpose of changes: Fix large urban saturation excess runoff.
+ Limit urban dew formation.
+ Change FGR12 diagnostic.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 821 (mksurfdata for qtr deg)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 990 (illegal instruction)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: Not tested
+
+Code reviewed by: K. Oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90 ---- change eflx_fgr12 diagnostic
+M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90 ---- limit urban dew formation and calculate
+ pervious road qred over nlevsoi, not nlevurb
+M models/lnd/clm/src/biogeophys/SoilHydrologyMod.F90 ---- prevent large saturation excess due to
+ ponded ice
+
+Summary of testing:
+
+ bluefire: All PASS tests_pretag_bluefire except TBL and :
+013 smH74 TSM.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -1100 cold .......FAIL! rc= 10
+020 smE95 TSM.sh 4p_vodsrsc_h clm_std 19981231:NONE:1800 4x5 gx3v5 48 arb_ic ....................FAIL! rc= 10
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+048 smC61 TSM.sh _sc_dh clm_urb^nl_urb 19981001:NONE:1800 1.9x2.5 gx1v5 -6 cold .................FAIL! rc= 10
+049 erC61 TER.sh _sc_dh clm_urb^nl_urb 19981001:NONE:1800 1.9x2.5 gx1v5 10+38 cold ..............FAIL! rc= 5
+050 brC61 TBR.sh _sc_dh clm_urb^nl_urb_br 19981001:NONE:1800 1.9x2.5 gx1v5 -3+-3 cold ...........FAIL! rc= 5
+053 erH51 TER.sh 17p_cnnsc_dh clm_std 19980115:NONE:1800 10x15 USGS@2000 10+38 cold .............FAIL! rc= 7
+054 brH51 TBR.sh 17p_cnnsc_dh clm_std 19980115:NONE:1800 10x15 USGS@2000 72+72 cold .............FAIL! rc= 6
+056 smI58 TSMcnspinup.sh 17p_cnadspinupsc_dh 17p_cnexitspinupsc_dh 17p_cnsc_dh clm_std 19980115:NONEFAIL! rc=5
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+096 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+097 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+098 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 6
+099 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 6
+100 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 6
+
+smH74 and smI58 are CN-related tests that did not fail in clm3_6_25, but fail in clm3_6_26 and in this tag and
+thus should be investigated further.
+smC61, erC61, brC61 fail because of permission denied when reading surface dataset. This appears to be a test
+suite problem only.
+
+ jaguar:
+ kraken:
+ lightning/pathscale:
+ bangkok/lf95:
+ breeze/gale/hail/gust/ifort:
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_26
+
+Changes answers relative to baseline: Urban answers change because of runoff fix.
+ Standard mode answers only change in FGR12 diagnostic.
+
+===============================================================
+===============================================================
+Tag name: clm3_6_26
+Originator(s): Peter Thornton
+Date: 3/14/09
+One-line Summary: CN time step and restart file changes
+
+Purpose of changes: shorten CN restart file. Requires moving CLM to physical model timestep.
+
+Bugs fixed (include bugzilla ID):
+
+Known bugs (include bugzilla ID):
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: CLM restart file reduced in size by ~factor of 3.
+
+Code reviewed by: Forrest Hoffman
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all files eliminated: none
+
+List all files added and what they do:
+
+
+List all existing files that have been modified, and describe the changes:
+M models/lnd/clm/src/biogeochem/CNCStateUpdate2Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate2Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNNStateUpdate3Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNFireMod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNMRespMod.F90 - change q10
+M models/lnd/clm/src/biogeochem/CNPhenologyMod.F90 - remove reference to retransn
+M models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNDecompMod.F90 - change q10
+M models/lnd/clm/src/biogeochem/CNCStateUpdate3Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate1Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNrestMod.F90 - eliminate many CN variables
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate3Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNAnnualUpdateMod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNNStateUpdate2Mod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNNDynamicsMod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNAllocationMod.F90 - change time step, and cleanup some variable names
+M models/lnd/clm/src/biogeochem/CNEcosystemDynMod.F90 - change time step
+M models/lnd/clm/src/biogeochem/CNSetValueMod.F90 - cleanup variable names
+M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90 - change time step
+M models/lnd/clm/src/main/CNiniTimeVar.F90 - cleanup variable names
+M models/lnd/clm/src/main/driver.F90 - change time step
+M models/lnd/clm/src/main/clmtypeInitMod.F90 - cleanup variable names
+M models/lnd/clm/src/main/pftdynMod.F90 - change time step
+M models/lnd/clm/src/main/clm_time_manager.F90 - change time step
+M models/lnd/clm/src/main/clmtype.F90 - cleanup variable names
+M models/lnd/clm/src/main/histFldsMod.F90 - cleanup variable names
+
+Summary of testing:
+
+ bluefire:
+ jaguar:
+ Ran the CLM test suite, with the following results:
+smA74 TSM.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................PASS
+erA74 TER.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .................PASS
+brA74 TBR.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .................PASS
+blA74 TBL.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................SKIPPED*
+smA91 TSM.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ..........................PASS
+erA91 TER.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .......................FAIL! rc= 7 (passes with 512 tasks)
+brA91 TBR.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .......................FAIL! rc= 6 (passes with 512 tasks)
+blA91 TBL.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ..........................SKIPPED*
+smE92 TSM.sh 4p_vodsrsc_dm clm_std 19981230:NONE:1800 4x5 gx3v5 96 arb_ic ...................PASS
+erE92 TER.sh 4p_vodsrsc_dm clm_std 19981230:NONE:1800 4x5 gx3v5 10+38 arb_ic ................PASS
+brE92 TBR.sh 4p_vodsrsc_dm clm_std 19981230:NONE:1800 4x5 gx3v5 72+72 arb_ic ................PASS
+blE92 TBL.sh 4p_vodsrsc_dm clm_std 19981230:NONE:1800 4x5 gx3v5 48 arb_ic ...................SKIPPED*
+smEH2 TSM.sh 4p_vodsrsc_dm clm_std 19981231:NONE:3600 1.9x2.5^0.9x1.25 USGS 48 arb_ic .......PASS
+erEH2 TER.sh 4p_vodsrsc_dm clm_std 19981231:NONE:3600 1.9x2.5^0.9x1.25 USGS 10+38 arb_ic ....PASS
+brEH2 TBR.sh 4p_vodsrsc_dm clm_std 19981231:NONE:3600 1.9x2.5^0.9x1.25 USGS 24+24 arb_ic ....PASS
+blEH2 TBL.sh 4p_vodsrsc_dm clm_std 19981231:NONE:3600 1.9x2.5^0.9x1.25 USGS 48 arb_ic .......SKIPPED*
+smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 10+38 arb_ic ................FAIL! rc= 5
+brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+blG43 TBL.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................SKIPPED*
+smH92 TSM.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@2000 96 cold .............PASS
+erH92 TER.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@2000 10+38 cold ..........PASS
+brH92 TBR.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@2000 72+72 cold ..........PASS
+blH92 TBL.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@2000 48 cold .............SKIPPED*
+smJ62 TSM.sh 4p_casasc_dm clm_std 19981230:NONE:1800 1.9x2.5 gx1v5 96 startup ...............PASS
+erJ62 TER.sh 4p_casasc_dm clm_std 19981230:NONE:1800 1.9x2.5 gx1v5 10+38 startup ............PASS
+brJ62 TBR.sh 4p_casasc_dm clm_std 19981230:NONE:1800 1.9x2.5 gx1v5 72+72 startup ............PASS
+blJ62 TBL.sh 4p_casasc_dm clm_std 19981230:NONE:1800 1.9x2.5 gx1v5 48 startup ...............SKIPPED*
+smJ05 TSM.sh 4p_casasc_h clm_std^nl_lfiles 19800101:NONE:1800 0.47x0.63 gx1v5 48 startup ....FAIL! rc= 10
+smJ74 TSM.sh 4p_casasc_ds clm_std 10001230:NONE:3600 1x1_tropicAtl test -1100 arb_ic ........PASS
+erJ74 TER.sh 4p_casasc_ds clm_std 10001230:NONE:3600 1x1_tropicAtl test -10+-10 arb_ic ......PASS
+brJ74 TBR.sh 4p_casasc_ds clm_std 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_ic ........PASS
+blJ74 TBL.sh 4p_casasc_ds clm_std 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic .........SKIPPED*
+smK92 TSM.sh 10p_dgvmsc_dm clm_std 19981230:NONE:1800 4x5 gx3v5 96 arb_ic ...................PASS
+erK92 TER.sh 10p_dgvmsc_dm clm_std 19981230:NONE:1800 4x5 gx3v5 10+38 arb_ic ................PASS
+brK92 TBR.sh 10p_dgvmsc_dm clm_std 19981230:NONE:1800 4x5 gx3v5 72+72 arb_ic ................PASS
+blK92 TBL.sh 10p_dgvmsc_dm clm_std 19981230:NONE:1800 4x5 gx3v5 48 arb_ic ...................SKIPPED*
+smLI2 TSM.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+erLI2 TER.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+brLI2 TBR.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+blLI2 TBL.sh _sc_dm clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................SKIPPED*
+erP65 TSM_ccsmseq.sh ERS f19_g15 I ..........................................................PASS
+erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................PASS
+
+ kraken:
+Note: the tests that FAIL here are the same that FAIL on jaguar with clm3_6_25.
+
+ lightning/pathscale:
+ bangkok/lf95:
+ breeze/gale/hail/gust/ifort:
+
+CLM tag used for the baseline comparison tests if applicable:
+
+Changes answers relative to baseline:
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ q10 changes are climate changing. Time step changes are larger than roundoff, similar climate.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+===============================================================
+Tag name: clm3_6_25
+Originator(s): dlawren (Lawrence David 1384 CGD), erik (Kluzek Erik), tcraig (Craig Tony)
+Date: Fri Mar 13 15:11:01 MDT 2009
+One-line Summary: Daylength control on Vcmax, 1%Lake,wetland,glacier in mksrfdat, remove ELEVATION in surface data file
+
+Purpose of changes: Include changes from Peter Thornton to include daylength control
+on vcmax in photosynthesis scheme; Set minimum lake, wetland, and glacier area to 1%
+to be more consistent with urban and to represent more lakes and wetlands; remove
+temporary unused ELEVATION field from surface dataset
+
+Bugs fixed (include bugzilla ID): 877 (CN restart problem)
+ 911 (high PE count problem)
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 821 (mksurfdata for qtr deg)
+ 851 (abort when files non-exist on jaguar)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 990 (illegal instruction)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: New global urban datasets 10x15 and 1.9x2.5 resolutions
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: David Lawrence (code), Erik Kluzek (testing and build), Tony Craig (DecompInitMod.F90)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts, mct, datm
+
+ scripts to scripts4_090310
+ datm7 to datm7_090229
+ mct to MCT2_6_0_090308
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+A models/lnd/clm/test/system/config_files/tools__s ..... Optimized serial mode for tools
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/tools/mksurfdata/mkglcmec.F90 ..... correct bug in error check
+M models/lnd/clm/tools/mksurfdata/mkglacier.F90 .... reduce min glacier frac from 5 to 1%
+M models/lnd/clm/tools/mksurfdata/mkfileMod.F90 .... remove ELEVATION field
+M models/lnd/clm/tools/mksurfdata/mklanwat.F90 ..... reduce min lake,wetland frac from 5 to 1%
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90 ..... remove ELEVATION field
+M models/lnd/clm/tools/mksurfdata/README ........... add documentation about being slow unless use OPT=TRUE in gmake
+M models/lnd/clm/src/main/decompInitMod.F90 ........ Changes from Tony Craig to fix for high PE counts
+M models/lnd/clm/src/main/clm_comp.F90 ............. daylength control on vcmax changes
+M models/lnd/clm/src/main/driver.F90 ............... daylength control on vcmax changes
+M models/lnd/clm/src/main/clmtypeInitMod.F90 ....... daylength control on vcmax changes
+M models/lnd/clm/src/main/iniTimeConst.F90 ......... daylength control on vcmax changes
+M models/lnd/clm/src/main/clmtype.F90 .............. daylength control on vcmax changes
+M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90 daylength control on vcmax changes
+
+M models/lnd/clm/test/system/test_driver.sh ........ update to beta10, fix some issues on jaguar
+M models/lnd/clm/test/system/input_tests_master .... do most mksurfdata testing optimized
+ change CN tests to cold-starts, change 1890 to 1870
+
+M models/lnd/clm/bld/build-namelist ................ pass sim_year and maxpft in when determining default for finidat
+M models/lnd/clm/bld/clm.cpl7.template ............. add in ignore logic like cam
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml . update input files
+
+Summary of testing:
+
+ bluefire: All PASS except TBL and...
+020 smE95 TSM.sh 4p_vodsrsc_h clm_std 19981231:NONE:1800 4x5 gx3v5 48 arb_ic ....................FAIL! rc= 10
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+052 smH51 TSM.sh 17p_cnnsc_dh clm_std 19980115:NONE:1800 10x15 USGS@1890 96 cold ................FAIL! rc= 8
+053 erH51 TER.sh 17p_cnnsc_dh clm_std 19980115:NONE:1800 10x15 USGS@1890 10+38 cold .............FAIL! rc= 5
+054 brH51 TBR.sh 17p_cnnsc_dh clm_std 19980115:NONE:1800 10x15 USGS@1890 72+72 cold .............FAIL! rc= 5
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+096 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+097 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+098 erP61 TSM_ccsmseq.sh ERS f19_g15 I4804 ......................................................FAIL! rc= 6
+099 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 6
+100 erP68 TSM_ccsmseq.sh ERS f19_g15 ICASA ......................................................FAIL! rc= 6
+ jaguar: Limited testing...
+ lightning/pathscale: All PASS except TBL and...
+002 smCA4 TSM.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 10
+003 erCA4 TER.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ......FAIL! rc= 5
+004 brCA4 TBR.sh _sc_ds clm_urb^nl_urb_br 19981001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic ...FAIL! rc= 5
+006 smOC4 TSM.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 10
+007 erOC4 TER.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 115+115 arb_icFAIL! rc= 5
+008 brOC4 TBR.sh _vansc_ds clm_urb1pt^nl_urb_br 19920812:NONE:3600 1x1_vancouverCAN navy 72+72 arb_iFAIL! rc= 5
+011 erA91 TER.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .......................FAIL! rc= 7
+012 brA91 TBR.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .......................FAIL! rc= 6
+019 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 7
+020 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 6
+024 erJ42 TER.sh 4p_casasc_dm clm_std 19981230:NONE:1800 10x15 USGS 10+38 startup ...............FAIL! rc= 7
+025 brJ42 TBR.sh 4p_casasc_dm clm_std 19981230:NONE:1800 10x15 USGS 72+72 startup ...............FAIL! rc= 6
+027 smK51 TSM.sh 10p_dgvmsc_dh clm_std 19981230:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+028 erK51 TER.sh 10p_dgvmsc_dh clm_std 19981230:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+029 brK51 TBR.sh 10p_dgvmsc_dh clm_std 19981230:NONE:1800 10x15 USGS 72+72 arb_ic ...............FAIL! rc= 5
+032 erL51 TER.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 10+38 arb_ic ......................FAIL! rc= 7
+033 brL51 TBR.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 72+72 arb_ic ......................FAIL! rc= 6
+040 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+041 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+042 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804 ....................................................FAIL! rc= 5
+ breeze/gale/hail/gust/ifort: All PASS except TBL
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_24
+
+Changes answers relative to baseline: Yes
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers:
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change: similar climate
+
+ Dave Lawrence ran a short simulation to show that answers do NOT change significantly
+
+===============================================================
+===============================================================
+Tag name: clm3_6_24
+Originator(s): oleson (Oleson Keith 1332 CGD)
+Date: Mon Mar 9 21:01:47 MDT 2009
+One-line Summary: Fix urban testing and some history field changes
+
+Purpose of changes: Convert urban ascii files to netcdf to get urban testing to work.
+ Add rh_ref2m calculation for urban and change urban/rural humidity from specific to relative in
+ history files.
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 672 (3.5.4-3.5.14 diffs)
+ 698 (cprnc bug gives false difference)
+ 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 722 (threading slow)
+ 789 (1pt sims 2.5X slow)
+ 794 (hist avg strange)
+ 821 (mksurfdata for qtr deg)
+ 851 (abort when files non-exist on jaguar)
+ 877 (CN restart problem)
+ 883 (aerosol deposition not from atm)
+ 903 (problems in driver with open-MP on PGI)
+ 990 (illegal instruction)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ 1087 (let weights come from fsurdat file NOT finidat)
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: In namelist_defaults_clm.xml:
+ Change *.txt urban files to *.nc
+ Change aerosoldep_monthly_1990s_mean_1x1_urbanc_alpha_c090119.nc to
+ aerosoldep_monthly_1990s_mean_1x1_urbanc_alpha_c090114.nc
+ Change path for surfdata_0096x0144_090223_v2.nc from
+ lnd/clm2/surfdata/ to lnd/clm2/urbdata
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: Not tested
+
+Code reviewed by: K. Oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all files eliminated: None
+
+List all files added and what they do: A models/lnd/clm/bld/urban_input/vancouverCAN_fluxes.nc
+ A models/lnd/clm/bld/urban_input/metropolis_fluxes.nc
+ A models/lnd/clm/bld/urban_input/urbanc_alpha_fluxes.nc
+ A models/lnd/clm/bld/urban_input/asphaltjungle_fluxes.nc
+ A models/lnd/clm/bld/urban_input/mexicocityMEX_fluxes.nc
+
+List all existing files that have been modified, and describe the changes:
+M models/lnd/clm/test/system/nl_files/nl_urb ---- Remove TSNOW from hist_fincl1, remove Q2M, Q2M_R, Q2M_U and
+ add RH2M, RH2M_R, RH2M_U to hist_fincl2
+M models/lnd/clm/test/system/nl_files/nl_urb_br --- Remove TSNOW from hist_fincl1
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml ---- Change *.txt urban files to *.nc
+ Change aerosoldep_monthly_1990s_mean_1x1_urbanc_alpha_c090119.nc to
+ aerosoldep_monthly_1990s_mean_1x1_urbanc_alpha_c090114.nc
+ Change path for surfdata_0096x0144_090223_v2.nc from
+ lnd/clm2/surfdata/ to lnd/clm2/urbdata
+M models/lnd/clm/src/main/clmtypeInitMod.F90 ---- delete q_ref2m_r, q_ref2m_u, and add rh_ref2m_r, rh_ref2m_u
+M models/lnd/clm/src/main/clmtype.F90 ---- delete q_ref2m_r, q_ref2m_u, and add rh_ref2m_r, rh_ref2m_u
+M models/lnd/clm/src/main/histFldsMod.F90 ---- delete Q2M_U, Q2M_R, and add RH2M_U, RH2M_R
+M models/lnd/clm/src/biogeophys/UrbanInitMod.F90 ---- delete q_ref2m_u and add rh_ref2m_u
+M models/lnd/clm/src/biogeophys/UrbanMod.F90 ---- delete q_ref2m_u and add calculation for rh_ref2m_u
+M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90 ---- delete q_ref2m_r and add rh_ref2m_r
+M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90 ---- delete q_ref2m_r and add rh_ref2m_r
+
+Summary of testing:
+
+ bluefire: All PASS except:
+033 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+034 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+035 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+036 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 4
+037 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...................FAIL! rc= 10
+041 blCA4 TBL.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 5
+043 blCA8 TBL.sh _sc_ds clm_urb^nl_urb 19971230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 5
+047 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 5
+051 blC61 TBL.sh _sc_dh clm_urb^nl_urb 19981001:NONE:1800 1.9x2.5 gx1v5 48 cold .................FAIL! rc= 5
+052 smH51 TSM.sh 17p_cnnsc_dh clm_std std 19980115:NONE:1800 10x15 USGS@1890 96 arb_ic ..........FAIL! rc= 1
+053 erH51 TER.sh 17p_cnnsc_dh clm_std std 19980115:NONE:1800 10x15 USGS@1890 10+38 arb_ic .......FAIL! rc= 1
+054 brH51 TBR.sh 17p_cnnsc_dh clm_std std 19980115:NONE:1800 10x15 USGS@1890 72+72 arb_ic .......FAIL! rc= 1
+055 blH51 TBL.sh 17p_cnnsc_dh clm_std std 19980115:NONE:1800 10x15 USGS@1890 48 arb_ic ..........FAIL! rc= 1
+057 smLI1 TSM.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 10
+058 erLI1 TER.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -5+-5 arb_ic ......................FAIL! rc= 5
+059 brLI1 TBR.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10+-10 arb_ic ....................FAIL! rc= 5
+060 blLI1 TBL.sh _sc_dh clm_std 19980101:NONE:1800 94x192 T62 -10 arb_ic ........................FAIL! rc= 4
+090 sm9J2 TSMext_ccsmseq_cam.sh ext_ccsm_seq_0.9x1.25_dh ext_ccsm_seq_cam 48 ....................FAIL! rc= 8
+091 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+092 erP61 TSM_ccsmseq.sh ERS f19_g1 701 (svn keyword)
+ 698 (cprnc bug gives false difference)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 1077 (Slow leak of land aquifer to Ocean runoff)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ http://bugs.cgd.ucar.edu/
+
+ Known bugs that will NOT be resolved: 546(interpinic for DGVM),
+ 652 (threads different answers with older PGI versions)
+
+Hydrology changes (Guo-Yue Niu water table below soil column, fff=7,
+Qing micropore eqs (hksat and sucsat) and Qing microporosity
+-Simulation clm3_5niu5).
+
+Snow cover fraction (Niu and Yang); snow burial fraction for short vegetation
+(Wang and Zeng); thermal and hydraulic organic soil (Lawrence);
+snow compaction fix (Lawrence); snow T profile during layer splitting
+fix (Lawrence); new FGR12 diagnostic.
+
+Ground emissivity now weighted combination (using fsno) of snow and soil
+emissivity, sparse dense aerodynamic parameters from X. Zeng parameterization,
+Liu (2004) saturated hydraulic conductivity and matric potential,
+change saturation excess mechanism to only go to next to lowest layer, change
+forcing height to min. of 40m.
+
+Lower volumetric soil water content to remove drainage spikes from some points
+with high sand content. Change volumetric soil water content from 0.4 to 0.3.
+
+Incorporate hydrologically inactive deep soil (15 layers, 11-15
+hydrologically inactive) and add mksoilcarb capability.
+Change deep soil (layers 11-15) to dry rock type rather than wet sand
+to reduce spinup time and for greater realism.
+
+Improved representation of snow-radiation interaction, including snow aging,
+darkening from black carbon and dust, and vertically-resolved solar heating.
+
+Remove code pertaining to 40m minimum forcing height. Forcing height is now
+whatever the atmospheric model provides plus z0+d of each pft. For offline
+simulations this will be 30m+z0+d.
+
+saturation excess back to CLM3.5 parameterization. Sakaguchi litter resistance
+Remove Qing Liu soil micropore functions and return to CLM3.5 formulations,
+change decay factor for drainage to 2.5. remove Niu water table below soil
+column formulation, frozen fraction of soil expression normalized per Zeng,
+rsubmax=9.9 for drainage calculation, decay factor=0.5 for surface runoff
+calculation, Zeng/Decker Richards equation mods, modified one-step solution
+for soil moisture and qcharge for compatibility with Zeng/Decker Richards
+mods per Swenson.
+
+Change input datm7 forcing so that Precip is over 6 hour interval,
+times are corrected for Temp, Pres, Humid, and Wind data and linear
+interpolation is used, and solar data is scaled by the cos(sol-zen angle).
+
+Set litter LAI = 0.5 and incorporate Swenson organic/mineral soil hk
+percolation theory
+
+CASA changes from Forrest Hoffman:
+
+ These changes add SOILPSI to the CASA
+ configuration, correct units on C-LAMP carbon pool type fluxes, and reclassify
+ microbial pools as soil type pools. I believe this includes all modifications
+ between bgcmip04_clm3_expa_60 and bgcmip08_clm3_expa_72.
+
+Summary of CN and Btran changes from Sam Levis:
+
+- CanopyFluxes modification in the calculation of btran so that it equals 0 in soil layers with temperature <=-2 C.
+- CN mods recommended by Peter Thornton and the BGCWG during the bgc development phase of the last few months.
+
+Grassland AND CROP optical properties changes from Keith Oleson:
+
+New pft physiology file was created:
+
+pft-physiology.c081002
+
+Description of changes to physiology file:
+
+New leaf and stem optical properties (VIS and NIR reflectance and transmittance)
+were derived for grasslands and crops (pfts 12-16) from full optical range
+spectra of measured optical properties (Asner et al., RSE 1998).
+
+New properties are:
+
+ Leaf Stem
+ VIS NIR VIS NIR
+Reflectance 0.11 0.35 0.31 0.53
+Transmittance 0.05 0.34 0.12 0.25
+
+
+Describe any changes made to build system:
+
+ Add SNICAR_FRC and CARBON_AERO ifdef tokens
+
+ DEFINE option SNICAR_FRC: enables second radiative transfer calculation of pure snow for radiative forcing estimation
+
+ in configure use options -carbon_aero and -snicar_frc
+
+Describe any changes made to the namelist: Add fsnowoptics, fsnowaging, faerdep
+
+Added namelist variables fsnowoptics, fsnowaging, and faerdep, which point to files containing, respectively, snow/aerosol optical properties, snow aging parameters, and global aerosol deposition file. THESE FILES ARE REQUIRED.
+
+List any changes to the defaults for the boundary datasets:
+
+ finidat files developed, all new fsurdat files, new pft-physiology,
+ files for T62, new organic files, new files for SNICAR (fsnowoptics,
+ fsnowaging, faerdep), fix some inconsistencies with fraction files,
+ get topo files setup correctly, get all files for 4x5 and 2x2.5 resolution
+
+Describe any substantial timing or memory changes: Yes
+ 20% slower because of SNICAR and slower because of deep soil
+
+Code reviewed by: Keith Oleson, Mark Flanner, Dave Lawrence,
+ Peter Thornton, Sam Levis, Sean Swenson
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts,
+ drv, datm7, socn, sice, sglc, csm_share, timing, pio, cprnc
+
++scripts scripts4_090112
++models/drv/seq_mct drvseq3_0_04
++models/atm/datm7 datm7_090107
++models/ocn/socn stubs1_1_01/socn
++models/ice/sice stubs1_1_01/sice
++models/glc/sglc stubs1_1_01/sglc
++models/csm_share share3_090112
++models/utils/timing timing_081028
++models/utils/pio pio28_prod/pio
++models/lnd/clm/tools/cprnc cprnc_081022
+
+List all files eliminated:
+
+D models/lnd/clm/test/system/tests_pretag_bluevista - remove
+D models/lnd/clm/bld/scpDefaultNamelist.pl ---------- replace with listDefaultNamelist.pl
+D models/lnd/clm/bld/run-frost.csh ------------------ remove as can use cpl7
+D models/lnd/clm/tools/interpinic/clmi_1999-01-02_10x15_c070330.nc -- new file
+
+List all files added and what they do:
+
+A + models/lnd/clm/test/system/nl_files/clm_organic ------------ test organic
+A + models/lnd/clm/tools/mksurfdata/mkorganic.F90 -------------- add organic to surfdat
+A + models/lnd/clm/tools/mksurfdata/mksurfdata.pl -------------- create all fsurdat files
+A + models/lnd/clm/tools/ncl_scripts/aerdepregrid.ncl ---------- interpolate aerosol deposition
+A + models/lnd/clm/tools/ncl_scripts/clmi_increasesoillayer.ncl interpolate old clmi files to 15 soil levels
+A + models/lnd/clm/tools/interpinic/clmi.IQ.1953-01-01_10x15_USGS_simyr2000_c081202.nc
+ ---------------- new 15 layer file to test interpolation
+A + models/lnd/clm/tools/interpinic/runinit_ibm.csh ------------ create all finidat files
+A + models/lnd/clm/bld/listDefaultNamelist.pl ------------------ list inputdata files needed
+A + models/lnd/clm/src/main/organicFileMod.F90 ----------------- organic soil
+A + models/lnd/clm/src/main/aerdepMod.F90 ---------------------- read in aerosol deposition
+A + models/lnd/clm/src/biogeophys/SNICARMod.F90 ---------------- SNICAR model
+A + Quickstart.GUIDE ------------------------------------------- Quickstart to cpl7 scripts
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>>>>>>>>>>>> Add compile_only option, fix smp/spmd, change most
+>>>>>>>>>>>>>>>>>>>>>>>>> tests to 4x5 resolution, update ccsm version,
+>>>>>>>>>>>>>>>>>>>>>>>>> remove bluevista, change to clm_qian inputdata
+>>>>>>>>>>>>>>>>>>>>>>>>> add cold start type
+
+M models/lnd/clm/test/system/TCB.sh
+M models/lnd/clm/test/system/config_files/4p_vodsrsc_dm
+M models/lnd/clm/test/system/config_files/4p_vodsrsc_do
+M models/lnd/clm/test/system/config_files/scam_ds ----- fix defaults for scam
+M models/lnd/clm/test/system/config_files/17p_cnnsc_o
+M models/lnd/clm/test/system/config_files/4p_casasc_dm
+M models/lnd/clm/test/system/config_files/10p_dgvmsc_o
+M models/lnd/clm/test/system/config_files/4p_casasc_do
+M models/lnd/clm/test/system/config_files/4p_casasc_o
+M models/lnd/clm/test/system/config_files/17p_vodsrsc_o
+M models/lnd/clm/test/system/config_files/17p_cnnsc_dm
+M models/lnd/clm/test/system/config_files/17p_cnnsc_do
+M models/lnd/clm/test/system/config_files/10p_dgvmsc_dm
+M models/lnd/clm/test/system/config_files/10p_dgvmsc_do
+M models/lnd/clm/test/system/config_files/17p_vodsrsc_dm
+M models/lnd/clm/test/system/config_files/ext_ccsm_seq_64x128_s
+M models/lnd/clm/test/system/config_files/17p_vodsrsc_do
+M models/lnd/clm/test/system/config_files/4p_vodsrsc_o
+M models/lnd/clm/test/system/TSMncl_tools.sh
+M models/lnd/clm/test/system/CLM_compare.sh
+M models/lnd/clm/test/system/TBL.sh
+M models/lnd/clm/test/system/TSM_ccsmseq.sh
+M models/lnd/clm/test/system/TSMext_ccsmseq_cam.sh
+M models/lnd/clm/test/system/tests_pretag_jaguar
+M models/lnd/clm/test/system/tests_posttag_kraken
+M models/lnd/clm/test/system/tests_pretag_bluefire
+M models/lnd/clm/test/system/README.testnames
+M models/lnd/clm/test/system/tests_posttag_breeze
+M models/lnd/clm/test/system/tests_pretag_bangkok
+M models/lnd/clm/test/system/TBR.sh
+M models/lnd/clm/test/system/TCBtools.sh
+M models/lnd/clm/test/system/TER.sh
+M models/lnd/clm/test/system/test_driver.sh
+M models/lnd/clm/test/system/tests_posttag_purempi_regression
+M models/lnd/clm/test/system/mknamelist
+M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+M models/lnd/clm/test/system/TCT_ccsmseq.sh
+M models/lnd/clm/test/system/TCSruncase.sh
+M models/lnd/clm/test/system/TSMpergro.sh
+M models/lnd/clm/test/system/nl_files/clm_per
+M models/lnd/clm/test/system/nl_files/clm_urb
+M models/lnd/clm/test/system/nl_files/clm_std
+M models/lnd/clm/test/system/nl_files/clm_ndepdyn
+M models/lnd/clm/test/system/nl_files/clm_pftdyn
+M models/lnd/clm/test/system/nl_files/clm_per0
+M models/lnd/clm/test/system/TSMcnspinup.sh
+M models/lnd/clm/test/system/input_tests_master
+M models/lnd/clm/test/system/TSMtools.sh
+M models/lnd/clm/test/system/TSMruncase.sh
+M models/lnd/clm/test/system/TCBext_ccsmseq_cam.sh
+M models/lnd/clm/test/system/TSCext_ccsmseq_scam.sh
+M models/lnd/clm/test/system/tests_posttag_lightning
+M models/lnd/clm/test/system/CLM_runcmnd.sh
+M models/lnd/clm/test/system/TBLtools.sh
+M models/lnd/clm/test/system/TSM.sh
+
+>>>>>>>>>>>>>>>>>>>>>>>>> Add organic, add option to output as double precision,
+>>>>>>>>>>>>>>>>>>>>>>>>> be more careful with averaging, add error checking
+M models/lnd/clm/tools/mksurfdata/mkvarctl.F90
+M models/lnd/clm/tools/mksurfdata/mksurfdata.globalurban
+M models/lnd/clm/tools/mksurfdata/mksurfdata.regional
+M models/lnd/clm/tools/mksurfdata/mkfileMod.F90
+M models/lnd/clm/tools/mksurfdata/Makefile ----------- -Kieee for pgi
+M models/lnd/clm/tools/mksurfdata/mksurfdata.singlept
+M models/lnd/clm/tools/mksurfdata/mksrfdat.F90
+M models/lnd/clm/tools/mksurfdata/Srcfiles
+M models/lnd/clm/tools/mksurfdata/mksurfdata.namelist
+
+>>>>>>>>>>>>>>>>>>>>>>>>> 15 levels for urban
+MM models/lnd/clm/tools/ncl_scripts/generate_ascii_avg_urbanparam_file_p7.ncl
+M models/lnd/clm/tools/ncl_scripts/README
+
+>>>>>>>>>>>>>>>>>>>>>>>>> Change for 15 levels and new variables/dims on dataset
+M models/lnd/clm/tools/interpinic/interpinic.F90
+M models/lnd/clm/tools/interpinic/interpinic.runoptions
+M models/lnd/clm/tools/interpinic/Srcfiles
+M models/lnd/clm/tools/interpinic/Filepath
+M models/lnd/clm/tools/interpinic/Makefile
+
+>>>>>>>>>>>>>>>>>>>>>>>>> 10x15 resolution, start to resolve domain checking bug
+M models/lnd/clm/tools/mkgriddata/mkgriddata.namelist
+M models/lnd/clm/tools/mkgriddata/mkgriddata.F90
+M models/lnd/clm/tools/mkgriddata/domainMod.F90
+M models/lnd/clm/tools/mkgriddata/creategridMod.F90
+M models/lnd/clm/tools/mkgriddata/Makefile
+
+>>>>>>>>>>>>>>>>>>>>>>>>> Get grid data from grid files rather than frac files
+M models/lnd/clm/tools/mkdatadomain/mkdatadomain.namelist
+M models/lnd/clm/tools/mkdatadomain/addglobal.F90
+M models/lnd/clm/tools/mkdatadomain/create_domain.F90
+M models/lnd/clm/tools/mkdatadomain/Makefile
+
+>>>>>>>>>>>>>>>>>>>>>>>>> minor changes to build, new datasets for build-namelist
+M models/lnd/clm/bld/configure --- add -snicar_frc and -carbon_aero, add sglc, remove timing for cpl7
+M models/lnd/clm/bld/queryDefaultNamelist.pl --- minor change
+M models/lnd/clm/bld/config_files/Makefile.in -- add HAVE_GETTIMEOFDAY for new timing, more consistent with cpl7 build
+M models/lnd/clm/bld/config_files/config_definition.xml -- add snicar_frc and carbon_aero
+M models/lnd/clm/bld/clm.cpl7.template -- simplify update for new scripts
+M models/lnd/clm/bld/README ------------- update info.
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml ----- add new namelist items remove irad
+M models/lnd/clm/bld/namelist_files/datm.streams.template.xml --- new Qian datasets
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml --- new defaults
+M models/lnd/clm/bld/build-namelist
+
+>>>>>>>>>>>>>>>>>>>>>>>>> change default resolution to 4x5 remove irad
+M models/lnd/clm/bld/run-pc.csh
+M models/lnd/clm/bld/run-ibm.csh
+M models/lnd/clm/bld/run-lightning.csh
+
+>>>>>>>>>>>>>>>>>>>>>>>>> New 15 layer urban single point datasets
+M models/lnd/clm/bld/urban_input/metropolis_fluxes.txt
+M models/lnd/clm/bld/urban_input/asphaltjungle_fluxes.txt
+M models/lnd/clm/bld/urban_input/mexicocityMEX_fluxes.txt
+M models/lnd/clm/bld/urban_input/vancouverCAN_fluxes.txt
+
+>>>>>>>>>>>>>>>>>>>>>>>>> Code changes documented above
+M models/lnd/clm/src/biogeochem/CASASummaryMod.F90
+M models/lnd/clm/src/biogeochem/CNCStateUpdate2Mod.F90
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate2Mod.F90
+M models/lnd/clm/src/biogeochem/CNNStateUpdate1Mod.F90
+M models/lnd/clm/src/biogeochem/DGVMEcosystemDynMod.F90
+M models/lnd/clm/src/biogeochem/CNBalanceCheckMod.F90
+M models/lnd/clm/src/biogeochem/CNNStateUpdate3Mod.F90
+M models/lnd/clm/src/biogeochem/CNFireMod.F90
+M models/lnd/clm/src/biogeochem/CNMRespMod.F90
+M models/lnd/clm/src/biogeochem/CASAMod.F90
+M models/lnd/clm/src/biogeochem/CNPrecisionControlMod.F90
+M models/lnd/clm/src/biogeochem/CNSummaryMod.F90
+M models/lnd/clm/src/biogeochem/DUSTMod.F90
+M models/lnd/clm/src/biogeochem/CNPhenologyMod.F90
+M models/lnd/clm/src/biogeochem/CNCStateUpdate1Mod.F90
+M models/lnd/clm/src/biogeochem/CNDecompMod.F90
+M models/lnd/clm/src/biogeochem/STATICEcosysDynMod.F90
+M models/lnd/clm/src/biogeochem/CNCStateUpdate3Mod.F90
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate1Mod.F90
+M models/lnd/clm/src/biogeochem/DGVMMod.F90
+M models/lnd/clm/src/biogeochem/CNrestMod.F90
+M models/lnd/clm/src/biogeochem/CNC13StateUpdate3Mod.F90
+M models/lnd/clm/src/biogeochem/CNAnnualUpdateMod.F90
+M models/lnd/clm/src/biogeochem/CNNStateUpdate2Mod.F90
+M models/lnd/clm/src/biogeochem/C13SummaryMod.F90
+M models/lnd/clm/src/biogeochem/CNNDynamicsMod.F90
+M models/lnd/clm/src/biogeochem/CNAllocationMod.F90
+M models/lnd/clm/src/biogeochem/CNC13FluxMod.F90
+M models/lnd/clm/src/biogeochem/CNSetValueMod.F90
+M models/lnd/clm/src/biogeochem/CNVegStructUpdateMod.F90
+M models/lnd/clm/src/main/inicFileMod.F90
+M models/lnd/clm/src/main/clm_varcon.F90
+M models/lnd/clm/src/main/clm_varpar.F90
+M models/lnd/clm/src/main/CNiniTimeVar.F90
+M models/lnd/clm/src/main/clm_comp.F90
+M models/lnd/clm/src/main/driver.F90
+M models/lnd/clm/src/main/ncdio.F90
+M models/lnd/clm/src/main/fileutils.F90
+M models/lnd/clm/src/main/clmtypeInitMod.F90
+M models/lnd/clm/src/main/pftdynMod.F90
+M models/lnd/clm/src/main/iniTimeConst.F90
+M models/lnd/clm/src/main/histFileMod.F90
+M models/lnd/clm/src/main/clm_atmlnd.F90
+M models/lnd/clm/src/main/restFileMod.F90
+M models/lnd/clm/src/main/controlMod.F90
+M models/lnd/clm/src/main/initSurfAlbMod.F90
+M models/lnd/clm/src/main/clm_time_manager.F90
+M models/lnd/clm/src/main/filterMod.F90
+M models/lnd/clm/src/main/clm_varctl.F90
+M models/lnd/clm/src/main/lnd_comp_mct.F90
+M models/lnd/clm/src/main/CASAiniTimeVarMod.F90
+M models/lnd/clm/src/main/areaMod.F90
+M models/lnd/clm/src/main/clmtype.F90
+M models/lnd/clm/src/main/histFldsMod.F90
+M models/lnd/clm/src/main/mkarbinitMod.F90
+M models/lnd/clm/src/riverroute/RtmMod.F90
+M models/lnd/clm/src/biogeophys/SurfaceRadiationMod.F90
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/biogeophys/UrbanInputMod.F90
+M models/lnd/clm/src/biogeophys/SnowHydrologyMod.F90
+M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90
+M models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/biogeophys/FrictionVelocityMod.F90
+M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90
+M models/lnd/clm/src/biogeophys/Hydrology1Mod.F90
+M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/biogeophys/BiogeophysicsLakeMod.F90
+M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/biogeophys/SoilHydrologyMod.F90
+M models/lnd/clm/src/biogeophys/UrbanMod.F90
+M models/lnd/clm/src/biogeophys/DriverInitMod.F90
+M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90
+
+>>>>>>>>>>>>>>>>>>>>>>>>> update documentation
+M README -------------- update information
+M KnownBugs ----------- add info on new known bugs
+
+Summary of testing:
+
+ bluefire: All PASS except TBL and ...
+
+031 smF96 TSM.sh 17p_vodsrsc_m clm_std 19981231:NONE:1800 4x5 gx3v5 48 arb_ic ...................FAIL! rc= 10
+036 smF96 TSM.sh 17p_vodsrsc_m clm_std 19981231:NONE:1800 4x5 gx3v5 48 arb_ic ...................FAIL! rc= 2
+013 blCA4 TBL.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 5
+015 blCA8 TBL.sh _sc_ds clm_urb^nl_urb 19971230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 5
+019 blNB4 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 5
+020 smC61 TSM.sh _sc_dh clm_urb^nl_urb 19981001:NONE:1800 1.9x2.5 gx1v5 -6 arb_ic ...............FAIL! rc= 10
+021 erC61 TER.sh _sc_dh clm_urb^nl_urb 19981001:NONE:1800 1.9x2.5 gx1v5 10+38 arb_ic ............FAIL! rc= 5
+022 brC61 TBR.sh _sc_dh clm_urb^nl_urb_br 19981001:NONE:1800 1.9x2.5 gx1v5 -3+-3 arb_ic .........FAIL! rc= 5
+023 blC61 TBL.sh _sc_dh clm_urb^nl_urb 19981001:NONE:1800 1.9x2.5 gx1v5 48 arb_ic ...............FAIL! rc= 4
+024 smH91 TSM.sh 17p_cnnsc_dh clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@1890 96 startup ..........FAIL! rc= 10
+025 erH91 TER.sh 17p_cnnsc_dh clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@1890 10+38 startup .......FAIL! rc= 5
+026 brH91 TBR.sh 17p_cnnsc_dh clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@1890 72+72 startup .......FAIL! rc= 5
+027 blH91 TBL.sh 17p_cnnsc_dh clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@1890 48 startup ..........FAIL! rc= 4
+029 erH52 TER.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 10+38 arb_ic ...........FAIL! rc= 13
+030 brH52 TBR.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 72+72 arb_ic ...........FAIL! rc= 11
+031 blH52 TBL.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 48 arb_ic ..............FAIL! rc= 7
+
+ lightning/pathscale: all PASS except TBL and ...
+
+009 blCA4 TBL.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 5
+013 blCA8 TBL.sh _sc_ds clm_urb^nl_urb 19971230:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 5
+017 blOC4 TBL.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 5
+019 erA91 TER.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .......................FAIL! rc= 7
+020 brA91 TBR.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -3+-3 arb_ic .......................FAIL! rc= 6
+021 blA91 TBL.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -6 arb_ic ..........................FAIL! rc= 5
+027 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 7
+028 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 6
+029 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 5
+031 smH52 TSM.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 96 arb_ic ..............FAIL! rc= 10
+032 erH52 TER.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 10+38 arb_ic ...........FAIL! rc= 5
+033 brH52 TBR.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 72+72 arb_ic ...........FAIL! rc= 5
+034 blH52 TBL.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 48 arb_ic ..............FAIL! rc= 4
+036 erK51 TER.sh 10p_dgvmsc_dh clm_std 19981230:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 7
+037 brK51 TBR.sh 10p_dgvmsc_dh clm_std 19981230:NONE:1800 10x15 USGS 72+72 arb_ic ...............FAIL! rc= 6
+001 smL51 TSM.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 96 arb_ic .........................FAIL! rc= 10
+002 erL51 TER.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 10+38 arb_ic ......................FAIL! rc= 5
+003 brL51 TBR.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 72+72 arb_ic ......................FAIL! rc= 5
+004 blL51 TBL.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 48 arb_ic .........................FAIL! rc= 4
+005 sm674 TSMtools.sh mkgriddata tools__ds singlept .............................................FAIL! rc= 6
+006 sm774 TSMtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 6
+007 bl774 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 4
+010 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+011 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+012 erP91 TSM_ccsmseq.sh ERS f45_g35 ICN4804Q ...................................................FAIL! rc= 4
+
+ jaguar: ALL PASS except TBL and ....
+
+001 smA74 TSM.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................FAIL! rc= 10
+002 erA74 TER.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .................FAIL! rc= 5
+003 brA74 TBR.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .................FAIL! rc= 5
+013 smE32 TSM.sh 4p_vodsrsc_dm clm_std 19981231:NONE:3600 64x128^360x720 USGS 48 arb_ic .........FAIL! rc= 10
+014 erE32 TER.sh 4p_vodsrsc_dm clm_std 19981231:NONE:3600 64x128^360x720 USGS 10+38 arb_ic ......FAIL! rc= 5
+015 brE32 TBR.sh 4p_vodsrsc_dm clm_std 19981231:NONE:3600 64x128^360x720 USGS 24+24 arb_ic ......FAIL! rc= 5
+017 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+018 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 10+38 arb_ic ................FAIL! rc= 5
+019 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+021 smH92 TSM.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@1890 96 startup ..........FAIL! rc= 10
+022 erH92 TER.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@1890 10+38 startup .......FAIL! rc= 5
+023 brH92 TBR.sh 17p_cnnsc_dm clm_ndepdyn 19980101:NONE:1800 4x5 gx3v5@1890 72+72 startup .......FAIL! rc= 5
+029 smJ05 TSM.sh 4p_casasc_h clm_std^nl_lfiles 19800101:NONE:1800 0.47x0.63 gx1v5 48 startup ....FAIL! rc= 10
+030 smJ74 TSM.sh 4p_casasc_ds clm_std 10001230:NONE:3600 1x1_tropicAtl test -1100 arb_ic ........FAIL! rc= 10
+031 erJ74 TER.sh 4p_casasc_ds clm_std 10001230:NONE:3600 1x1_tropicAtl test -10+-10 arb_ic ......FAIL! rc= 5
+032 brJ74 TBR.sh 4p_casasc_ds clm_std 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_ic ........FAIL! rc= 5
+038 smL62 TSM.sh _sc_m clm_std 19980101:NONE:1800 1.9x2.5 gx1v5 -10 startup .....................FAIL! rc= 10
+039 erL62 TER.sh _sc_m clm_std 19980101:NONE:1800 1.9x2.5 gx1v5 -5+-5 startup ...................FAIL! rc= 5
+040 brL62 TBR.sh _sc_m clm_std 19980101:NONE:1800 1.9x2.5 gx1v5 -10+-10 startup .................FAIL! rc= 5
+
+ breeze/gale/hail/gust/ifort: All PASS except TBL and...
+
+ bangkok: All PASS except TBL and..
+
+005 smA74 TSM.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................FAIL! rc= 10
+006 erA74 TER.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .................FAIL! rc= 5
+007 brA74 TBR.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -5+-5 arb_ic .................FAIL! rc= 5
+009 smD91 TSM.sh _persc_dh clm_per 19981231:NONE:1200 4x5 gx3v5 144 startup .....................FAIL! rc= 10
+010 erD91 TER.sh _persc_dh clm_per 19981231:NONE:1200 4x5 gx3v5 72+72 startup ...................FAIL! rc= 5
+013 smCA4 TSM.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 10
+014 erCA4 TER.sh _sc_ds clm_urb^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -45+-45 arb_ic ......FAIL! rc= 5
+015 brCA4 TBR.sh _sc_ds clm_urb^nl_urb_br 19981001:NONE:3600 1x1_camdenNJ navy -10+-10 arb_ic ...FAIL! rc= 5
+017 smOC4 TSM.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 10
+018 erOC4 TER.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 115+115 arb_icFAIL! rc= 5
+019 brOC4 TBR.sh _vansc_ds clm_urb1pt^nl_urb_br 19920812:NONE:3600 1x1_vancouverCAN navy 72+72 arb_iFAIL! rc= 5
+021 smNB4 TSM.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 10
+022 erNB4 TER.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 59+100 arb_icFAIL! rc= 5
+023 brNB4 TBR.sh _mexsc_ds clm_urb1pt^nl_urb_br 19931201:NONE:3600 1x1_mexicocityMEX navy 72+72 arb_FAIL! rc= 5
+025 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+026 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+027 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+029 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...................FAIL! rc= 10
+030 smH52 TSM.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 96 arb_ic ..............FAIL! rc= 10
+031 erH52 TER.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 10+38 arb_ic ...........FAIL! rc= 5
+032 brH52 TBR.sh 17p_cnnsc_dm clm_std 19980115:NONE:1800 10x15 USGS@1890 72+72 arb_ic ...........FAIL! rc= 5
+038 smK51 TSM.sh 10p_dgvmsc_dh clm_std 19981230:NONE:1800 10x15 USGS 96 arb_ic ..................FAIL! rc= 10
+039 erK51 TER.sh 10p_dgvmsc_dh clm_std 19981230:NONE:1800 10x15 USGS 10+38 arb_ic ...............FAIL! rc= 5
+040 brK51 TBR.sh 10p_dgvmsc_dh clm_std 19981230:NONE:1800 10x15 USGS 72+72 arb_ic ...............FAIL! rc= 5
+042 smL51 TSM.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 96 arb_ic .........................FAIL! rc= 10
+043 erL51 TER.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 10+38 arb_ic ......................FAIL! rc= 5
+044 brL51 TBR.sh _sc_dh clm_std 19980115:NONE:1800 10x15 USGS 72+72 arb_ic ......................FAIL! rc= 5
+046 smL58 TSM.sh _sc_dh clm_std^nl_crcrop 19980115:NONE:1800 10x15 USGS 96 arb_ic ...............FAIL! rc= 10
+047 smL74 TSM.sh _sc_s clm_std 19980101:NONE:1800 1x1_brazil navy -10 arb_ic ....................FAIL! rc= 10
+048 erL74 TER.sh _sc_s clm_std 19980101:NONE:1800 1x1_brazil navy -5+-5 arb_ic ..................FAIL! rc= 5
+049 brL74 TBR.sh _sc_s clm_std 19980101:NONE:1800 1x1_brazil navy -10+-10 arb_ic ................FAIL! rc= 5
+051 sm654 TSMtools.sh mkgriddata tools__ds namelist .............................................FAIL! rc= 6
+052 sm853 TSMtools.sh interpinic tools__o runoptions ............................................FAIL! rc= 6
+053 sm854 TSMtools.sh interpinic tools__ds runoptions ...........................................FAIL! rc= 6
+057 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+Changes answers relative to baseline: Yes! Changes climate
+
+ Summarize any changes to answers:
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change (new climate)
+ - configuration (CPP ifdefs): All
+ - build-namelist command (or complete namelist):
+
+ MSS location of control simulations used to validate new climate:
+
+ Grass optical properties: /OLESON/csm/clm36sci16_clm3_6_11shklit0_5sfc_goa
+
+ ccsm4_0_beta05: /CCSM/csm/b40.018
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+ccsm4_0_beta05 (with clm36sci27_clm3_6_14)
+
+http://www.cgd.ucar.edu/cdp/mai/ccsmweb/b40.018-b40.017/setsIndex.html
+
+http://www.cgd.ucar.edu/tss/clm/diagnostics/clm4.0_dev/clm36sci16_clm3_6_11shklit0_5sfc_goa-clm36sci16_clm3_6_11shklit0_5sfca/setsIndex.html
+
+===============================================================
+===============================================================
+Tag name: clm3_6_14
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Fri Oct 10 11:32:57 MDT 2008
+One-line Summary: Fix some global urban issues, fix pftdyn, really get compile-only option working in testing
+
+Purpose of changes: Fix column and pft averaging for urban (critical for coupling to cam) (from Keith)
+ Fix Qanth (was wasteheat previously) (from Keith)
+ Fix so that pftdyn works (fix from Sam)
+ Really get the compile-only option working in test-suite
+ (so that doesn't re-compile, but does re-run, when sent again)
+
+Bugs fixed (include bugzilla ID): 826 (pftdyn)
+
+Known bugs (include bugzilla ID): 251 (TwoStream), 672 (3.5.4-3.5.14 diffs), 830 (missing C-LAMP mods)
+ 680 (t0 precip diff for seq-ccsm), 789 (pt sims slower than offline)
+ 698 (cprnc bug gives false difference), 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 833 (bug with cam in ccsm4_0_alpha37), 722 (threading slow)
+ 832 (problem with cice bn in ccsm4_0_alpha37)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ http://bugs.cgd.ucar.edu/
+
+Known bugs that will NOT be resolved: 512 (mksurf on PGI), 546 (interpinic for DGVM),
+ 652 (threads different answers with older PGI versions)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, oleson, slevis (Sam provided pftdyn fix, and Keith provided urban fixes)
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts and drv
+
+ scripts to scripts4_081009
+ drv to drvseq2_0_33
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>> Change so that pftdyn will work, do urban averaging, and fix Qanth
+ M models/lnd/clm/src/main/clm_atmlnd.F90 -------------- Make averaging take into account urban (critical for global urban modeling)
+ M models/lnd/clm/src/main/histFldsMod.F90 ------------- Add urban scaling, fix Qanth
+ M models/lnd/clm/src/main/filterMod.F90 --------------- Change urban filter to include pftwgt>0
+ M models/lnd/clm/src/biogeophys/SurfaceAlbedoMod.F90 -- Change filter_vegsol to include pftwgt>0
+>>>>>>>>>>> Fix so that compile-only option leaves compiled program there, doesn't recompile, but does rerun
+ M models/lnd/clm/test/system/TCB.sh
+ M models/lnd/clm/test/system/TSMncl_tools.sh
+ M models/lnd/clm/test/system/TBL.sh
+ M models/lnd/clm/test/system/TSM_ccsmseq.sh
+ M models/lnd/clm/test/system/TSMext_ccsmseq_cam.sh
+ M models/lnd/clm/test/system/TBR.sh
+ M models/lnd/clm/test/system/TCBtools.sh
+ M models/lnd/clm/test/system/test_driver.sh
+ M models/lnd/clm/test/system/TER.sh
+ M models/lnd/clm/test/system/TCT_ccsmseq.sh
+ M models/lnd/clm/test/system/TSMpergro.sh
+ M models/lnd/clm/test/system/TSMcnspinup.sh
+ M models/lnd/clm/test/system/TSMtools.sh
+ M models/lnd/clm/test/system/TSMruncase.sh
+ M models/lnd/clm/test/system/TCBext_ccsmseq_cam.sh
+ M models/lnd/clm/test/system/CLM_runcmnd.sh
+ M models/lnd/clm/test/system/TSM.sh
+
+Summary of testing:
+
+ bluefire: All PASS except
+071 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ jaguar: All PASS
+ bangkok/lf95: All PASS except
+028 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 5
+054 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 6
+055 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ breeze/gale/hail/gust/ifort: All PASS
+
+pftdyn TBL test fails, because pftdyn did not work in previous tag.
+cam and scam tests fail because of bugs 832 and 833 in ccsm4_0_alpha37
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_13
+
+Changes answers relative to baseline: No -- bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_6_13
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Wed Oct 1 13:33:36 MDT 2008
+One-line Summary: Update to new version of cpl7 scripts and build, update externals for versions needed for clm36sci branch, add new CASA tests
+
+Purpose of changes: Update to new version of cpl7 scripts and build.
+ Update externals for versions needed on clm36sci branch.
+ Add new CASA tests.
+ Add $CLM_ACCOUNT as option to test_driver.sh
+ Add single point capability to cpl7 scripts.
+ Add CLM_DEMAND, CLM_BLD_NL_OPTIONS as options to cpl7 scripts.
+ Some code changes from Keith Oleson to fix a CASA startup problem.
+
+Code changes from Keith Oleson
+
+1. Volumetric soil water check in BiogeophysRestMod changed so that it accounts for ponded ice/water
+that may be present in surface layer. If volumetric soil water is above saturation, h2osoi_liq
+and h2osoi_ice are reduced according to their proportion of total water/ice. Both h2osoi_liq
+and h2osoi_ice are limited to be no lower than watmin (currently 0.01_r8 mm). All this done for
+soil points only.
+
+2. In SoilHydrologyMod, variable su changed to:
+
+ su = max(0._r8,(s1-fcov(c)) / max(0.01_r8,1._r8-fcov(c))
+
+to account for the fact that fcov could be one and hence divide by zero could have occurred.
+Also, the factor "1._r8" multiplying fcov in the numerator was removed.
+
+3. watmin made a global parameter available from clm_varcon
+
+Bugs fixed (include bugzilla ID): 805 (too much output in build-streams), 801 (G95 in csm_share),
+ 786 (dshr_map bug), 834 (CASA startup bug),
+
+Known bugs (include bugzilla ID): 251 (TwoStream), 672 (3.5.4-3.5.14 diffs),
+ 680 (t0 precip diff for seq-ccsm),
+ 698 (cprnc bug gives false difference), 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 826 (pftdyn), 833 (bug with cam in ccsm4_0_alpha37)
+ 832 (problem with cice bn in ccsm4_0_alpha37)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ http://bugs.cgd.ucar.edu/
+
+Known bugs that will NOT be resolved: 512 (mksurf on PGI), 546 (interpinic for DGVM),
+ 652 (threads different answers with older PGI versions)
+
+Describe any changes made to build system: Update to new ccsm4_0_alpha37 scripts
+
+ New options in cpl7 scripts: CLM_DEMAND, CLM_BLD_NL_OPTIONS
+
+ CLM_DEMAND: List of output variables to require be set in namelist
+ For example, set to "furbinp" to use urban grid.
+ CLM_BLD_NL_OPTIONS: List of options to pass to clm build-namelist.
+
+ New grid in cpl7 scripts: pt1_pt1 (also set CLM_PT1_NAME) for single point sims
+
+ Add ability to set "none" in clm build-namelist -clm_demand option.
+
+Quickstart to new cpl7 scripts...
+
+ New cpl7 namelists now do two things for you.
+ - Add a ton of error checking at each step -- so it won't let you do something you aren't allowed to
+ - Only show you the variables that you could actually set in your case.
+
+ To accomplish this we use XML files rather than cshell env files. But, the
+ operation sequence is similar with options only changed slightly.
+
+ cd scripts
+ ./create_newcase -help # get help on how to run create_newcase
+ ./create_newcase -case testI -mach bluefire -res f19_g15 -compset I # create new "I" case for bluefire at 1.9x2.5_gx1v5 res
+ # "I" case is clm active, datm7, and inactive ice/ocn
+ cd testI
+ ./xmlchange -help # Get help on editor for XML files
+ ./xmlchange env_conf.xml env_mach_pes # Edit configure files if needed
+ configure -case # create scripts
+ ./xmlchange env_build.xml # Edit build files if needed
+ testI.build # build model and create namelists
+ ./xmlchange env_run.xml # Edit run files if needed
+ bsub < testI.run # submit script
+ # (NOTE: edit env_run.xml to set RESUBMIT to number of times to automatically resubmit)
+
+ Note that the -skip_rundb option to create_newcase no longer needs the argument of "NONE".
+ Syntax of create_tests changed to only one form.
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: erik, oleson and dlawren (source code changes)
+
+List any svn externals directories updated (csm_share, mct, etc.): drv, csm_share, datm7, and scripts
+
+ csm_share, datm7 and scripts include changes required for the clm36sci branch.
+
+ scripts to scripts4_080930
+ drv to drvseq2_0_32
+ datm7 to datm7_080926
+ csm_share to share3_080929
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>> Add $CLM_ACCOUNT env var, change tests around, update to ccsm4_0_alpha37
+ add some more CASA tests.
+M models/lnd/clm/test/system/README
+M models/lnd/clm/test/system/tests_pretag_bluefire
+M models/lnd/clm/test/system/tests_pretag_jaguar
+M models/lnd/clm/test/system/tests_posttag_kraken
+M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+M models/lnd/clm/test/system/tests_posttag_purempi_regression
+M models/lnd/clm/test/system/test_driver.sh ------------------ Add $CLM_ACCOUNT env var
+ update to ccsm4_0_alpha37
+M models/lnd/clm/test/system/input_tests_master -------------- Add CASA 1.9x2.5 tests
+M models/lnd/clm/test/system/TSMext_ccsmseq_cam.sh ----------- Separate call to cice bn
+M models/lnd/clm/test/system/TCT_ccsmseq.sh
+M models/lnd/clm/test/system/TCBext_ccsmseq_cam.sh ----------- Need to set threads/tasks
+M models/lnd/clm/test/system/TSM.sh -------------------------- Remove old namelist name
+
+>>>>>>>>>>>>>> Update for new scripts
+M models/lnd/clm/bld/clm.cpl7.template ----------------------- Straighten out clm_demand
+ Add new env vars. Remove prestaging.
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml -- Update to alpha37
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml- Add file for clm36sci branch
+M models/lnd/clm/bld/build-namelist -------------------------- Allow clm_demand to include none.
+
+>>>>>>>>>>>>>> These are Keith's changes to fix bug 834. They do make it possible for
+ answers to change, but in most cases they don't. It allows code to
+ startup correctly for situations it might fail in, and sets a minimum
+ value in SoilHydrologyMod to guard against divide by zero. This would
+ change answers when amount of ice -- fcov > 0.99 -- which would be rare.
+M models/lnd/clm/src/main/clm_varcon.F90
+M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90
+M models/lnd/clm/src/biogeophys/SoilHydrologyMod.F90
+
+Summary of testing:
+
+ bluefire: All PASS except
+042 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+043 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+044 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+045 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 4
+P
+046 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...................FAIL! rc= 10
+079 blL61 TBL.sh _sc_h clm_std 19980101:NONE:1800 1.9x2.5 gx1v5 -10 startup .....................FAIL! rc= 7
+083 blL62 TBL.sh _sc_m clm_std 19980101:NONE:1800 1.9x2.5 gx1v5 -10 startup .....................FAIL! rc= 7
+107 brJ74 TBR.sh 4p_casasc_ds clm_std 10001230:NONE:3600 1x1_tropicAtl test -3+-3 arb_ic ........FAIL! rc= 11
+127 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ kraken: All PASS except all TER and TBR tests fail, because of a script problem and ends early
+ lightning/pathscale: All PASS except
+022 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+023 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+024 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+025 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 4
+026 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...................FAIL! rc= 10
+050 erP11 TSM_ccsmseq.sh ERS T31_g35 ICN4804 ....................................................FAIL! rc= 5
+ bangkok/lf95: All PASS except
+025 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+026 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+027 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+028 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 4
+029 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...................FAIL! rc= 10
+054 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48 .......................FAIL! rc= 6
+055 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ breeze/gale/hail/gust/ifort: All PASS
+
+pftdyn tests fail because of previous problem (bug 826). ext_ccsmseq_ tests fail
+because of problem with ccsm4_0_alpha37 (bug 833).
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_12
+
+Changes answers relative to baseline: Only for some cases, see tests 079 and 083 on
+bluefire above
+
+===============================================================
+===============================================================
+Tag name: clm3_6_12
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Sun Sep 21 10:04:22 MDT 2008
+One-line Summary: Fix restarts for urban, add capability to do global urban experiments, add new forcing height changes, remove cpl6
+
+Purpose of changes: Fix restarts for urban model as well as adding capability to do global urban experiments.
+ It also adds the new forcing height changes into the trunk.
+ And we remove all the cpl6 #ifdef's, source codes, and associated scripts and script options.
+ Also fix some memory leaks found in MCT.
+ Add testing for kraken.
+ Fix branch tests so they change the start_ymd.
+ Add some more tests for CASA.
+ Set minimum urban percentage to use from 5% to 1%.
+ Completely remove COUP_CAM #ifdef as NOT needed anymore.
+
+Bugs fixed (include bugzilla ID): Fix urban model restarts, remove cpl6 (755), MCT memory leak (825)
+
+Known bugs (include bugzilla ID): 251 (TwoStream), 672 (3.5.4-3.5.14 diffs),
+ 680 (t0 precip diff for seq-ccsm),
+ 698 (cprnc bug gives false difference), 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 805 (too much output in build-streams), 826 (pftdyn)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ http://bugs.cgd.ucar.edu/
+
+Known bugs that will NOT be resolved: 512 (mksurf on PGI), 546(interpinic for DGVM),
+ 652 (threads different answers with older PGI versions)
+
+Describe any changes made to build system: Add nofire option to configure, remove cpl6 option
+
+Describe any changes made to the namelist: Add new urban oriented output history variables:
+
+ _U, and _R (Urban and Rural) for:
+ QRUNOFF, TREFMXAV, TREFMNAV, TSA, SoilAlpha, TG, Q2M, TREFAV
+ URBAN_AC, and URBAN_HEAT, Qanth, SWup, LWup, QTau, HWR, Wind, Qair
+ and ZBOT_PFT for forcing height
+
+ Change build-namelist so that to use a dataset with urban points on it you
+ need to use "-clm_demand furbinp". This way it will not only select the appropriate
+ furbinp dataset -- but it will select the correct surface dataset that includes
+ urban data on it. Such as for 10x15 and 1.9x2.5 surface datasets where there is
+ now an urban version as well as the standard version.
+
+
+List any changes to the defaults for the boundary datasets: Add urbanc point dataset,
+ and 1.9x2.5 and 10x15 urban datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self (some changes by Keith Oleson)
+
+List any svn externals directories updated (csm_share, mct, etc.): drv, datm7, mct
+
+ drv to drvseq3_0_26
+ datm7 to datm7_080907
+ mct to MCT2_5_1_080522
+
+List all files eliminated: Remove cpl6 files
+
+R models/lnd/clm/test/system/TCText_ccsmcon.sh
+R models/lnd/clm/test/system/TSMext_ccsmcon.sh
+R models/lnd/clm/bld/clm.cpl6.template
+R models/lnd/clm/src/main/program_csm.F90
+R models/lnd/clm/src/main/clm_csmMod.F90
+
+List all files added and what they do:
+
+A models/lnd/clm/test/system/config_files/4p_casasc_ds ----- Add serial test for CASA
+A models/lnd/clm/test/system/nl_files/clm_urb -------------- For standard urban tests.
+A models/lnd/clm/test/system/nl_files/nl_urb_br ------------ Urban namelist for branch tests.
+A models/lnd/clm/test/system/tests_posttag_kraken ---------- Add tests for kraken
+A models/lnd/clm/tools/ncl_scripts/generate_ascii_avg_fv1_9x2_5_urbanparam_file_p7.ncl -- script to create furbinp dataset
+A models/lnd/clm/tools/mksurfdata/mksurfdata.globalurban --- Example namelist to make a global urban surface dataset
+A models/lnd/clm/bld/urban_input/urbanc_alpha_fluxes.txt --- Add urban intercomparison test case
+
+List all existing files that have been modified, and describe the changes:
+
+>>>>>>>>>>>>>>> Remove cpl6 option, add nofire, make urban point datasets consistent with global Feddema datasets
+M models/lnd/clm/bld/configure -------------------------------- Remove cpl6 option, add nofire option
+M models/lnd/clm/bld/queryDefaultNamelist.pl ------------------ Add -filenameonly option
+M models/lnd/clm/bld/urban_input/metropolis_fluxes.txt
+M models/lnd/clm/bld/urban_input/urbanc_alpha_fluxes.txt
+M models/lnd/clm/bld/urban_input/asphaltjungle_fluxes.txt
+M models/lnd/clm/bld/urban_input/mexicocityMEX_fluxes.txt
+M models/lnd/clm/bld/urban_input/vancouverCAN_fluxes.txt
+M models/lnd/clm/bld/config_files/config_definition.xml
+M models/lnd/clm/bld/namelist_files/namelist_definition.xml
+M models/lnd/clm/bld/namelist_files/datm.streams.template.xml
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
+M models/lnd/clm/bld/build-namelist --------------------------- Have urban surface datasets dependent on furbinp
+
+M models/lnd/clm/test/system/README.testnames
+M models/lnd/clm/test/system/tests_posttag_urban
+M models/lnd/clm/test/system/tests_pretag_jaguar
+M models/lnd/clm/test/system/tests_pretag_bluefire
+M models/lnd/clm/test/system/tests_posttag_purempi_regression
+M models/lnd/clm/test/system/tests_posttag_hybrid_regression
+M models/lnd/clm/test/system/tests_pretag_bluevista
+M models/lnd/clm/test/system/tests_posttag_lightning
+M models/lnd/clm/test/system/nl_files/clm_urb1pt
+M models/lnd/clm/test/system/input_tests_master
+M models/lnd/clm/test/system/README
+M models/lnd/clm/test/system/CLM_runcmnd.sh
+M models/lnd/clm/test/system/TBR.sh ---------------------- Change so start_ymd of branch runs is initial-length after original start-date
+M models/lnd/clm/test/system/test_driver.sh -------------- Reduce from premium to regular, add kraken
+M models/lnd/clm/test/system/mknamelist ------------------ Set hist_* values for second file
+M models/lnd/clm/test/system/nl_files/nl_crcrop ---------- Set hist_dens for second file
+M models/lnd/clm/test/system/nl_files/nl_urb ------------- Set hist_dens for second file, add more fields to list
+M models/lnd/clm/test/system/nl_files/nl_std ------------- Set hist_dens for second file
+M models/lnd/clm/test/system/nl_files/nl_lfiles ---------- Set hist_dens for second file
+M models/lnd/clm/test/system/input_tests_master ---------- Change TBR tests, add more CASA tests
+
+M models/lnd/clm/tools/mksurfdata/mkurban.F90 ------------ Change threshold to ignore urban from 5% to 1%
+
+M models/lnd/clm/src/biogeophys/FrictionVelocityMod.F90 -- For DUST fix forcing height appropriately
+M models/lnd/clm/src/biogeophys/UrbanMod.F90 ------------- Don't set pointers if no urban points
+
+>>>>>>>>>>>>>>>> Remove COUP_CSM #ifdefs
+
+M models/lnd/clm/src/main/driver.F90 --------- Also make sure urban calls have urban points
+M models/lnd/clm/src/main/accFldsMod.F90
+M models/lnd/clm/src/main/clmtypeInitMod.F90
+M models/lnd/clm/src/main/initializeMod.F90
+M models/lnd/clm/src/main/iniTimeConst.F90
+M models/lnd/clm/src/main/histFileMod.F90
+M models/lnd/clm/src/main/restFileMod.F90
+M models/lnd/clm/src/main/controlMod.F90
+M models/lnd/clm/src/main/initSurfAlbMod.F90
+M models/lnd/clm/src/main/clm_time_manager.F90
+M models/lnd/clm/src/main/clm_varctl.F90
+M models/lnd/clm/src/main/subgridAveMod.F90
+M models/lnd/clm/src/main/initGridCellsMod.F90
+M models/lnd/clm/src/main/spmdMod.F90
+M models/lnd/clm/src/main/surfrdMod.F90 ------------- Also remove COUP_CAM #ifdef
+M models/lnd/clm/src/main/do_close_dispose.F90
+M models/lnd/clm/src/main/clmtype.F90 --------------- Also forcing height changes
+M models/lnd/clm/src/main/histFldsMod.F90
+M models/lnd/clm/src/main/mkarbinitMod.F90
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90
+M models/lnd/clm/src/biogeophys/SoilTemperatureMod.F90
+M models/lnd/clm/src/biogeophys/UrbanInputMod.F90
+M models/lnd/clm/src/biogeophys/Biogeophysics1Mod.F90 ------ Also forcing height changes
+M models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90
+M models/lnd/clm/src/biogeophys/UrbanInitMod.F90
+M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90
+M models/lnd/clm/src/biogeophys/BiogeophysicsLakeMod.F90 ------ Also forcing height changes
+M models/lnd/clm/src/biogeophys/BiogeophysRestMod.F90 --------- Fix restarts for urban
+M models/lnd/clm/src/biogeophys/HydrologyLakeMod.F90
+M models/lnd/clm/src/biogeophys/BareGroundFluxesMod.F90
+M models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90
+
+Summary of testing:
+
+ bluefire: All PASS except TBL and
+042 smG41 TSM.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+043 erG41 TER.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+044 brG41 TBR.sh 17p_sc_dh clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+046 smG45 TSM.sh 17p_sc_h clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...................FAIL! rc= 10
+ jaguar: All PASS except TBL and
+019 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+ kraken: All PASS except TBL and TER and TBR (this may be a setup problem) and
+017 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+ lightning/pathscale: All PASS except TBL and
+ bangkok/lf95: All PASS except TBL and
+025 smG42 TSM.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ..................FAIL! rc= 10
+026 erG42 TER.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+027 brG42 TBR.sh 17p_sc_dm clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic ................FAIL! rc= 5
+029 smG46 TSM.sh 17p_sc_m clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...................FAIL! rc= 10
+ kraken: All PASS except TBL and
+017 smG43 TSM.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 144 arb_ic ...........
+.......FAIL! rc= 10
+018 erG43 TER.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 10+38 arb_ic .........
+.......FAIL! rc= 13
+019 brG43 TBR.sh 17p_sc_do clm_pftdyn 10001230:NONE:1800 10x15 USGS 72+72 arb_ic .........
+.......FAIL! rc= 5
+ breeze/gale/hail/gust/ifort: All PASS
+
+ pftdyn tests fail on all platforms -- due to a previous problem that was not
+detected because of a bug in the test.
+ TER and TBR tests fail on kraken -- this may be a setup problem. Possibly a problem
+with newcprnc? I'm not sure but since it passes elsewhere, I don't think it's a problem
+in the code.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_11
+
+Changes answers relative to baseline: Forcing height changes cause answers to change
+
+===============================================================
+===============================================================
+Tag name: clm3_6_11
+Originator(s): dlawren (Lawrence David 1384 CGD)
+Date: Tue Aug 26 21:53:22 MDT 2008
+One-line Summary: Ice stream for snow capped regions
+
+Purpose of changes: Split liquid and ice runoff streams in snow capped situations
+
+Bugs fixed (include bugzilla ID): None
+
+Known bugs (include bugzilla ID): 251 (TwoStream), 672 (3.5.4-3.5.14 diffs),
+ 680 (t0 precip diff for seq-ccsm),
+ 698 (cprnc bug gives false difference), 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 805 (too much output in build-streams)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ http://bugs.cgd.ucar.edu/
+
+Known bugs that will NOT be resolved: 512 (mksurf on PGI), 546(interpinic for DGVM),
+ 652 (threads different answers with older PGI versions)
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: Update version of pft-physiology file used
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: erik
+
+List any svn externals directories updated (csm_share, mct, etc.): scripts and csm_share
+
+ scripts to scripts4_080731
+ csm_share to share3_080801
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml --- Use the same pft-physiology file for every option
+M models/lnd/clm/src/main/clmtypeInitMod.F90
+M models/lnd/clm/src/main/pft2colMod.F90
+M models/lnd/clm/src/main/clmtype.F90
+M models/lnd/clm/src/main/histFldsMod.F90
+M models/lnd/clm/src/main/clm_time_manager.F90 -- Hack for fake Gregorian calendar
+M models/lnd/clm/src/riverroute/RtmMod.F90 --------------- two runoff streams, liq and ice (qflx_snwcp_ice)
+M models/lnd/clm/src/biogeophys/Biogeophysics2Mod.F90 ---- dew snwcp
+M models/lnd/clm/src/biogeophys/Hydrology1Mod.F90 -------- snow and rain split for snwcp
+M models/lnd/clm/src/biogeophys/SoilHydrologyMod.F90 ----- liq snwcp
+M models/lnd/clm/src/biogeophys/Hydrology2Mod.F90 -------- qrgwl minus snwcp_ice
+M models/lnd/clm/src/biogeophys/HydrologyLakeMod.F90 ----- lake snwcp
+M models/lnd/clm/src/biogeophys/BiogeophysicsLakeMod.F90 - initialize snwcp fields to zero for lakes
+M models/lnd/clm/src/biogeophys/BalanceCheckMod.F90 ------ revised balance check
+ >>>>>>>>>> Get scam test working
+M models/lnd/clm/test/system/TSMext_ccsmseq_cam.sh
+M models/lnd/clm/test/system/nl_files/scam
+M models/lnd/clm/test/system/TSCext_ccsmseq_scam.sh
+M models/lnd/clm/test/system/config_files/scam_ds
+M models/lnd/clm/test/system/config_files/ext_ccsm_seq_64x128_s
+M models/lnd/clm/test/system/nl_files/scam
+M models/lnd/clm/test/system/nl_files/scam_prep
+
+Summary of testing:
+
+ bluefire: All PASS except TBL tests
+ lightning/pathscale: All PASS except TBL tests
+ bangkok/lf95: All PASS except TBL tests
+ breeze/gale/hail/gust/ifort: All PASS
+
+ Didn't test on jaguar -- since it was down.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_10
+
+Changes answers relative to baseline: Yes -- pft-physiology file, RTM changes
+
+===============================================================
+===============================================================
+Tag name: clm3_6_10
+Originator(s): tcraig
+Date: Fri Aug 15 09:05:50 MDT 2008
+One-line Summary: extend rtm tracer, ascale for tri-grids, AIX O3 to O2
+
+Purpose of changes: extend rtm to handle multiple tracers. added
+ second tracer to rtm associated with frozen water. first tracer
+ is now liquid water. both are passed to cpl7 now via the roff and
+ ioff fields.
+
+ add ascale field to land model in support of model running on its
+ own grid. ascale is a field provided by the coupler to the land model
+ via the driver "domain" datatype. it is needed to correct fluxes
+ in the land model for conservation. it is being applied to the
+ land to rtm fluxes and will need to be fully validated in a ccsm4
+ tri-grid configuration which is still under development.
+
+ change AIX optimization from -O3 to -O2 at request of LMWG. not
+ needed for these changes in particular. see bug #812.
+
+Bugs fixed (include bugzilla ID): 812
+
+Known bugs (include bugzilla ID): 251 (TwoStream), 672 (3.5.4-3.5.14 diffs),
+ 680 (t0 precip diff for seq-ccsm),
+ 698 (cprnc bug gives false difference), 701 (svn keyword)
+ 717 (archiving bug -- only archive 1000 files at a time)
+ 805 (too much output in build-streams)
+ 1079 (rpointer file updated with clm.i files)
+ 1083 (Units of NEE exported should be kg CO2 NOT kg C)
+ http://bugs.cgd.ucar.edu/
+
+Known bugs that will NOT be resolved: 512 (mksurf on PGI), 546(interpinic for DGVM),
+ 652 (threads different answers with older PGI versions)
+
+Describe any changes made to build system:
+ change AIX -O3 to -O2 at request of LMWG, incorporated
+ into tag for convenience.
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes:
+ added extra rtm tracer, should have no noticeable impact on timing
+ or memory.
+
+Code reviewed by: tcraig
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+List all files eliminated: none
+
+List all files added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M bld/config_files/Makefile.in
+M src/main/clmtypeInitMod.F90
+M src/main/pft2colMod.F90
+M src/main/clm_atmlnd.F90
+M src/main/clm_csmMod.F90
+M src/main/lnd_comp_mct.F90
+M src/main/domainMod.F90
+M src/main/clmtype.F90
+M src/main/histFldsMod.F90
+M src/riverroute/RtmMod.F90
+M src/riverroute/RunoffMod.F90
+M src/biogeophys/Biogeophysics2Mod.F90
+M src/biogeophys/Hydrology1Mod.F90
+M src/biogeophys/SoilHydrologyMod.F90
+
+- change AIX -O3 to -O2
+- add ascale implementation. add asca field to domain datatype, set
+ for atm and lnd domains. default is 1.0. received from coupler
+ in first run call. reset in lnd only if atm and lnd domain are same.
+- split qflx_snowcap term into qflx_snowcap_rain and qflx_snowcap_snow.
+ snowcap_rain term is same implementation as old snowcap term.
+ snowcap_snow is set to zero now. potential future mods are noted
+ and commented out, search for tcx_snowcap_new in src code.
+- implement multiple tracers extensibility in rtm. add frozen
+ runoff tracer in addition to liquid runoff tracer.
+- set roff and ioff runoff terms in lnd_comp_mct to send back to coupler
+- update rtm restart file, support backward compatibility by setting
+ runoff tracers to zero if the new fields are not on the restart file.
+- update history file for new rtm tracers. requires individual fields
+ to be copied from tracer arrays to single field arrays for history
+ interface.
+
+Summary of testing:
+
+ bluefire: all PASS except
+ 073 blL61 TBL.sh _sc_h clm_std 19980101:MONTHLY:1800 1.9x2.5 gx1v5 -10 startup
+ 077 blL62 TBL.sh _sc_m clm_std 19980101:MONTHLY:1800 1.9x2.5 gx1v5 -10 startup
+ 085 blL74 TBL.sh _sc_s clm_std 19980101:6-HOURLY:1800 1x1_brazil navy -10 arb_ic
+ 087 blL78 TBL.sh _sc_s clm_std 19971231:NONE:1800 1x1_brazil navy -10 arb_ic
+ the above 4 bl cases FAIL due to -O3 to -O2 optimization change
+ 113 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3
+ sm984 fails due to inconsistent driver in test
+ jaguar: all PASS except
+ 038 erP65 TSM_ccsmseq.sh ERS f19_g13 I
+ 039 erP15 TSM_ccsmseq.sh ERS T31_g35 ICN
+ 040 erP66 TSM_ccsmseq.sh ERH f19_g13 I
+ 041 erP16 TSM_ccsmseq.sh ERH T31_g35 ICN
+ 042 erP67 TSM_ccsmseq.sh ERB f19_g13 I
+ 043 erP17 TSM_ccsmseq.sh ERB T31_g35 ICN
+ erP* tests fail due to script error
+ bangkok/lf95: all PASS except
+ 046 sm952 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dh ext_ccsm_seq_cam 48
+ 047 sm984 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3
+ sm952 and sm984 fail due to inconsistent driver in test
+ breeze/gale/hail/gust/ifort: all PASS
+
+ bluefire cam pretag: all PASS except previously documented failures
+ bluefire ccsm4 pretag: all PASS except previously documented failures
+ compare with alpha33 FAILS since rtm not bit-for-bit in some tests.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_6_09
+
+Changes answers relative to baseline:
+ change of AIX -O3 to -O2 changes some results by what ap.........FAIL! rc= 7
+061 smK17 TSM.sh 10p_dgvmsc_h clm_std 19981231:NONE:1800 48x96 gx3v5 -213 arb_ic ................FAIL! rc= 10
+065 blK71 TBL.sh 10p_dgvmsc_s clm_std 19971231:NONE:1800 1x1_brazil navy -670 arb_ic ............FAIL! rc= 5
+088 smL83 TSM.sh _sc_dh clm_std 19980115:DAILY:3600 5x5_amazon navy -10 arb_ic ..................FAIL! rc= 10
+089 erL83 TER.sh _sc_dh clm_std 19980115:DAILY:3600 5x5_amazon navy -5+-5 arb_ic ................FAIL! rc= 5
+090 brL83 TBR.sh _sc_dh clm_std 19980115:DAILY:3600 5x5_amazon navy -10+-10 arb_ic ..............FAIL! rc= 6
+091 blL83 TBL.sh _sc_dh clm_std 19980115:DAILY:3600 5x5_amazon navy -10 arb_ic ..................FAIL! rc= 4
+095 bl711 TBLtools.sh mksurfdata tools__ds namelist .............................................FAIL! rc= 7
+097 bl771 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 7
+ lightning/pathscale:
+------>>>>>>> Bug 694
+011 er111 TER.sh 4p_vodsr_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13
+012 br111 TBR.sh 4p_vodsr_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 24+24 arb_ic ..............FAIL! rc= 11
+025 smE13 TSM.sh 4p_vodsrsc_do clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 10
+026 erE13 TER.sh 4p_vodsrsc_do clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ............FAIL! rc= 5
+027 brE13 TBR.sh 4p_vodsrsc_do clm_std 19981231:YEARLY:1800 48x96 gx3v5 24+24 arb_ic ............FAIL! rc= 5
+029 smE16 TSM.sh 4p_vodsrsc_o clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ................FAIL! rc= 10
+ lightning/ifort:
+004 blA91 TBL.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -10 arb_ic .........................FAIL! rc= 5
+008 blA71 TBL.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................FAIL! rc= 5
+------>>>>>>> Bug 694
+010 sm111 TSM.sh 4p_vodsr_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic .................FAIL! rc= 10
+011 er111 TER.sh 4p_vodsr_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 5
+012 br111 TBR.sh 4p_vodsr_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 24+24 arb_ic ..............FAIL! rc= 5
+013 bl111 TBL.sh 4p_vodsr_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic .................FAIL! rc= 4
+014 sm114 TSM.sh 4p_vodsr_h clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ..................FAIL! rc= 10
+016 erE11 TER.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ............FAIL! rc= 13
+017 brE11 TBR.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 24+24 arb_ic ............FAIL! rc= 11
+018 blE11 TBL.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 5
+ tempest:
+009 smB11 TSMruncase.sh .........................................................................FAIL! rc= 4
+------>>>>>>> Bug 694
+011 er111 TER.sh 4p_vodsr_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13
+012 br111 TBR.sh 4p_vodsr_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 24+24 arb_ic ..............FAIL! rc= 11
+036 erE31 TER.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 64x128^360x720 USGS 10+38 arb_ic ....FAIL! rc= 7
+037 brE31 TBR.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 64x128^360x720 USGS 24+24 arb_ic ....FAIL! rc= 6
+045 smH01 TSM.sh 17p_cnnsc_h clm_std^nl_lfiles 19800101:NONE:1800 0.47x0.63 gx1v5@2000 48 startup FAIL! rc= 10
+
+CLM tag used for the baseline comparison tests if applicable: clm3_5_19
+
+Changes answers relative to baseline: Bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_5_19
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Thu Mar 6 14:52:17 MST 2008
+One-line Summary: Change directory structure to mimic CCSM, fix so no NaNS on BGC interpinic output, new half degree CN clmi dataset
+
+Purpose of changes: move cdir1_clm3_5_18 to trunk. Get directory structure to look like CCSM.
+
+ models ------------------------------ Model source code for each component
+ models/lnd
+ models/lnd/clm ---------------------- CLM source code, build-scripts, tools, and testing
+ models/lnd/clm/test/system ---------- CLM test scripts
+ models/lnd/clm/tools ---------------- CLM tools
+ models/lnd/clm/tools/mksurfdata
+ models/lnd/clm/tools/ncl_scripts
+ models/lnd/clm/tools/interpinic
+ models/lnd/clm/tools/mkgriddata
+ models/lnd/clm/tools/mkdatadomain
+ models/lnd/clm/tools/cprnc
+ models/lnd/clm/bld ------------------ CLM build scripts
+ models/lnd/clm/bld/run-ibm.csh ------ sample CLM run script for the IBM
+ models/lnd/clm/bld/urban_input
+ models/lnd/clm/bld/usr.src
+ models/lnd/clm/bld/perl5lib
+ models/lnd/clm/doc ------------------ CLM documentation
+ models/lnd/clm/doc/UsersGuide
+ models/lnd/clm/doc/CodeReference
+ models/lnd/clm/doc/Dev
+ models/lnd/clm/src ------------------ CLM specific source code directories
+ models/lnd/clm/src/biogeochem
+ models/lnd/clm/src/main
+ models/lnd/clm/src/riverroute
+ models/lnd/clm/src/biogeophys
+ models/ocn/socn --------------------- stub ocean model
+ models/ice
+ models/ice/sice --------------------- stub sea-ice model
+ models/atm
+ models/atm/datm7 -------------------- data atmosphere model
+ models/atm/datm7/bld
+ models/utils ------------------------ Utilities
+ models/utils/esmf_wrf_timemgr ------- ESMF WRF time-manager API
+ models/utils/timing ----------------- timing utilities
+ models/utils/mct -------------------- Model Coupling Toolkit
+ models/utils/pio -------------------- Parallel I/O
+ models/drv -------------------------- Sequential CCSM source code
+ models/drv/seq_mct
+ models/drv/seq_mct/driver
+ models/csm_share -------------------- CCSM share code (shared between CCSM component models)
+ scripts ----------------------------- CCSM build, run and testing scripts
+ scripts/README ---------------------- ReadMe file on CCSM scripts
+ doc --------------------------------- CCSM documentation (currently out of date)
+
+ Changes so that interpinic doesn't output NaNS on AIX compiler for CN configuration.
+
+ QUICKSTART: using the new CPL7 scripts:
+
+ cd scripts
+ ./create_newcase # get help on how to run create_newcase
+ ./create_newcase -case testI -mach blueice -res f19_g15 -compset I # create new "I" case for blueice at 1.9x2.5_gx1v5 res
+ # "I" case is clm active, datm7, and inactive ice/ocn
+ cd testI
+ configure -mach blueice # create scripts
+ testI.build # build model and create namelists
+ bsub < testI.run # submit script
+ # (NOTE: edit env_run to set RESUBMIT to number of times to automatically resubmit)
+
+Bugs fixed (include bugzilla ID): 681 (archiving/resub), 696 (save datm7 files) , 707 (xlf90 bug with CAM)
+
+Known bugs (include bugzilla ID): 251 (TwoStream), 672 (3.5.4-3.5.14 diffs), 680 (t0 precip diff for seq-ccsm),
+ 694 (restarts for offline) , 697 (version etc.), 698 (cprnc bug), 701 (svn keyword),
+ 708, (xlf bug on bluevista)
+ http://bugs.cgd.ucar.edu/
+
+ New bugs found: 708 -- bug with new xlf90 compiler on bluevista for CASA
+ 710 -- Some variables are NaNs on clm.i output from CN configuration
+
+ Known bugs that will NOT be resolved: 512 (mksurf on PGI), 546 (interpinic for DGVM),
+ 652 (threads different answers with older PGI versions)
+
+Describe any changes made to build system: Changed to work with new directory structure
+ (also works with any wildcard in clm "models/lnd/clm*" directory name)
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: Update half degree CN clmi file
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.): drv (also add in CCSM doc and scripts directories as externals)
+ doc_060127, seqmct45_scripts_080108, drvseq2_0_10
+
+List all files eliminated: Files moved around extensively
+
+List all files added and what they do: Files moved around extensively
+
+List all existing files that have been modified, and describe the changes: Files moved around extensively
+
+ models/lnd/clm/bld/DefaultCLM_INPARM_Namelist.xml --- change CN half degree clmi file
+ models/lnd/clm/bld/clm.cpl6.template ---------------- change assumed paths (use wildcard for models/lnd/clm*)
+ models/lnd/clm/bld/clm.cpl7.template ---------------- change assumed paths (use wildcard for models/lnd/clm*)
+ models/lnd/clm/bld/configure ------------------------ get to work in new directory structure
+ models/lnd/clm/bld/run-ibm.csh ---------------------- fix archiving, and resubmit
+ models/lnd/clm/bld/run-lightning.csh ---------------- fix archiving, and resubmit
+ models/lnd/clm/bld/run-pc.csh ----------------------- fix archiving, and resubmit
+ models/lnd/clm/src/main/clm_time_manager.F90 -------- make save statements explicit
+ models/lnd/clm/tools/*/Makefile --------------------- change so CLM_ROOT is top of directory structure with
+ models/lnd/clm* assumed below
+ models/lnd/clm/tools/interpinic/interpinic.F90 ------ get numrad dimsize, on AIX check for NaNs and convert to spval,
+ if weights == 0 set values to spval
+ models/lnd/clm/test/system/test_driver.sh ----------- new directory structure, update to ccsm3_9_beta03 and ccsm4_0_alpha25
+ models/lnd/clm/test/system/TBL.sh ------------------- new directory structure
+ models/lnd/clm/test/system/TBLtools.sh -------------- new directory structure
+ models/lnd/clm/test/system/TSMncl_tools.sh ---------- new directory structure
+ models/lnd/clm/test/system/TBR.sh ------------------- new directory structure
+ models/lnd/clm/test/system/TER.sh ------------------- new directory structure
+ models/lnd/clm/test/system/TSM.sh ------------------- new directory structure
+ models/lnd/clm/test/system/TSMpergro.sh ------------- new directory structure
+ models/lnd/clm/test/system/TSMtools.sh -------------- new directory structure
+ models/lnd/clm/test/system/TSMcnspinup.sh ----------- new directory structure
+ models/lnd/clm/test/system/TCBext_ccsmseq_cam.sh ---- new directory structure
+ models/lnd/clm/test/system/TCBtools.sh -------------- new directory structure
+ models/lnd/clm/test/system/TCText_ccsmcon.sh -------- new directory structure, add blueice
+
+Summary of testing:
+
+ bluevista: All PASS except
+052 smJ11 TSM.sh 4p_casasc_dh clm_std 19981231:NONE:1800 48x96 gx3v5 48 arb_ic ..................FAIL! rc= 4
+053 erJ11 TER.sh 4p_casasc_dh clm_std 19981231:NONE:1800 48x96 gx3v5 10+38 arb_ic ...............FAIL! rc= 5
+054 brJ11 TBR.sh 4p_casasc_dh clm_std 19981231:NONE:1800 48x96 gx3v5 24+24 arb_ic ...............FAIL! rc= 5
+102 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ CAM tests: All PASS except:
+060 sm711 TSM.sh h5x8adm adia 9s ..................................FAIL! rc= 6
+ blueice:
+ CPL7 test_scripts: ERS.f19_g15.I.blueice, ERB.f19_g15.I.blueice, ERS.f45_g35.I.blueice
+FAIL ERB.f19_g15.I.blueice
+ CPL6 test_scripts: PASS ERT_OS.f19_g15.I.blueice PASS ERH_OS.T31_g35.ICN.blueice
+ jaguarcnl: All PASS
+ lightning: All PASS except
+019 er112 TER.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13
+ bangkok/lf95: All PASS except
+18 er112 TER.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13
+052 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ tempest: All PASS
+
+CLM tag used for the baseline comparison tests if applicable: ccsm4_alpha25 with clm3_5_18 in place of default clm
+
+Changes answers relative to baseline: No bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_5_18
+Originator(s): erik (Erik Kluzek)
+Date: Thu Feb 21 22:57:39 MST 2008
+One-line Summary: Update to latest seq-ccsm4.alpha tag
+
+Purpose of changes: Get clm trunk to work with latest ccsm4.alpha24 tag
+
+Bugs fixed (include bugzilla ID): 678 (get clm to work with latest cpl7)
+
+Known bugs (include bugzilla ID): 251, 512, 546, 652, 672, 675, 676, 680,
+ 681, 694, 696, 697, 698, 701, 702, 707, 708
+ http://bugs.cgd.ucar.edu/
+
+ New bugs: 697-- username,version,hostname NOT in seq-driver namelists
+ 707-- bug on new xlf90 compiler on bluevista for running with CAM
+ 708-- bug on new xlf90 compiler on bluevista for running with CASA
+
+Describe any changes made to build system: minor change in configure script
+ name of mode ext_cam changed to ext_ccsm_seq in configure script
+
+Describe any changes made to the namelist: Sequential driver namelists change
+
+ccsm_inparm namelist changes to seq_infodata_inparm
+ (remove restart_override, username, version, hostname)
+timemgr_inparm namelist changes to seq_timemgr_inparm
+ (remove restart_overrideTMG, stop_final_ymd [use stop_ymd instead] )
+
+List any changes to the defaults for the boundary datasets:
+ Add 2.65x3.33 datasets, newer 1x1_brazil domain file
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: mvertens (original version on seq branch)
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+drv, datm7, sice, socn, csm_share, and mct
+
+src/drv drvseq2_0_07
+src/datm7 drva_datm7_070824_tags/drva07_datm7_071129
+src/sice stubs1_0_7
+src/socn stubs1_0_7
+src/csm_share drva_share3_070903_tags/loga25_share3_071107
+src/utils/mct seqa_MCT2_3_0_070524_tags/seqa07_MCT2_4_2_071026
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+A + bld/ExtSeqCCSMDrvInNamelistsDescriptions.xml -- for moving drv_in namelist items
+A + bld/clm.cpl7.template ------------------------- for running with cpl7
+
+List all existing files that have been modified, and describe the changes:
+
+------------- Get external CAM tests working, and with changes to seq-ccsm
+M test/system/TSMext_ccsmseq_cam.sh
+M test/system/test_driver.sh
+M test/system/tests_posttag_hybrid_regression
+M test/system/tests_posttag_purempi_regression
+M test/system/nl_files/scam
+M test/system/nl_files/scam_prep
+M test/system/nl_files/ext_ccsm_seq_cam
+M test/system/TSM.sh
+
+------------- Now need clm_varpar.F90 in tools
+M tools/mksurfdata/Srcfiles
+M tools/mkgriddata/Srcfiles
+
+------------- Add ext_ccsm_seq, add 2.65x3.33 datasets, change for new seq-ccsm namelists
+ switch 1x1_brazil domain file
+M bld/configure
+M bld/DefaultCLM_INPARM_Namelist.xml
+M bld/run-ibm.csh
+M bld/clm_inparm.pm
+M bld/DefaultTIMEMGR_INPARM_Namelist.xml
+M bld/sample.seqccsm.namelists
+M bld/run-pc.csh
+M bld/timemgr_inparm.pm
+M bld/DefaultCCSM_INPARM_Namelist.xml
+M bld/drv_in.pm
+M bld/run-lightning.csh
+M bld/mkSrcfiles
+M bld/SeqCCSM_namelist.pm
+M bld/ccsm_inparm.pm
+M bld/SeqCCSMDrvInNamelistsDescriptions.xml
+M bld/DefaultDATM_DSHR_NML_Namelist.xml
+
+------------- Don't allow seq_ccsm datatypes to go below lnd_comp_mct, fix scam
+M src/main/clm_comp.F90 ------------- Remove SyncClock, CCSMInit
+M src/main/driver.F90 --------------- Remove SyncClock, CCSMInit
+M src/main/decompInitMod.F90 -------- Use endrun rather than shr_sys_abort
+M src/main/ncdio.F90 ---------------- fixes for scam
+M src/main/atmdrvMod.F90 ------------ remove unneeded printing
+M src/main/clmtypeInitMod.F90 ------- explicit use only's
+M src/main/initializeMod.F90 -------- remove CCSMInit and EClock
+M src/main/controlMod.F90 ----------- move initialization to timemgr/clm_varctl
+ set methods
+M src/main/clm_time_manager.F90 ----- Make namelist input private, add set method
+M src/main/clm_varctl.F90 ----------- Add set and initialization methods
+M src/main/clm_varorb.F90 ----------- Remove values not needed
+M src/main/lnd_comp_mct.F90 --------- Update to new structures/logic
+ On time-step 0 also advance to time-step 1
+M src/main/program_off.F90 ---------- Move orbital info/dtime to this level
+M src/main/spmdMod.F90 -------------- Change print format
+M src/biogeophys/UrbanInputMod.F90 -- Initialize filename
+
+Summary of testing:
+
+ bluevista:
+004 blA71 TBL.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................FAIL! rc= 5
+008 blA91 TBL.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -10 arb_ic .........................FAIL! rc= 5
+011 blD91 TBL.sh _persc_ds clm_per 19981231:YEARLY:1200 4x5 gx3v5 144 arb_ic ....................FAIL! rc= 5
+014 blG71 TBL.sh 17p_sc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic .........FAIL! rc= 5
+016 blH71 TBL.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic ......FAIL! rc= 5
+021 blE11 TBL.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 5
+026 blF27 TBL.sh 17p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ..............FAIL! rc= 5
+031 blE31 TBL.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 64x128^360x720 USGS 48 arb_ic
+.......FAIL! rc= 5
+034 blCA1 TBL.sh _sc_ds clm_std^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 5
+036 blNB1 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 5
+040 blG41 TBL.sh 17p_sc_dh clm_pftdyn 10001230:MONTHLY:1800 10x15 USGS 48 arb_ic ................FAIL! rc= 5
+046 blH11 TBL.sh 17p_cnnsc_dh clm_std 19980101:MONTHLY:1800 48x96 gx3v5@1890 48 arb_ic ..........FAIL! rc= 5
+050 blH52 TBL.sh 17p_cnnsc_dm clm_std 19980115:MONTHLY:1800 10x15 USGS@1890 48 arb_ic ...........FAIL! rc= 5
+052 smJ11 TSM.sh 4p_casasc_dh clm_std 19981231:NONE:1800 48x96 gx3v5 48 arb_ic ..................FAIL! rc= 4 <<<< bug 708
+053 erJ11 TER.sh 4p_casasc_dh clm_std 19981231:NONE:1800 48x96 gx3v5 10+38 arb_ic ...............FAIL! rc= 5 <<<< bug 708
+054 brJ11 TBR.sh 4p_casasc_dh clm_std 19981231:NONE:1800 48x96 gx3v5 24+24 arb_ic ...............FAIL! rc= 5 <<<< bug 708
+055 blJ11 TBL.sh 4p_casasc_dh clm_std 19981231:NONE:1800 48x96 gx3v5 48 arb_ic ..................FAIL! rc= 4
+059 blK11 TBL.sh 10p_dgvmsc_dh clm_std 19981231:NONE:1800 48x96 gx3v5 48 arb_ic .................FAIL! rc= 5
+064 blK71 TBL.sh 10p_dgvmsc_s clm_std 19971231:NONE:1800 1x1_brazil navy -670 arb_ic ............FAIL! rc= 5
+068 blL53 TBL.sh _sc_dh clm_std^nl_crcrop 19980115:MONTHLY:1800 10x15 USGS 24 arb_ic ............FAIL! rc= 5
+072 blL63 TBL.sh _sc_h clm_std 19980101:MONTHLY:1800 1.9x2.5 gx1v5 -10 startup ..................FAIL! rc= 5
+076 bl563 TBL.sh _h clm_std 19980101:MONTHLY:1800 1.9x2.5 gx1v5 -10 startup .....................FAIL! rc= 5
+080 blL52 TBL.sh _sc_ds clm_std 19980115:MONTHLY:1800 10x15 USGS 24 arb_ic ......................FAIL! rc= 5
+084 blL73 TBL.sh _sc_s clm_std 19980101:6_HOURLY:1800 1x1_brazil navy -10 arb_ic ................FAIL! rc= 5
+089 blL83 TBL.sh _sc_dh clm_std 19980115:DAILY:3600 5x5_amazon navy -10 arb_ic ..................FAIL! rc= 5
+101 sm921 TSMext_ccsmseq_cam.sh ext_ccsm_seq_4x5_dh ext_ccsm_seq_cam 48 .........................FAIL! rc= 4 <<<<< bug 707
+102 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4 <<<<<< bug 707
+ CAM tests all PASS except
+060 sm711 TSM.sh h5x8adm adia 9s ..................................FAIL! rc= 6
+062 sc001 TSC.sh e64bfbiop scm_prep scm64bfbiop scm_b4b_o1 7s .....FAIL! rc= 5
+
+First was a Build-namelist error, next was core-dump.
+
+ tempest:
+004 blA71 TBL.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................FAIL! rc= 5
+007 blD91 TBL.sh _persc_ds clm_per 19981231:YEARLY:1200 4x5 gx3v5 144 arb_ic ....................FAIL! rc= 5
+010 blE11 TBL.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 5
+014 blA92 TBL.sh _sc_dm clm_std 19990101:NONE:3600 4x5 gx3v5 -10 arb_ic .........................FAIL! rc= 5
+016 blG71 TBL.sh 17p_sc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic .........FAIL! rc= 5
+018 blH71 TBL.sh 17p_cnnsc_ds clm_pftdyn 10001230:NONE:3600 1x1_tropicAtl test -100 arb_ic ......FAIL! rc= 5
+ lightning/pathscale:
+004 blA91 TBL.sh _sc_dh clm_std 19990101:NONE:3600 4x5 gx3v5 -10 arb_ic .........................FAIL! rc= 5
+008 blA71 TBL.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................FAIL! rc= 5
+011 blCA1 TBL.sh _sc_ds clm_std^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 5
+013 blCA2 TBL.sh _sc_ds clm_std^nl_urb 19971231:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 5
+015 blNB1 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 5
+017 blOC1 TBL.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 5
+019 er112 TER.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13 <<<< bug 694
+026 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:MONTHLY:1800 10x15 USGS 48 arb_ic ................FAIL! rc= 5
+031 blH52 TBL.sh 17p_cnnsc_dm clm_std 19980115:MONTHLY:1800 10x15 USGS@1890 48 arb_ic ...........FAIL! rc= 5
+035 blK51 TBL.sh 10p_dgvmsc_dm clm_std 19981231:NONE:1800 10x15 USGS 48 arb_ic ..................FAIL! rc= 5
+039 blL51 TBL.sh _sc_dh clm_std 19980115:MONTHLY:1800 10x15 USGS 48 arb_ic ......................FAIL! rc= 5
+043 blL53 TBL.sh _sc_dh clm_std^nl_crcrop 19980115:MONTHLY:1800 10x15 USGS 24 arb_ic ............FAIL! rc= 5
+047 blL73 TBL.sh _sc_s clm_std 19980101:6_HOURLY:1800 1x1_brazil navy -10 arb_ic ................FAIL! rc= 5
+ jaguarcnl:
+008 blA92 TBL.sh _sc_dm clm_std 19990101:NONE:3600 4x5 gx3v5 -10 arb_ic .........................FAIL! rc= 7
+012 blE12 TBL.sh 4p_vodsrsc_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 7
+016 blE32 TBL.sh 4p_vodsrsc_dm clm_std 19981231:YEARLY:1800 64x128^360x720 USGS 48 arb_ic .......FAIL! rc= 7
+020 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:MONTHLY:1800 10x15 USGS 48 arb_ic ................FAIL! rc= 7
+024 blH12 TBL.sh 17p_cnnsc_dm clm_std 19980101:MONTHLY:1800 48x96 gx3v5@1890 48 arb_ic ..........FAIL! rc= 7
+028 blJ12 TBL.sh 4p_casasc_dm clm_std 19981231:NONE:1800 48x96 gx3v5 48 arb_ic ..................FAIL! rc= 7
+ bangkok/lf95:
+004 blA92 TBL.sh _sc_dm clm_std 19990101:NONE:3600 4x5 gx3v5 -10 arb_ic .........................FAIL! rc= 7
+008 blA71 TBL.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................FAIL! rc= 7
+011 blD91 TBL.sh _persc_ds clm_per 19981231:YEARLY:1200 4x5 gx3v5 144 arb_ic ....................FAIL! rc= 7
+014 blCA2 TBL.sh _sc_ds clm_std^nl_urb 19971231:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 7
+016 blOC1 TBL.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 7
+018 er112 TER.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13 <<<<<< 694
+025 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:MONTHLY:1800 10x15 USGS 48 arb_ic ................FAIL! rc= 7
+030 blH52 TBL.sh 17p_cnnsc_dm clm_std 19980115:MONTHLY:1800 10x15 USGS@1890 48 arb_ic ...........FAIL! rc= 7
+034 blJ12 TBL.sh 4p_casasc_dm clm_std 19981231:NONE:1800 48x96 gx3v5 48 arb_ic ..................FAIL! rc= 7
+038 blK51 TBL.sh 10p_dgvmsc_dm clm_std 19981231:NONE:1800 10x15 USGS 48 arb_ic ..................FAIL! rc= 7
+042 blL51 TBL.sh _sc_dh clm_std 19980115:MONTHLY:1800 10x15 USGS 48 arb_ic ......................FAIL! rc= 7
+047 blL73 TBL.sh _sc_s clm_std 19980101:6_HOURLY:1800 1x1_brazil navy -10 arb_ic ................FAIL! rc= 7
+052 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+
+TBL _sc tests fail because of addition of area-corrected fluxes and addition of running
+ time-step 1 when time-step 0 is done on initialization.
+er111/112 tests fail due to previous bug 694
+sm921/982 tests fail on bluevista due to new bug 707 (a compiler bug on bluevista)
+
+CLM tag used for the baseline comparison tests if applicable: clm3_5_17
+
+Changes answers relative to baseline: Yes -- greater than roundoff
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: Any mode with sequential-CCSM
+ - what platforms/compilers: ALL
+ - nature of change: larger than roundoff
+
+Fluxes in the driver are corrected by ratios of areas from different components.
+Also at time-step 0 you also run time-step 1 -- rather than just time-step 0.
+
+===============================================================
+===============================================================
+Tag name: clm3_5_17
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Wed Feb 6 10:10:17 MST 2008
+One-line Summary: Merge Tony Craig's FMI branch fmi12_clm3_5_16 to the clm trunk
+
+Purpose of changes: Reducing the debug level in some initialization routines, fixing a few diagnostics,
+ updating timers, improve the write_diagnostic performance, update of rtm init to improve scaling and performance.
+
+Bugs fixed (include bugzilla ID): 597
+
+Known bugs (include bugzilla ID): 251, 512, 546, 652, 672, 675, 676, 680, 681, 694, 696, 698, 701, 702
+ http://bugs.cgd.ucar.edu/
+
+ New bugs found: datm7 restart files NOT being archived (696), cprnc found to have problems (698),
+ version autoinsertion in tools (701), test_driver times out on jaguar (702)
+
+Describe any changes made to build system: Add BUILDPIO CPP variable
+
+Describe any changes made to the namelist: Add new namelist variables dealing with PIO (see below)
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: erik,tcraig
+
+List any svn externals directories updated (csm_share, mct, etc.): pio to pio11_prod
+
+List all files eliminated: None
+
+List all files added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M doc/UpDateChangeLog.pl ----------------- Tweak update of date to work correctly for both files
+M bld/run-frost.csh ---------------------- Tony gets working, add PIO build as commented out, turn queries off, put files
+ in explicitly, tests new ncd_ options
+M src/biogeochem/STATICEcosysDynMod.F90 -- Add timers
+M src/main/clm_comp.F90 ------------------ Add timers
+M src/main/driver.F90 -------------------- Add timers, add barrier for diagnostics write, change send/recv into reduce (leave option for old code)
+M src/main/decompInitMod.F90 ------------- Reduce debug level for initialization
+M src/main/initializeMod.F90 ------------- Add timers
+M src/main/histFileMod.F90 --------------- PIO option
+M src/main/ncdio.F90 --------------------- Work for PIO, new options
+M src/main/gen_ncdio_global_subs.csh ----- Work for PIO, new options
+M src/main/gen_ncdio_local_subs.csh ------ Work for PIO, new options
+M src/main/controlMod.F90 ---------------- Add new namelist items
+
+History experimental options (mostly for PIO which isn't fully implemented yet)
+
+ o hist_pioflag = logical true if want to turn on hist with pio [.FALSE., .TRUE.]
+ o ncd_lowmem2d = logical true if want to turn on low memory 2d writes in clm hist [.TRUE., .FALSE.]
+ o ncd_pio_def = logical true if want default pio use setting [.FALSE., .TRUE.]
+ o ncd_pio_UseRearranger = logical true if want to use MCT as Rearranger [.TRUE., .FALSE.]
+ o ncd_pio_UseBoxRearr = logical true if want to use box as Rearranger [.FALSE., .TRUE.]
+ o ncd_pio_SerialCDF = logical true if want to write with pio serial netcdf mode [.FALSE., .TRUE.]
+ o ncd_pio_IODOF_rootonly = logical true if want to write history in pio from root only [.FALSE., .TRUE.]
+ o ncd_pio_DebugLevel = integer pio debug level ( default 2)
+ o ncd_pio_num_iotasks = integer number of iotasks to use for PIO (default all PEs)
+
+M src/main/clm_varctl.F90 ----------------- New ncd and PIO history options
+M src/main/program_off.F90 ---------------- Add mpi barrier
+M src/main/areaMod.F90 -------------------- Improve performance/robustness
+M src/main/clm_mct_mod.F90 ---------------- Use pelocs
+M src/riverroute/RtmMod.F90 --------------- Add timers, update of rtm init to improve scaling and performance
+M test/system/test_driver.sh -------------- Fix for new account names on jaguar/phoenix
+
+Summary of testing:
+
+ bluevista: All PASS except
+021 blE11 TBL.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 7
+026 blF27 TBL.sh 17p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ..............FAIL! rc= 7
+031 blE31 TBL.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 64x128^360x720 USGS 48 arb_ic .......FAIL! rc= 7
+101 sm921 TSMext_ccsmseq_cam.sh ext_ccsm_seq_4x5_dh ext_ccsm_seq_cam 48 .........................FAIL! rc= 4
+102 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ jaguarcnl: All PASS except
+012 blE12 TBL.sh 4p_vodsrsc_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 7
+016 blE32 TBL.sh 4p_vodsrsc_dm clm_std 19981231:YEARLY:1800 64x128^360x720 USGS 48 arb_ic .......FAIL! rc= 7
+ bangkok/lf95: All PASS except
+018 er112 TER.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13
+020 bl112 TBL.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic .................FAIL! rc= 7
+051 sm951 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dm ext_ccsm_seq_cam 48 .......................FAIL! rc= 4
+052 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ lightning/pathscale: All PASS except
+019 er112 TER.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13
+021 bl112 TBL.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic .................FAIL! rc= 7
+ tempest: All PASS except
+010 blE11 TBL.sh 4p_vodsrsc_dh clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 7
+
+er112, bl112, sm921, sm951, sm982 tests failed previously
+other bl tests fail because of the changes in RTM
+
+CLM tag used for the baseline comparison tests if applicable: clm3_5_16
+
+Changes answers relative to baseline: Only RTM
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers:
+ - what code configurations: RTM
+ - what platforms/compilers: All
+ - nature of change: Roundoff change
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? Only fields with RTM show differences and only to roundoff level
+
+ RMS DVOLRDT_ 8.8031E-22
+ RMS DVOLRDT_ 3.4573E-23
+ RMS QCHANR 3.6282E-16
+ RMS QCHOCNR 5.4893E-17
+
+The above is on bluevista after running for a day (other fields show RMS difference of zero)
+
+===============================================================
+===============================================================
+Tag name: clm3_5_16
+Originator(s): erik (Erik Kluzek)
+Date: Mon Jan 28 15:00:53 MST 2008
+One-line Summary: Get point version of Urban code onto trunk (urban code can not restart)
+
+Purpose of changes: Move urban branch onto trunk. Fix bug so hv files are saved. Add
+ high resolution datasets from Art Mirin.
+
+ Urban code was started by Gordon Bonan, and taken up by Mariana Vertenstein and Keith Oleson.
+ This represents work that has been ongoing for several years. Revision dates go back to
+ before 2003.
+
+ Some papers on the work are available from:
+
+ Oleson et.-al. Journal of Applied Meteorology and Climatology, in-Press as of Jan/2008
+
+ http://www.cgd.ucar.edu/tss/staff/oleson/publications/JAMC1597_rev_jul27_2007.pdf
+ http://www.cgd.ucar.edu/tss/staff/oleson/publications/JAMC1598_rev_jul27_2007.pdf
+
+Bugs fixed (include bugzilla ID): 644 (save hv files)
+
+Known bugs (include bugzilla ID): 251, 512, 546, 652, 672, 675, 676, 680, 681, 694
+ http://bugs.cgd.ucar.edu/
+
+ New bug found from clm3_5_15 (694) -- restarts are NOT bit-for-bit on lightning and bangkok/lf95 for offline
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Added furbinp -- for urban datasets
+
+List any changes to the defaults for the boundary datasets: Added more urban datasets.
+ Add 0.23x0.31 datasets from Art Mirin.
+
+Describe any substantial timing or memory changes: Approx 1% slower, memory should be very close
+
+Code reviewed by: oleson
+
+List any svn externals directories updated (csm_share, mct, etc.): bld/archiving
+ bld/archiving to scripts_080108
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+------ Urban point input datasets (ASCII)
+A bld/urban_input/asphaltjungle_fluxes.txt
+A bld/urban_input/metropolis_fluxes.txt
+A bld/urban_input/mexicocityMEX_fluxes.txt
+A bld/urban_input/vancouverCAN_fluxes.txt
+----- Main urban source codes
+A src/biogeophys/UrbanInitMod.F90
+A src/biogeophys/UrbanInputMod.F90
+A src/biogeophys/UrbanMod.F90
+----- Add testing for urban code
+A test/system/tests_posttag_urban ------ List of urban point tests
+A test/system/nl_files/clm_urb1pt ------ Namelist options for CLM1PT datasets
+A test/system/nl_files/nl_urb ---------- Urban namelist
+A test/system/config_files/_mexsc_ds --- Mexicocity, MEX
+A test/system/config_files/_vansc_ds --- Vancouver, CAN
+----- Add tool to convert Urban point datasets to sequential-CCSM mode for datm7
+A tools/ncl_scripts/convertUrbanOffline2Seq.ncl
+
+List all existing files that have been modified, and describe the changes:
+
+-------- Add in urban datasets to build-namelist
+M bld/configure ---------------------------- Move subroutine definition to before first reference
+M bld/datm_dshr_in.pm
+M bld/clm_inparm.pm
+M bld/datm.streams.template.xml ------------ Add in CLM1PT datasets for Urban
+M bld/DefaultTIMEMGR_INPARM_Namelist.xml
+M bld/DefaultSettings.xml
+M bld/DefaultDATM_DSHR_NML_Namelist.xml
+M bld/DefaultCLM_INPARM_Namelist.xml
+M bld/timemgr_inparm.pm
+M bld/run-pc.pm ---------------------------- Remove extra line, set mode in configure, add note about step=coupling step
+M bld/run-ibm.pm --------------------------- Add note about step=coupling step
+M bld/run-lightning.pm --------------------- Add note about step=coupling step
+-------- source code changes to add in urban code
+-------- mostly adding urban and non-urban filters
+M src/biogeochem/DGVMMod.F90 --------------- Add urban filters
+M src/main/atmdrvMod.F90 ------------------- Add RH and rainf, zero out solar if coszen<0, Urban pt CPPs
+M src/main/clm_varcon.F90 ------------------ Add PI, RGAS, SECSPDAY, urban PFT types, urban ponding depth
+M src/main/clm_varpar.F90 ------------------ Add maxpatch_urb for 5 PFT's
+M src/main/clm_atmlnd.F90 ------------------ Fill RH and rainf
+M src/main/clmtype.F90 --------------------- Add urban state data
+M src/main/clmtypeInitMod.F90 -------------- Initialize urban state data
+M src/main/controlMod.F90 ------------------ Add furbinp namelist item for urban input data
+M src/main/driver.F90 ---------------------- Pass urban filters, call urban modules
+M src/main/filterMod.F90 ------------------- Add urban filters
+M src/main/histFileMod.F90 ----------------- Add scale types needed for urban which needs to calculate area-averages based on urban input
+M src/main/histFldsMod.F90 ----------------- Add new output fields:
+
+ BUILDHEAT heat flux from urban building interior to walls and roof W/m^2 active
+ LWdown atmospheric longwave radiation W/m^2
+ PSurf surface pressure Pa
+ Qh sensible heat W/m^2
+ Qle total evaporation W/m^2
+ Qstor storage heat flux (includes snowmelt) W/m^2
+ RH atmospheric relative humidity %
+ Rainf atmospheric rain mm/s
+ Rnet net radiation W/m^2
+ SWdown atmospheric incident solar radiation W/m^2
+ TBUILD internal urban building temperature K active
+ TRAFFICFLUX sensible heat flux from urban traffic W/m^2 active
+ Tair atmospheric air temperature K
+ WASTEHEAT sensible heat flux from heating/cooling sources of urban waste heat W/m^2 active
+
+(Fields not mentioned above as active are set to inactive unless asked for. They are "ALMA" variables needed for
+ an urban model intercomparison project.)
+
+M src/main/iniTimeConst.F90 ---------------- Initialize urban data
+M src/main/initGridCellsMod.F90 ------------ Add initialization of urban landunits
+M src/main/initSurfAlbMod.F90 -------------- Call urban albedo calc
+M src/main/initializeMod.F90 --------------- Urban initialization
+M src/main/lnd_comp_mct.F90 ---------------- Add saturation vapor calc to compute RH
+M src/main/mkarbinitMod.F90 ---------------- Initialize urban state
+M src/main/pftvarcon.F90 ------------------- Fix typo
+M src/main/program_off.F90 ----------------- Pass declination angle from orbit to atmdrv (so solar can be nullified for coszen<0)
+M src/main/subgridAveMod.F90 --------------- Setup grid info for urban
+M src/main/subgridMod.F90 ------------------ Set urban landunit
+M src/main/surfrdMod.F90 ------------------- Initialize urban weights -- remove old code that aborted if urban fraction>0
+M src/biogeophys/BalanceCheckMod.F90 ------- Incoming rain does NOT include sun or shade wall, some checks only non-urban
+M src/biogeophys/Biogeophysics1Mod.F90 ----- Take into account type of urban column
+M src/biogeophys/Biogeophysics2Mod.F90 ----- Take into account type of urban column
+M src/biogeophys/FrictionVelocityMod.F90 --- Change index and filters
+M src/biogeophys/Hydrology1Mod.F90 --------- Take into account no water flow through urban buildings and impervious road
+M src/biogeophys/Hydrology2Mod.F90 --------- Send urban filters down, and no water flow in certain urban column types
+M src/biogeophys/SnowHydrologyMod.F90 ------ Urban similar to bare-soil landunit
+M src/biogeophys/SoilHydrologyMod.F90 ------ Determine ponding limits for urban roof and impervious road, no runoff for sun/shade wall
+M src/biogeophys/SoilTemperatureMod.F90 ---- Take into account that urban columns interact
+M src/biogeophys/SurfaceAlbedoMod.F90 ------ Filter urban columns appropriately
+M src/biogeophys/SurfaceRadiationMod.F90 --- Filter urban columns out
+---------- Make MPI and OpenMP settings explicit in configuration files
+M test/system/config_files/17p_vodsr_dm
+M test/system/config_files/17p_vodsr_do
+M test/system/config_files/4p_casa_m
+M test/system/config_files/4p_casa_o
+M test/system/config_files/17p_vodsr_m
+M test/system/config_files/17p_vodsr_o
+M test/system/config_files/4p_vodsr_dm
+M test/system/config_files/17p_cnn_m
+M test/system/config_files/4p_vodsr_do
+M test/system/config_files/17p_cnn_o
+M test/system/config_files/17p_cnn_dm
+M test/system/config_files/17p_cnn_do
+M test/system/config_files/10p_dgvm_m
+M test/system/config_files/4p_casa_dm
+M test/system/config_files/10p_dgvm_o
+M test/system/config_files/4p_casa_do
+M test/system/config_files/10p_dgvm_dm
+M test/system/config_files/README
+M test/system/config_files/10p_dgvm_do
+M test/system/config_files/4p_vodsr_m
+M test/system/config_files/4p_vodsr_o
+---------- Add urban tests to testing system
+M test/system/input_tests_master
+M test/system/README.testnames
+M test/system/mknamelist
+M test/system/test_driver.sh
+M test/system/tests_posttag_bangkok
+M test/system/tests_posttag_blueice
+M test/system/tests_posttag_lightning
+M test/system/tests_posttag_hybrid_regression
+M test/system/tests_posttag_purempi_regression
+M test/system/tests_pretag_bangkok
+M test/system/tests_pretag_bluevista
+---------- Put options on separate lines, explicitly set source
+M test/system/nl_files/clm_pftdyn
+M test/system/nl_files/clm_per
+M test/system/nl_files/clm_per0
+M test/system/nl_files/clm_std
+---------- Add note about need of other directories to build
+M tools/ncl_scripts/README ---------------------- Also add note about new script
+M tools/mksurfdata/README
+M tools/ncl_scripts/README
+M tools/interpinic/README
+M tools/mkgriddata/README
+M tools/mkdatadomain/README
+
+
+Summary of testing:
+
+ bluevista: All PASS except
+034 blCA1 TBL.sh _sc_ds clm_std^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 5
+036 blNB1 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 5
+076 bl563 TBL.sh _h clm_std 19980101:MONTHLY:1800 1.9x2.5 gx1v5 -10 startup .....................FAIL! rc= 7
+101 sm921 TSMext_ccsmseq_cam.sh ext_ccsm_seq_4x5_dh ext_ccsm_seq_cam 48 .........................FAIL! rc= 4
+102 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ tempest: ALL PASS
+ jaguarcnl: All PASS except TBL tests which fail because of a problem with the previous version on jaguar.
+004 blA71 TBL.sh _sc_ds clm_std 19990101:NONE:3600 1x1_brazil navy -10 arb_ic ...................FAIL! rc= 4
+008 blA92 TBL.sh _sc_dm clm_std 19990101:NONE:3600 4x5 gx3v5 -10 arb_ic .........................FAIL! rc= 5
+012 blE12 TBL.sh 4p_vodsrsc_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic ...............FAIL! rc= 5
+016 blE32 TBL.sh 4p_vodsrsc_dm clm_std 19981231:YEARLY:1800 64x128^360x720 USGS 48 arb_ic .......FAIL! rc= 5
+020 blG42 TBL.sh 17p_sc_dm clm_pftdyn 10001230:MONTHLY:1800 10x15 USGS 48 arb_ic ................FAIL! rc= 5
+024 blH12 TBL.sh 17p_cnnsc_dm clm_std 19980101:MONTHLY:1800 48x96 gx3v5@1890 48 arb_ic ..........FAIL! rc= 5
+028 blJ12 TBL.sh 4p_casasc_dm clm_std 19981231:NONE:1800 48x96 gx3v5 48 arb_ic ..................FAIL! rc= 5
+032 blK12 TBL.sh 10p_dgvmsc_dm clm_std 19981231:NONE:1800 48x96 gx3v5 48 arb_ic .................FAIL! rc= 5
+ bangkok/lf95: All PASS except
+014 blCA2 TBL.sh _sc_ds clm_std^nl_urb 19971231:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 5
+016 blOC1 TBL.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 5
+018 er112 TER.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13 <<<<
+020 bl112 TBL.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic .................FAIL! rc= 7
+051 sm951 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dm ext_ccsm_seq_cam 48 .......................FAIL! rc= 4
+052 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+
+ lightning/pathf90: All PASS except
+011 blCA1 TBL.sh _sc_ds clm_std^nl_urb 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic ..........FAIL! rc= 5
+013 blCA2 TBL.sh _sc_ds clm_std^nl_urb 19971231:NONE:3600 1x1_asphaltjungleNJ navy -90 arb_ic ...FAIL! rc= 5
+015 blNB1 TBL.sh _mexsc_ds clm_urb1pt^nl_urb 19931201:NONE:3600 1x1_mexicocityMEX navy 159 arb_ic FAIL! rc= 5
+017 blOC1 TBL.sh _vansc_ds clm_urb1pt^nl_urb 19920812:NONE:3600 1x1_vancouverCAN navy 330 arb_ic FAIL! rc= 5
+019 er112 TER.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 10+38 arb_ic ..............FAIL! rc= 13 <<<<
+021 bl112 TBL.sh 4p_vodsr_dm clm_std 19981231:YEARLY:1800 48x96 gx3v5 48 arb_ic .................FAIL! rc= 7
+
+
+Urban TBL point tests do NOT pass because previous model version didn't have urban enabled.
+cam standalone tests require the ccsm4_alpha series version of clm.
+<<<<< Tests are the 694 bug found in clm3_5_15.
+
+
+CLM tag used for the baseline comparison tests if applicable: clm3_5_15
+
+Changes answers relative to baseline: None bit-for-bit
+ (except albedos will be different when running in offline mode see below)
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All mode=offline
+ - what platforms/compilers: All
+ - nature of change: Solar now set to zero when coszen<0, which influences
+ some non-common instances near twilight when the dataset shows solar>0
+ but coszen<0. It doesn't change the model results -- because everything is
+ reflected anyway, but it will change how albedo is averaged over those periods.
+
+===============================================================
+===============================================================
+Tag name: clm3_5_15
+Originator(s): erik (Erik Kluzek)
+Date: Fri Dec 21 20:33:01 MST 2007
+One-line Summary: Fix interpinic for half degree grid, add in large-file support, allow configure to work with ccsm directory structure
+
+Purpose of changes:
+
+Change configure so it will work with the ccsm4_alpha directory structure (especially for
+the test suite). Add in large-file support for main code as well as tools (mksurfdata).
+Add in tests for create_croplandunit and large-file support. Get interpinic to work for
+half degree, and optimize and verify its use of Open-MP. Add tool to interpolate
+Nitrogen deposition files (ndepregrid.ncl). Update run scripts with suggestions from Sam
+(and fix a couple of minor bugs).
+
+interpinic problem: Previous version may have incorrectly found nearest points for PFT data.
+ Data would have been valid -- but possibly NOT from the nearest point.
+ There was also a potential Open-MP problem where answers could change depending on the
+ number of threads used. The new version corrects both of these problems. The new version
+ should be used to interpolate critical datasets.
+
+Bugs fixed (include bugzilla ID): 656 (interpinic), 660 (large-file), 674 (diff -q in run script), 679 (testing task/thread change)
+
+Known bugs (include bugzilla ID): 251, 512, 546, 652, 664, 672, 675, 676
+ http://bugs.cgd.ucar.edu/
+
+Describe any changes made to build system: Remove mpi include/lib for jaguarcnl
+ (as already included with the ftn command)
+
+ Make ccsm_seq -- the default way to run.
+
+Describe any changes made to the namelist: Add outnc_large_files option
+
+ outnc_large_files --- TRUE => use NetCDF 64-bit large file format for output files
+ (history and restart files)
+
+ The NetCDF 64-bit large file format became available in NetCDF3.6.0 and allows larger dimensions as well as allowing
+ output files > 2 GBytes. For more info. on Large File Support (LFS) for NetCDF see...
+
+ http://www.unidata.ucar.edu/software/netcdf/docs/faq.html#lfs
+
+ Since, file offsets are stored with 64-bit words rather than 32-bit words -- file sizes may change slightly with LFS.
+
+List any changes to the defaults for the boundary datasets:
+
+ Added in new clmi files:
+
++lnd/clm2/initdata/clmi.BCN.1980-01-01-00000.071207.nc
++lnd/clm2/initdata/clmi.F_0000-01-01_1.9x2.5_gx1v5_c071203.nc
++lnd/clm2/initdata/clmi.F_0000-09-01_1.9x2.5_gx1v5_c071203.nc
+
+ Added in ndep files at half degree
+
++lnd/clm2/ndepdata/ndep_clm_2100_0.47x0.63_c071213.nc
++lnd/clm2/ndepdata/ndep_clm_2000_0.47x0.63_c071213.nc
++lnd/clm2/ndepdata/ndep_clm_1890_0.47x0.63_c071213.nc
++lnd/clm2/ndepdata/fndep_clm_1890-2100_0.47x0.63_c071213.nc
+
+ Add documentation and delete extra variables from T42 base ndep datasets
+
++lnd/clm2/ndepdata/ndep_clm_2100_64x128_c071221.nc
++lnd/clm2/ndepdata/ndep_clm_2000_64x128_c071221.nc
++lnd/clm2/ndepdata/ndep_clm_1890_64x128_c071221.nc
+
+ Added in urban testing dataset
+
++lnd/clm2/surfdata/surfdata_1x1pt_camdenNJ_navy_070824.nc
++lnd/clm2/griddata/griddata_1x1pt_camdenNJ_navy_070824.nc
++lnd/clm2/griddata/fracdata_1x1pt_camdenNJ_navy_070824.nc
+
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: slevis (interpinic, run-ibm.csh),
+ thornton (ndepregrid.ncl, outnc_large_files option)
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ perl5lib to perl5lib_071204 which includes new Decomp module.
+
+List all files eliminated: None
+
+List all files added and what they do:
+
+Add files for testing different tool configurations and ncl scripts, and for testing of
+create_crop_landunit, large_file support, and an urban test. Also change offline configuration
+files so they have offline explicitly set as the mode.
+
+A + test/system/config_files/tools__ds
+A + test/system/config_files/tools__o
+A + test/system/TSMncl_tools.sh
+A + test/system/nl_files/nl_crcrop
+A + test/system/nl_files/nl_std
+A + test/system/nl_files/nl_lfiles
+
+Add ncl script to regrid Nitrogen deposition files
+
+A + tools/ncl_scripts
+A + tools/ncl_scripts/README
+A + tools/ncl_scripts/ndepregrid.ncl
+A + tools/mkgriddata/mkgriddata.ccsm_dom ------ add sample script for using CCSM domain files
+
+List all existing files that have been modified, and describe the changes:
+
+
+ Testing system updates...
+
+M test/system/config_files/* <-- offline configure files -- explicitly set offline mode
+M test/system/config_files/README
+M test/system/tests_posttag_spot1
+M test/system/tests_pretag_jaguar
+M test/system/README.testnames
+M test/system/tests_pretag_bangkok
+M test/system/TCBtools.sh
+M test/system/test_driver.sh
+M test/system/mknamelist
+M test/system/tests_posttag_hybrid_regression
+M test/system/tests_posttag_purempi_regression
+M test/system/tests_pretag_tempest
+M test/system/tests_pretag_bluevista
+M test/system/tests_posttag_blueice
+M test/system/input_tests_master
+M test/system/README
+M test/system/TSMtools.sh
+M test/system/TCBext_ccsmseq_cam.sh
+M test/system/tests_posttag_lightning
+M test/system/TBLtools.sh
+M test/system/TSM.sh
+
+ Update tools makefile and change svn keyword strings
+
+M tools/mksurfdata/mkvarctl.F90
+M tools/mksurfdata/README
+M tools/mksurfdata/mkfileMod.F90
+M tools/mksurfdata/mksrfdat.F90
+M tools/mksurfdata/Makefile
+M tools/interpinic/interpinic.F90
+M tools/interpinic/Srcfiles
+M tools/interpinic/Makefile
+M tools/mkgriddata/creategridMod.F90
+M tools/mkgriddata/Makefile
+M tools/mkdatadomain/Makefile
+M tools/README
+
+M bld/configure ---------------------- changes to work with ccsm4.alpha directory structure, and jaguarcnl
+M bld/DefaultCLM_INPARM_Namelist.xml - Add new datasets
+M bld/Makefile.in -------------------- changes needed for jaguarcnl and Darwin
+M bld/scpDefaultNamelist.pl ---------- extend to work with ndep files
+ Make changes to run scripts -- move section of things to change to top
+ Remove stuff not used. Add more documentation. Add suggestions from Sam Levis.
+M bld/run-ibm.csh -------------------- remove -q option to diff
+M bld/run-lightning.csh -------------- add bit about comparing rpointer files to see if advancing from run-ibm.csh
+M bld/run-pc.csh --------------------- add bit about comparing rpointer files to see if advancing from run-ibm.csh
+
+ Add large-file support
+
+M src/biogeochem/CASAMod.F90
+M src/biogeochem/DGVMMod.F90
+M src/main/ncdio.F90
+M src/main/restFileMod.F90
+M src/main/controlMod.F90
+M src/main/clm_varctl.F90
+
+Summary of testing:
+
+ tempest: All PASS
+ bluevista: All PASS, except
+033 smEA1 TSM.sh _sc_ds clm_std 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic .................FAIL! rc= 10 --> Urban not active yet
+034 blEA1 TBL.sh _sc_ds clm_std 19981001:NONE:3600 1x1_camdenNJ navy -90 arb_ic .................FAIL! rc= 4 ---> Urban not active yet
+066 blL53 TBL.sh _sc_dh clm_std^nl_crcrop 19980115:MONTHLY:1800 10x15 USGS 24 arb_ic ............FAIL! rc= 5 ---> New test
+070 blL63 TBL.sh _sc_h clm_std 19980101:MONTHLY:1800 1.9x2.5 gx1v5 -10 startup ..................FAIL! rc= 7 ---> New clmi file
+074 bl563 TBL.sh _h clm_std 19980101:MONTHLY:1800 1.9x2.5 gx1v5 -10 startup .....................FAIL! rc= 7 ---> New clmi file
+092 bl711 TBLtools.sh mksurfdata tools__ds namelist .............................................FAIL! rc= 4 ---> Test changed
+094 bl771 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 4 ---> Test changed
+099 sm921 TSMext_ccsmseq_cam.sh ext_ccsm_seq_4x5_dh ext_ccsm_seq_cam 48 .........................FAIL! rc= 4
+100 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+ jaguarcnl: ALL PASS, except TBL tests because previous code didn't run on jaguar with recent changes
+ lightning: ALL PASS, except
+035 blL53 TBL.sh _sc_dh clm_std^nl_crcrop 19980115:MONTHLY:1800 10x15 USGS 24 arb_ic ............FAIL! rc= 5 ---> New test
+042 bl771 TBLtools.sh mksurfdata tools__ds singlept .............................................FAIL! rc= 5 ---> New test
+ bangkok/lf95: All PASS, except
+047 sm951 TSMext_ccsmseq_cam.sh ext_ccsm_seq_10x15_dm ext_ccsm_seq_cam 48 .......................FAIL! rc= 4
+048 sm982 TSCext_ccsmseq_scam.sh ext_ccsm_seq_64x128_s scam_prep scam_ds scam 3 .................FAIL! rc= 4
+
+ CAM tests fail because of incompatibilities of csm_share code.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_5_14
+
+Changes answers relative to baseline: None -- bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_5_14
+Originator(s): erik (Erik Kluzek)
+Date: Thu Nov 29 12:18:47 MST 2007
+One-line Summary: Use build-streams, and archiving, multiple bug-fixes
+
+Purpose of changes: Move bstrms5_clm3_5_13 to trunk
+
+Remove long-term archiving from clm code. Use Mat's long-term and short-term archiving
+scripts like cam. Short term script runs at the end of your run script -- then the
+long-term archiving script is submitted to the batch que at the end. Update to newer
+version of csm_share that doesn't have any mss_ options. Tune usage of build-namelist.
+Make streams file on the fly. Remove references to get_env and $HEADUrl$. Fix interpinic
+for CASA and RTM (from Sam). Change testing from being done in offline mode to
+seq_ccsm mode. Make default in run scripts to run seq_ccsm mode. Add option to run scripts
+to resubmit itself until reaches a given model date.
+
+Add in HCSOI and HCSOISNO from Dave Lawrence. Add PERGRO test to test suite. Simple PERGRO
+fix from Jerry Olson. Use branch of driver code for seq-ccsm and removing archiving. Add
+in lnd_comp_mct changes from ccsm4.alpha series.
+
+Bugs fixed (include bugzilla ID): 449 (create_crop), 548 (rm getenv), 579 (cam config),
+Changes answers relative to baseline: None
+
+ To verify bit-for-bit ran standard offline test case (bl111) on: tempest, bluevista, bangkok
+ (pass on bangkok, and bluevista -- but failed on tempest)
+
+===============================================================
+===============================================================
+Tag name: clm3_5_13
+Originator(s): erik (Erik Kluzek)
+Date: Fri Nov 16 10:17:38 MST 2007
+One-line Summary: Update xml file with file needed for ccsm3_5_beta18
+
+Describe any changes made to build system: Add models/utils/perl5lib to path for perl tools
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: New 0.47x0.63 fraction
+ dataset compatible with CCSM datasets
+
+Describe any substantial timing or memory changes: None
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all subroutines eliminated: None
+
+List all subroutines added and what they do:
+
+A bld/scpDefaultNamelist.pl -- Script to help copy files in xml database.
+
+List all existing files that have been modified, and describe the changes:
+
+M bld/configure --- add models/util to path
+M bld/DefaultCLM_INPARM_Namelist.xml -- add new file
+M bld/queryDefaultNamelist.pl --- add models/util to path
+M bld/build-namelist --- add models/util to path
+
+Summary of testing: None
+
+Changes answers relative to baseline: No
+
+===============================================================
+===============================================================
+Tag name: clm3_5_12
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Thu Nov 8 13:49:25 MST 2007
+One-line Summary: Tag with new files needed for ccsm3_5_beta17
+
+Purpose of changes: Add new files needed for new resolutions being added in ccsm3_5_beta17
+
+Bugs fixed (include bugzilla ID): None
+
+Describe any changes made to build system: Small changes to configure from bstrms branch
+
+Describe any changes made to the namelist: Add new files to Default*.xml files
+
+List any changes to the defaults for the boundary datasets: New resolutions added
+
+Describe any substantial timing or memory changes: None
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all existing files that have been modified, and describe the changes:
+
+Move files over from the bstrms3_clm3_5_11 branch with the new resolutions needed.
+
+M bld/configure
+M bld/DefaultCLM_INPARM_Namelist.xml
+M bld/DefaultDATM_NML_Namelist.xml
+M bld/DefaultSettings.xml
+M bld/DefaultTIMEMGR_INPARM_Namelist.xml
+M bld/DefaultPROF_INPARM_Namelist.xml
+M bld/queryDefaultNamelist.pl
+M bld/DefaultCCSM_INPARM_Namelist.xml
+M bld/build-namelist
+M bld/DefaultDATM_DSHR_NML_Namelist.xml
+
+Summary of testing: None
+
+Changes answers relative to baseline: None
+
+===============================================================
+===============================================================
+Tag name: clm3_5_11
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Fri Sep 28 12:03:30 MDT 2007
+One-line Summary: Update datasets in the DefaultCLM file for 0.23x0.31, 0.47x0.63, 0.9x1.25 and add fndepdyn file for 1.9x2.5
+
+Purpose of changes: Needed for CCSM 20th Century simulation needed for ccsm3_5_beta13
+
+Bugs fixed (include bugzilla ID): 585, 589, 593, 611
+
+ Add T42_gx1v5, 0.9x1.25_gx1v5 support.
+ also look in scripts/ccsm_utils/Tools for perl5lib.
+ abort if set -cycle_begyr or cycle_nyrs on namelist rather than on build-namelist command-line.
+
+Known bugs (include bugzilla ID): 251, 449, 512, 546, 608, 618, 622, 624
+
+ New nasty bugs found:
+
+618 You can't add new fields using: hist_fincl*.
+622 CLM blindly continues even if needed fields are missing from surface dataset.
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Just add more files to XML database
+
+List any changes to the defaults for the boundary datasets: Add new files for:
+ 0.23x0.31, 0.47x0.63, 0.9x1.25, (64x128 with mask=gx1v5) and add fndepdyn file for 1.9x2.5
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: None
+
+List any svn externals directories updated (csm_share, mct, etc.): csm_share updated to trunk_tags/share3_070927
+
+ This is the version needed in ccsm3_5_beta13 tag (previous version causes problems building on tempest)
+
+List all subroutines eliminated: None
+
+List all subroutines added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M bld/DefaultCLM_INPARM_Namelist.xml ---- Add new datasets.
+M bld/clm_inparm.pm --------------------- Abort if try to define cycle_nyr and cycle_begyr on namelist
+ rather than with command line options.
+M bld/queryDefaultNamelist.pl ----------- Add ability to use perl5lib from under ccsm_utils/scripts/Tools.
+M bld/build-namelist -------------------- Add ability to use perl5lib from under ccsm_utils/scripts/Tools.
+
+ ------------------------- Remove test blZ11 (can't do the comparison), and update cam tag comparing to.
+M test/system/tests_pretag_bangkok
+M test/system/test_driver.sh
+M test/system/tests_posttag_purempi_regression
+M test/system/tests_posttag_hybrid_regression
+M test/system/tests_pretag_bluevista
+M test/system/tests_posttag_lightning
+
+Summary of testing: None
+
+Changes answers relative to baseline: No clm source code changed
+
+===============================================================
+===============================================================
+Tag name: clm3_5_10
+Originator(s): jet
+Date: Tue Sep 18 12:00:23 MDT 2007
+One-line Summary: Fixed scam bugs when reading initial land dataset
+ and moved scam_setlatlon functionality to shr_scam_mod in
+ csm_shr repos. Merged in Mariana's changes to add new boundary
+ dataset file to help scam determine land/ocn/ice fractions.
+
+Purpose of changes: Fix scam bugs and refactor code to allow scam to easily
+ determine land/ocean/ice fractions.
+
+Bugs fixed (include bugzilla ID): 612, 480
+
+Known bugs (include bugzilla ID): 251, 449, 512, 546, 608, 618, 622
+
+Describe any changes made to build system: Change configure to include new focndomain file.
+
+Describe any changes made to the namelist: focndomain file added to ocn_in
+
+List any changes to the defaults for the boundary datasets: Mariana created
+ a new focndomain boundary dataset (at the standard resolutions) which
+ describe the grid fraction of land/ocn/ice
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self, mariana
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ clm3_5_10
+ branches/scm_drvseq1_0_43
+ branches/csm_share3_070824_scm
+
+List all subroutines eliminated: scam_setlatlonidx.F90
+
+List all subroutines added and what they do: moved scm functionality
+ from scam_setlatlonidx.F90 into a csm_share module that can
+ now be used by all surface models.
+
+List all existing files that have been modified, and describe the changes:
+M test/system/test_driver.sh - use latest cam in testing
+M test/system/nl_files/scam - fixed scam bug
+M test/system/nl_files/scam_prep - fixed scam bug
+M test/system/nl_files/ext_ccsm_seq_cam - use latest cam in testing
+M SVN_EXTERNAL_DIRECTORIES - point to needed external dirs
+M src/biogeochem/STATICEcosysDynMod.F90 - use new shr code instead of scam_setlatlonidx.F90
+M src/main/ncdio.F90 - use new shr code instead of scam_setlatlonidx.F90
+M src/main/initializeMod.F90 - use new shr code instead of scam_setlatlonidx.F90
+M src/main/iniTimeConst.F90 - use new shr code instead of scam_setlatlonidx.F90
+M src/main/restFileMod.F90 - use new shr code instead of scam_setlatlonidx.F90
+D src/main/scam_setlatlonidx.F90 - use new shr code instead of scam_setlatlonidx.F90
+M src/main/clm_varctl.F90 - use new shr code instead of scam_setlatlonidx.F90
+M src/main/surfrdMod.F90 - use new shr code instead of scam_setlatlonidx.F90
+Summary of testing:
+
+ bluevista: Everything but ccsm tests pass (due to requirement on external
+ directories)
+
+ bangkok/lf95: all passed except ccsm - expected due to requirement on external
+ directories
+ tempest all passed except 034 br531 (failed previous to this commit)
+
+ CLM tag used for the baseline comparison tests if applicable: clm3_5_09
+
+Changes answers relative to baseline: None
+
+===============================================================
+===============================================================
+Tag name: clm3_5_09
+Originator(s): erik (Kluzek Erik 1326 CGD)
+Date: Fri Aug 31 13:58:46 MDT 2007
+One-line Summary: Change configure to NOT have csm_share code for ccsm_con option, and add in 1x1.25 file, and update datm7 and csm_share
+
+Purpose of changes: Fix for ccsm3_5_beta12 tag
+
+Bugs fixed (include bugzilla ID): 581, 583
+
+Known bugs (include bugzilla ID): 251, 449, 512, 546, 608 (found with a suggested fix by Inez Fung)
+
+Bugs fixed (include bugzilla ID): 389 (partial), 442, 443, 445, 450
+
+Describe any changes made to build system: Fix build for jaguar and phoenix
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: nanr, slevis, dlawren, oleson, and bonan reviewed the mklai changes
+
+List any svn externals directories updated (csm_share, mct, etc.): None
+
+List all subroutines eliminated: None
+
+List all subroutines added and what they do: Documentation files
+
+A + doc/README.DGVM
+A + doc/KnownBugs
+
+List all existing files that have been modified, and describe the changes:
+
+---------------------> Improve testing
+M test/system/nl_files/t31_cnall
+M test/system/nl_files/singlept_dgvm_long
+M test/system/nl_files/1.9x2.5
+M test/system/nl_files/t31_dgvm
+M test/system/nl_files/singlept
+M test/system/nl_files/10x15_cnall
+M test/system/nl_files/10x15_dgvm
+M test/system/nl_files/t31_casa
+M test/system/nl_files/regional
+M test/system/nl_files/10x15_pftdyn
+M test/system/nl_files/t31_dgvm_long
+M test/system/nl_files/t42half
+M test/system/nl_files/t31
+M test/system/nl_files/10x15
+M test/system/tests_posttag_robin
+M test/system/input_tests_master
+M test/system/tests_pretag_jaguar
+M test/system/tests_posttag_phoenix
+M test/system/test_driver.sh
+M test/system/TSCscam.sh
+
+---------------------> Change calculation of LAI,SAI,Canopy-top/bottom so weighted by %-PFT
+M tools/mksurfdata/mkfmax.F90
+M tools/mksurfdata/mklaiMod.F90
+M tools/mksurfdata/mkglacier.F90
+M tools/mksurfdata/mkurban.F90
+M tools/mksurfdata/mksoitex.F90
+M tools/mksurfdata/areaMod.F90
+M tools/mksurfdata/mksrfdat.F90
+M tools/mksurfdata/Srcfiles
+M tools/mksurfdata/mksoicol.F90
+M tools/mksurfdata/mkpftMod.F90
+
+---------------------> Use new default files at T42, add and correct documentation
+M bld/run-pc.csh
+M bld/run-lightning.csh
+M bld/Makefile.in
+M bld/run-ibm.csh
+M bld/config_clm_defaults.xml
+M bld/system_defaults.xml
+M bld/run-frost.csh
+
+---------------------> Remove uneeded shr_sys_flush, put #ifndef UNICOSMP around shr_sys_flush(6), correct MCT vector calls
+ needed for phoenix/robin build.
+
+M src/biogeochem/CNCStateUpdate2Mod.F90
+M src/biogeochem/CNGapMortalityMod.F90
+M src/biogeochem/CNC13StateUpdate2Mod.F90
+M src/biogeochem/CNFireMod.F90
+M src/biogeochem/CASAMod.F90 -------------------> Fix CASA by uncommenting lines according to Inez Fung
+M src/biogeochem/CNPrecisionControlMod.F90
+M src/biogeochem/DUSTMod.F90 -------------------> Changes from Natalie M. and Francis Vitt for CAM/CLM3.5 Aerosols
+M src/biogeochem/CNPhenologyMod.F90
+M src/biogeochem/CNCStateUpdate1Mod.F90
+M src/biogeochem/CNDecompMod.F90
+M src/biogeochem/CNCStateUpdate3Mod.F90
+M src/biogeochem/CNC13StateUpdate1Mod.F90
+M src/biogeochem/CNC13StateUpdate3Mod.F90
+M src/biogeochem/CNAllocationMod.F90
+M src/biogeochem/CNC13FluxMod.F90
+M src/biogeochem/CNEcosystemDynMod.F90
+M src/biogeochem/CNVegStructUpdateMod.F90
+M src/main/inicFileMod.F90
+M src/main/abortutils.F90
+M src/main/driver.F90
+M src/main/ncdio.F90
+M src/main/atmdrvMod.F90 -----------------------> Changes from Keith O. to fix TKFRZ change
+M src/main/initializeMod.F90
+M src/main/clmtypeInitMod.F90
+M src/main/histFileMod.F90
+M src/main/clm_csmMod.F90
+M src/main/controlMod.F90 ----------------------> Fix #ifdef's so extra namelist items only on for COUP_CSM or OFFLINE
+M src/main/initSurfAlbMod.F90
+M src/main/clm_time_manager.F90
+M src/main/initGridCellsMod.F90
+M src/main/program_off.F90
+M src/main/surfrdMod.F90
+M src/main/decompMod.F90
+M src/main/areaMod.F90
+M src/main/clm_mct_mod.F90
+M src/riverroute/RtmMod.F90
+M src/biogeophys/SurfaceRadiationMod.F90
+M src/biogeophys/SurfaceAlbedoMod.F90
+M src/biogeophys/Hydrology2Mod.F90
+M src/biogeophys/CanopyFluxesMod.F90
+
+Summary of testing:
+
+ bluevista: All PASS except
+004 bl111 TBL.sh 4p_vodsr_dh t31 48 ...............................FAIL! rc=
+009 bl127 TBL.sh 17p_vodsr_dh t31 48 ..............................FAIL! rc=
+014 bl131 TBL.sh 4p_vodsr_dh t42half 48 ...........................FAIL! rc=
+019 bl141 TBL.sh 17p__dh 10x15_pftdyn 48 ..........................FAIL! rc=
+024 bl211 TBL.sh 17p_cnn_dh t31_cnall 48 ..........................FAIL! rc=
+028 bl311 TBL.sh 4p_casa_dh t31_casa 48 ...........................FAIL! rc=
+032 bl411 TBL.sh 10p_dgvm_dh t31_dgvm 48 ..........................FAIL! rc=
+037 bl471 TBL.sh 10p_dgvm_s singlept_dgvm_long -730 ...............FAIL! rc=
+041 bl563 TBL.sh _h 1.9x2.5 -10 ...................................FAIL! rc=
+045 bl552 TBL.sh _ds 10x15 24 .....................................FAIL! rc=
+049 bl573 TBL.sh _s singlept -10 ..................................FAIL! rc=
+053 bl583 TBL.sh _dh regional -10 .................................FAIL! rc=
+057 bl711 TBLtools.sh mksurfdata namelist .........................FAIL! rc=
+059 bl771 TBLtools.sh mksurfdata singlept .........................FAIL! rc=
+062 sm061 TSMconccsm.sh ERS f19_g13 ...............................FAIL! rc= 7
+ lightning: All PASS except
+004 bl112 TBL.sh 4p_vodsr_dm t31 48 ...............................FAIL! rc=
+009 bl142 TBL.sh 17p__dm 10x15_pftdyn 48 ..........................FAIL! rc=
+014 bl252 TBL.sh 17p_cnn_dm 10x15_cnall 48 ........................FAIL! rc=
+018 bl451 TBL.sh 10p_dgvm_dm 10x15_dgvm 48 ........................FAIL! rc=
+019 sm551 TSM.sh _dh 10x15 48 .....................................FAIL! rc= 8
+020 er551 TER.sh _dh 10x15 10+38 ..................................FAIL! rc= 5
+021 br551 TBR.sh _dh 10x15 24+24 ..................................FAIL! rc= 5
+022 bl551 TBL.sh _dh 10x15 48 .....................................FAIL! rc=
+026 bl573 TBL.sh _s singlept -10 ..................................FAIL! rc=
+029 bl771 TBLtools.sh mksurfdata singlept .........................FAIL! rc=
+ bangkok/lf95:
+004 bl112 TBL.sh 4p_vodsr_dm t31 48 ...............................FAIL! rc=
+009 bl142 TBL.sh 17p__dm 10x15_pftdyn 48 ..........................FAIL! rc=
+014 bl252 TBL.sh 17p_cnn_dm 10x15_cnall 48 ........................FAIL! rc=
+018 bl312 TBL.sh 4p_casa_dm t31_casa 48 ...........................FAIL! rc=
+022 bl451 TBL.sh 10p_dgvm_dm 10x15_dgvm 48 ........................FAIL! rc=
+026 bl551 TBL.sh _dh 10x15 48 .....................................FAIL! rc=
+030 bl573 TBL.sh _s singlept -10 ..................................FAIL! rc=
+033 sm982 TSCscam.sh seqccsm_64x128_s scam_prep scam_ds scam 7 ....FAIL! rc= 4
+ robin: All compile tests pass
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_98
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers:
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change: new LAI, TKFRZ change is roundoff different, Dust and CASA changes are significant
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_98
+Originator(s): erik (KLUZEK ERIK 1326 CGD)
+Date: Wed Apr 18 09:51:53 MDT 2007
+One-line Summary: Move externals to top, make SOM4 the default, rename setidx file, use new datafiles,
+ remove NUMLONS read, tweak testing, remove shell_cmd, remove read of old surfdata file
+
+Purpose of changes: Some simple cleanup preparing for CLM3.5 release
+
+Bugs fixed (include bugzilla ID): 440, 441
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: Use new datasets
+
+List any changes to the defaults for the boundary datasets: Use new NCEP forcing datasets,
+ and new Nitrogen deposition datasets
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: self
+
+List any svn externals directories updated (csm_share, mct, etc.):
+
+ Directories are the same -- but SVN externals themselves moved to top level
+
+List all subroutines eliminated:
+
+D test/system/tests_pretag_blueice > Rename to posttag filename
+D bld/empty -----------------------> Rename to usr.src
+D src/main/setlatlonidx.F90 -------> Rename to scam_setlatlonidx.F90 filename
+D src/main/system_cmd.c
+D src/main/cfort.h
+D src/SVN_EXTERNAL_DIRECTORIES ----> Move to top layer
+
+List all subroutines added and what they do:
+
+ -------------------> Files renamed from above
+A + test/system/tests_posttag_blueice
+A + bld/usr.src
+A + SVN_EXTERNAL_DIRECTORIES
+A + src/main/scam_setlatlonidx.F90
+
+ -------------------> New files
+A test/system/config_files/10p_dgvm_s -------> New DGVM test
+A + test/system/tests_posttag_blueice ---------> Rename
+A test/system/tests_posttag_robin -----------> Add tests for robin/phoenix
+A test/system/tests_posttag_phoenix
+A test/system/nl_files/singlept_dgvm_long ---> Add new singlept DGVM test
+ -------------------> Add new documentation README files
+A tools/README
+A bld/README
+A Copyright ----> CCSM Copyright file
+A README
+
+List all existing files that have been modified, and describe the changes:
+
+ -------------------> Tweak testing -- use new datasets, increase diversity of testing
+M test/system/tests_pretag_bluevista
+M test/system/nl_files/t31_cnall
+M test/system/nl_files/1.9x2.5
+M test/system/nl_files/t31_dgvm
+M test/system/nl_files/singlept
+M test/system/nl_files/10x15_cnall
+M test/system/nl_files/10x15_dgvm
+M test/system/nl_files/t31_casa
+M test/system/nl_files/regional
+M test/system/nl_files/10x15_pftdyn
+M test/system/nl_files/t31_dgvm_long
+M test/system/nl_files/t42half
+M test/system/nl_files/t31
+M test/system/nl_files/10x15
+M test/system/input_tests_master
+M test/system/test_driver.sh
+ ---------------------> Remove system_cmd.c from list of source files needed to compile
+M tools/mksurfdata/Srcfiles
+M tools/interpinic/interpinic.F90 <--- fix interpinic compile on bluevista
+M tools/mkgriddata/Srcfiles
+ ---------------------> Use new datasets, make sure works
+M bld/run-pc.csh
+M bld/configure ---- Remove SOM4 CPP declaration
+M bld/run-lightning.csh
+M bld/run-ibm.csh
+M bld/run-frost.csh
+ ---------------------> Make SOM4 the default remove other option, remove read of NUMLON
+ ---------------------> Remove read of old surfdata sets, remove shell_cmd
+M src/biogeochem/CNDecompMod.F90
+M src/biogeochem/STATICEcosysDynMod.F90
+M src/main/ncdio.F90
+M src/main/fileutils.F90 -----> Remove shell_cmd as unused now.
+M src/main/iniTimeConst.F90
+M src/main/clm_varsur.F90
+M src/main/surfrdMod.F90
+
+Summary of testing:
+
+ bluevista: All PASS, except
+062 sm061 TSMconccsm.sh ERS f19_g13 ...............................FAIL! rc= 7
+ bangkok/lf95: All PASS
+ tempest: All PASS, except
+033 sm982 TSCscam.sh seqccsm_64x128_s scam_prep scam_ds scam 7 ....FAIL! rc= 4
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_97
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_97
+Originator(s): erik (KLUZEK ERIK 1326 CGD)
+Date: Wed Apr 11 12:18:32 MDT 2007
+One-line Summary: Remove SPMD, update to clm proc tag, update timing, improve testing
+
+Purpose of changes:
+ Remove SPMD #ifdefs -- use mpi-serial code
+ Remove COUP_CAM #ifdefs for SEQ_MCT || SEQ_ESMF
+ Remove LOCAL_DEBUG CPP #ifdefs
+ Update to prof05_clm3_expa_92 tag (timing changes, SCAM fixes)
+ Update timing library to latest
+ Fix bugs
+ Improve test suite
+ Change scripts so will rebuild each time (only configure first time if config file DNE)
+ Change tool Makefile to be consistent and have USER_ overload options.
+ Add script to update ChangeLog
+
+Bugs fixed (include bugzilla ID): 337, 361, 389(partial), 407, 408, 417, 428
+ 337 -- SPMD
+ 361 -- IRIX
+ 389 -- Testing
+ 407 -- Single gridcell
+ 408 -- mksurfdata,mkgriddata compiling
+ 417 -- write last file to mss correctly
+ 428 -- pftdyn mode now restarts correctly
+
+Describe any changes made to build system: Remove HIDE_MPI, remove
+ stuff left over from CAM Makefile, put FORTRAN name definition in configure
+ remove LOCAL_DEBUG CPP #ifdefs
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: mvertens, oleson, thornton (all just briefly)
+
+List any svn externals directories updated (csm_share, mct, etc.):
+ Update csm_share to share3_070321
+ Update timing to timing_070328
+
+List all subroutines eliminated:
+
+D bld/run-sgi.csh --------------------> Remove SGI run script
+D test/system/tests_pretag_bluesky ---> Remove since bluesky is gone
+D tools/mksurfdata/mksrfdat.namelist -> Change name to mksurfdata.namelist
+
+ -------> Rename interpinic files to *.F90
+
+D tools/interpinic/fmain.f90
+D tools/interpinic/wrap_nf.f90
+D tools/interpinic/shr_kind_mod.f90
+D tools/interpinic/interpinic.f90
+D tools/interpinic/addglobal.f90
+
+ --------> Remove file no longer needed by SCAM or for SPMD mode
+D src/main/getnetcdfdata.F90
+D src/main/mpiinc.F90
+
+
+List all subroutines added and what they do:
+
+------- Add concurrent and sequential CCSM tests, add more resolutions, improve tools tests
+A test/system/TSMconccsm.sh ----------- Concurrent CCSM test
+--------------------> New configurations to test
+A test/system/config_files/scam_ds
+A test/system/config_files/_h
+A test/system/config_files/_dh
+A test/system/config_files/_m
+A test/system/config_files/_o
+A test/system/config_files/_dm
+A test/system/config_files/_do
+A test/system/config_files/_s
+A test/system/config_files/_ds
+A test/system/config_files/seqccsm_4x5_dh
+A test/system/config_files/seqccsm_64x128_s
+A test/system/config_files/seqccsm_10x15_dm
+A test/system/config_files/17p__m
+A test/system/config_files/17p__o
+A test/system/config_files/17p__dh
+A test/system/config_files/17p__dm
+A test/system/config_files/17p__do
+A test/system/config_files/17p__h
+A test/system/TSMseqccsm.sh ------------- Sequential CCSM test
+--------------------> New namelists and resolutions to test
+A test/system/nl_files/scam
+A test/system/nl_files/1.9x2.5
+A test/system/nl_files/singlept
+A test/system/nl_files/10x15_cnall
+A test/system/nl_files/10x15_dgvm
+A test/system/nl_files/seqccsm
+A test/system/nl_files/regional
+A test/system/nl_files/scam_prep
+A test/system/nl_files/10x15
+A test/system/README
+A test/system/TCBseqccsm.sh --------> Sequential CCSM configure/build
+A test/system/TSCscam.sh -----------> Sequential CCSM SCAM mode configure/build
+A test/system/TCTconccsm.sh --------> CCSM create-test
+A test/system/TBLtools.sh ----------> Compare tools to baseline version
+
+----------- add singlept and regional tests
+A tools/mksurfdata/mksurfdata.singlept
+A tools/mksurfdata/mksurfdata.regional
+A + tools/mksurfdata/mksurfdata.namelist
+
+----------- Get improved code from Sam Levis (change names to *.F90)
+
+A tools/interpinic/interpinic.runoptions
+A + tools/interpinic/fmain.F90
+A tools/interpinic/clmi_1999-01-02_10x15_c070330.nc <---- Test file
+A + tools/interpinic/wrap_nf.F90
+A tools/interpinic/Filepath
+A + tools/interpinic/interpinic.F90
+A + tools/interpinic/addglobal.F90
+A tools/interpinic/Srcfiles
+----------- add singlept and regional tests
+A tools/mkgriddata/mkgriddata.singlept
+A tools/mkgriddata/mkgriddata.regional
+
+----------- Help to update ChangeLog
+A doc/UpDateChangeLog.pl
+
+----------- New code needed for SCAM mode
+A + src/main/setlatlonidx.F90
+
+
+List all existing files that have been modified, and describe the changes:
+
+----------- Improve test system (tweak tests, add new tests to various machines)
+M test/system/tests_pretag_bluevista
+M test/system/nl_files/t31_cnall
+M test/system/nl_files/t31_dgvm
+M test/system/nl_files/t31_casa
+M test/system/nl_files/10x15_pftdyn
+M test/system/nl_files/t31_dgvm_long
+M test/system/nl_files/t42half
+M test/system/nl_files/t31
+M test/system/CLM_runcmnd.sh ------- Use mpirun instead of mpiexec on bangkok/calgary
+M test/system/tests_pretag_blueice
+M test/system/input_tests_master
+M test/system/tests_pretag_jaguar
+M test/system/TSMtools.sh
+M test/system/tests_pretag_bangkok
+M test/system/TCBtools.sh
+M test/system/test_driver.sh
+M test/system/tests_pretag_tempest
+M test/system/tests_posttag_lightning
+
+----------- Get tools to build
+M tools/mksurfdata/mklaiMod.F90
+M tools/mksurfdata/mkfileMod.F90
+M tools/mksurfdata/creategridMod.F90
+M tools/mksurfdata/Srcfiles
+M tools/mksurfdata/Makefile ------ Make makefile consistent and add USER_ options
+M tools/interpinic/Makefile ------ Make makefile consistent and add USER_ options
+M tools/mkgriddata/mkgriddata.F90
+M tools/mkgriddata/creategridMod.F90
+M tools/mkgriddata/Srcfiles
+M tools/mkgriddata/Makefile ------ Make makefile consistent and add USER_ options
+
+----------- Improvements to run scripts and build system
+ Change scripts so will rebuild each time (only configure first time if config file DNE), remove left over
+ features from CAM Makefile.
+M bld/run-pc.csh
+M bld/configure
+M bld/run-lightning.csh
+M bld/Makefile.in
+M bld/run-ibm.csh
+M bld/run-frost.csh
+
+----------- Source code changes, removing SPMD #ifdef, LOCAL_DEBUG, get SCAM mode working with new CAM, change
+ to work with new timing library, fix code bugs above. Remove COUP_CAM #ifdefs for SEQ_MCT || SEQ_ESMF
+M src/biogeochem/CASAMod.F90
+M src/biogeochem/CNPhenologyMod.F90
+M src/biogeochem/STATICEcosysDynMod.F90
+M src/biogeochem/DGVMMod.F90
+M src/biogeochem/CNAllocationMod.F90
+M src/biogeochem/CNVegStructUpdateMod.F90
+M src/main/spmdGathScatMod.F90
+M src/main/abortutils.F90
+M src/main/clm_comp.F90
+M src/main/driver.F90
+M src/main/ncdio.F90
+M src/main/atmdrvMod.F90
+M src/main/fileutils.F90
+M src/main/pftdynMod.F90
+M src/main/iniTimeConst.F90
+M src/main/histFileMod.F90
+M src/main/program_csm.F90
+M src/main/restFileMod.F90
+M src/main/clm_csmMod.F90
+M src/main/controlMod.F90
+M src/main/ndepFileMod.F90
+M src/main/initGridCellsMod.F90
+M src/main/lnd_comp_mct.F90
+M src/main/program_off.F90
+M src/main/pftvarcon.F90
+M src/main/spmdMod.F90
+M src/main/surfrdMod.F90
+M src/main/decompMod.F90
+M src/main/areaMod.F90
+M src/main/iobinary.F90
+M src/main/do_close_dispose.F90
+M src/riverroute/RtmMod.F90
+M src/biogeophys/Hydrology2Mod.F90
+M src/biogeophys/BiogeophysRestMod.F90
+
+Summary of testing:
+
+ tempest: ALL PASS
+ bluevista:
+019 bl141 TBL.sh 17p_vodsr_dh 10x15_pftdyn 48 .....................FAIL! rc= 7
+022 er211 TER.sh 17p_cnn_dh t31_cnall 10+38 .......................FAIL! rc= 6
+059 sm061 TSMconccsm.sh ERS f19_g13 ...............................FAIL! rc= 5
+ bangkok/lf95:
+033 sm982 TSCscam.sh seqccsm_64x128_s scam_prep scam_ds scam 7 ....FAIL! rc= 4
+
+TBL test fails because of restart trouble with pftdyn.
+Concurrent CCSM test fails because of a problem with ccsm3_5_beta01 for datm7.
+bangkok scam test fails as it says that scm_crm_mode is not initialized in
+CAM code.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_96
+ (had to add in new tests, and set SOM4)
+
+Changes Answers: No
+
+===============================================================
+
+===============================================================
+Tag name: clm3_expa_96
+Originator(s): tcraig
+Date: Mon Mar 12 16:41:58 MDT 2007
+One-line Summary: fixed finemesh, pftdyn modes, add new tests
+
+Purpose of changes: restore finemesh and pftdyn modes, improve
+ test coverage
+
+Bugs fixed (include bugzilla ID): 389 (partial)
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: reduced memory use in pftdyn
+
+Code reviewed by:
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+ M test/system/tests_pretag_bluevista
+ A + test/system/nl_files/10x15_pftdyn
+ A + test/system/nl_files/t31_dgvm_long
+ A + test/system/nl_files/t42half
+ M test/system/input_tests_master
+ M test/system/tests_pretag_blueice
+ M test/system/tests_pretag_jaguar
+ M test/system/tests_pretag_bangkok
+ M test/system/test_driver.sh
+ M test/system/tests_posttag_lightning
+ M src/main/subgridMod.F90
+ M src/main/initializeMod.F90
+ M src/main/pftdynMod.F90
+ M src/main/clm_varsur.F90
+ M src/main/ndepFileMod.F90
+ M src/main/subgridAveMod.F90
+ M src/main/initGridCellsMod.F90
+ M src/main/lnd_comp_mct.F90
+ M src/main/program_off.F90
+ M src/main/surfrdMod.F90
+ M src/main/domainMod.F90
+ M src/main/decompMod.F90
+ M src/main/areaMod.F90
+
+- rename lvegxy,lwtxy to vegxy, wtxy
+- implement general setgatm, get finemesh working again
+- refactor pftdynMod for low memory implementation, validate pftdyn mode
+- modify ndep and pftdyn from x = x1*wt1 + x2*wt2 to x = x2 + wt1*(x1-x2)
+ as suggested by k.lindsay, improves roundoff performance
+- clean up some old code
+- add new tests configurations (10x15_pftdyn, t31_dgvm_long, t42half),
+- update pretag lists, add new tests
+
+Summary of testing:
+
+ bluevista:
+ all clm tests pass except bl for new cases including new tests
+ all cam tests pass except bl (due to clm changes in expa_94/95)
+ ccsm passes ERS.f45_g35.B.bluevista16 (answers change due to expa_94/95)
+ bangkok/lf95:
+ all clm tests pass including new tests in list
+ all cam tests pass except bl (due to clm changes in expa_94/95)
+ tempest:
+ all cam tests pass except bl (due to clm changes in expa_94/95)
+ lightning:
+ ccsm passes ERS.f45_g35.B2.lightning (answers change due to expa_94/95)
+
+CLM tag used for the baseline comparison tests if applicable:
+ clm3_expa_95, cam3_4_03, ccsm3_1_beta45
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+ answers are bfb with clm3_expa_95 in clm. cam and ccsm could not
+ be tested for bfb due to lagging clm version in latest cam and ccsm tags
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+Tag name: clm3_expa_95
+Originator(s): nanr, Keith Oleson, Peter Thornton
+Date: Thu Mar 8 17:06:06 MST 2007
+One-line Summary: Adding N limitation for CLM standalone w/o CN.
+
+Purpose of changes: Improve estimation of photosynthesis in CLM when it
+is run without CN active. These changes impose a N limitation as a fcn of
+PFT [0-1].
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: testing suites: fpftcon = pft-physiology.c070207
+
+List any changes to the defaults for the boundary datasets:
+ fpftcon = pft-physiology.c070207
+ pft-physiology.c070207.readme
+
+Describe any substantial timing or memory changes: none expected
+
+Code reviewed by: Keith Oleson, Peter Thornton, Dave Lawrence
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+bv1103en.ucar.edu-/fis/cgd/tss/nanr/clm/clm3_trunk % !svn
+svn status | grep 'M '
+M test/system/nl_files/t31_cnall ! change pft-physiology.c070207
+M test/system/nl_files/t31_dgvm ! change pft-physiology.c070207
+M test/system/nl_files/t31 ! change pft-physiology.c070207
+M test/system/nl_files/t31_casa ! change pft-physiology.c070207
+M bld/run-pc.csh ! change pft-physiology.c070207
+M bld/run-sgi.csh ! change pft-physiology.c070207
+M bld/run-lightning.csh ! change pft-physiology.c070207
+M bld/run-ibm.csh ! change pft-physiology.c070207
+M bld/run-frost.csh ! change pft-physiology.c070207
+M src/main/clmtypeInitMod.F90 ! initialize new N limitation factor (fnitr)
+M src/main/iniTimeConst.F90 ! initialize new N limitation factor (fnitr)
+M src/main/pftvarcon.F90 ! read in new var (fnitr)
+M src/main/clmtype.F90 ! initialize new N limitation factor (fnitr)
+M src/biogeophys/CanopyFluxesMod.F90 ! apply new N limitation factor (fnitr)
+
+Summary of testing:
+
+ bluevista:
+ 001 sm111 TSM.sh 4p_vodsr_dh t31 48 ...............................PASS
+ 002 er111 TER.sh 4p_vodsr_dh t31 10+38 ............................PASS
+ 003 br111 TBR.sh 4p_vodsr_dh t31 24+24 ............................PASS
+ 004 bl111 TBL.sh 4p_vodsr_dh t31 48 ...............................SKIPPED*
+ 005 sm114 TSM.sh 4p_vodsr_h t31 48 ................................PASS
+ 006 sm121 TSM.sh 17p_vodsr_dh t31 48 ..............................PASS
+ 007 er121 TER.sh 17p_vodsr_dh t31 10+38 ...........................PASS
+ 008 br121 TBR.sh 17p_vodsr_dh t31 24+24 ...........................PASS
+ 009 bl121 TBL.sh 17p_vodsr_dh t31 48 ..............................SKIPPED*
+ 010 sm124 TSM.sh 17p_vodsr_h t31 48 ...............................PASS
+ 011 sm211 TSM.sh 17p_cnn_dh t31_cnall 48 ..........................PASS
+ 012 er211 TER.sh 17p_cnn_dh t31_cnall 10+38 .......................PASS
+ 013 br211 TBR.sh 17p_cnn_dh t31_cnall 24+24 .......................PASS
+ 014 bl211 TBL.sh 17p_cnn_dh t31_cnall 48 ..........................SKIPPED*
+ 015 sm311 TSM.sh 4p_casa_dh t31_casa 48 ...........................PASS
+ 016 er311 TER.sh 4p_casa_dh t31_casa 10+38 ........................PASS
+ 017 br311 TBR.sh 4p_casa_dh t31_casa 24+24 ........................PASS
+ 018 bl311 TBL.sh 4p_casa_dh t31_casa 48 ...........................SKIPPED*
+ 019 sm411 TSM.sh 10p_dgvm_dh t31_dgvm 48 ..........................PASS
+ 020 er411 TER.sh 10p_dgvm_dh t31_dgvm 10+38 .......................PASS
+ 021 br411 TBR.sh 10p_dgvm_dh t31_dgvm 24+24 .......................PASS
+ 022 bl411 TBL.sh 10p_dgvm_dh t31_dgvm 48 ..........................SKIPPED*
+ bangkok/lf95:
+
+CLM tag used for the baseline comparison tests if applicable:
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CN inactive
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ new climate
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ /OLESON/csm/hydp2_off_communn_hk39
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+http://www.cgd.ucar.edu/tss/clm/diagnostics/lmwg_hydro/hydp2_off_communn_hk39aa-hydp2_off_communn_hk38aa/setsIndex.html
+
+
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_94
+Originator(s): nanr, Keith Oleson, Peter Thornton
+Date: Thu Mar 8 14:22:36 MST 2007
+One-line Summary: BTRAN modification
+
+Purpose of changes: Change BTRAN calculation to improve prognostic
+ LAI estimation in high latitudes.
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Keith Oleson, Peter Thornton, Dave Lawrence
+
+List any svn externals directories updated (csm_share, mct, etc.): nanr
+
+List all subroutines eliminated: nanr
+
+List all subroutines added and what they do: nanr
+
+List all existing files that have been modified, and describe the changes:
+M src/biogeophys/CanopyFluxesMod.F90
+
+Changing calculation of rootr to allow non-zero rootr (and btran) in partially frozen layers.
+
+Summary of testing:
+
+ bluevista:
+
+ 001 sm111 TSM.sh 4p_vodsr_dh t31 48 ...............................PASS
+ 002 er111 TER.sh 4p_vodsr_dh t31 10+38 ............................PASS
+ 003 br111 TBR.sh 4p_vodsr_dh t31 24+24 ............................PASS
+ 004 bl111 TBL.sh 4p_vodsr_dh t31 48 ...............................SKIPPED*
+ 005 sm114 TSM.sh 4p_vodsr_h t31 48 ................................PASS
+ 006 sm121 TSM.sh 17p_vodsr_dh t31 48 ..............................PASS
+ 007 er121 TER.sh 17p_vodsr_dh t31 10+38 ...........................PASS
+ 008 br121 TBR.sh 17p_vodsr_dh t31 24+24 ...........................PASS
+ 009 bl121 TBL.sh 17p_vodsr_dh t31 48 ..............................SKIPPED*
+ 010 sm124 TSM.sh 17p_vodsr_h t31 48 ...............................PASS
+ 011 sm211 TSM.sh 17p_cnn_dh t31_cnall 48 ..........................PASS
+ 012 er211 TER.sh 17p_cnn_dh t31_cnall 10+38 .......................PASS
+ 013 br211 TBR.sh 17p_cnn_dh t31_cnall 24+24 .......................PASS
+ 014 bl211 TBL.sh 17p_cnn_dh t31_cnall 48 ..........................SKIPPED*
+ 015 sm311 TSM.sh 4p_casa_dh t31_casa 48 ...........................PASS
+ 016 er311 TER.sh 4p_casa_dh t31_casa 10+38 ........................PASS
+ 017 br311 TBR.sh 4p_casa_dh t31_casa 24+24 ........................PASS
+ 018 bl311 TBL.sh 4p_casa_dh t31_casa 48 ...........................SKIPPED*
+ 019 sm411 TSM.sh 10p_dgvm_dh t31_dgvm 48 ..........................PASS
+ 020 er411 TER.sh 10p_dgvm_dh t31_dgvm 10+38 .......................PASS
+ 021 br411 TBR.sh 10p_dgvm_dh t31_dgvm 24+24 .......................PASS
+ 022 bl411 TBL.sh 10p_dgvm_dh t31_dgvm 48 ..........................SKIPPED*
+
+ bangkok/lf95:
+
+CLM tag used for the baseline comparison tests if applicable: none
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: all
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ -larger than roundoff. Climate changes unknown.
+ -Improves prognostic LAI estimation in high latitudes.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository): clm3_expa_89
+ - platform/compilers: bluevista
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+ /OLESON/csm/hydp2_off_communn_hk38
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+http://www.cgd.ucar.edu/tss/clm/diagnostics/lmwg_hydro/hydp2_off_communn_hk38aa-hydp2_off_communn_expa89aa/setsIndex.html
+
+
+===============================================================
+
+===============================================================
+Tag name: clm3_expa_93 ! NOTE: Tag incremented to correct mistaken tag number in documentation. (nanr)
+Originator(s): tcraig
+Date: Tue Feb 27 16:53:41 MST 2007
+One-line Summary: merge fmf branch to trunk (low memory mods)
+
+Purpose of changes: reduce memory and improve memory scaling
+
+Bugs fixed (include bugzilla ID):
+
+Describe any changes made to build system: began adding support
+ for frost in Makefile and added a run-frost.csh (not yet
+ fully validated)
+
+Describe any changes made to the namelist: added new optional namelist
+ input, nsegspc (number of segments per clump for new decomp. default
+ is 20, 1 will produce poor loadbalance, infinity yields too many
+ segments per pe but good load balance. performance asymptotes for
+ several configurations at about 5-10 segments/pe, use 20 as default.)
+
+List any changes to the defaults for the boundary datasets: NONE
+
+Describe any substantial timing or memory changes: significant reduction
+ in memory use and improved memory scaling.
+
+Code reviewed by:
+
+List any svn externals directories updated (csm_share, mct, etc.): update
+ to mct external, MCT2_3_0_070206
+
+List all subroutines eliminated:
+
+List all subroutines added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+- implement new 1d decomp based on nsegspc rather than balancing pfts,
+ nsegspc is the number of segments per clump. the 1d gridcells will
+ be divided into clumps and segments per clump so there will be
+ a total number of segments, clumps*nsegspc, each of about equal number
+ of gridcells, that will be distributed round-robin to pes. pfts are
+ derived later and will hopefully end up being nearly as well load
+ balanced as the previous method without having to precompute pfts
+ and requiring much less memory. see above for more info on the namelist
+ input and default.
+- reorganize initialization, split decomp_init into three phases,
+ atm (coarse), lnd (finemesh), and glcp (subgrid).
+- add new datatype, latlon to hold some global grid info
+- now all domain info is local (although initialization still needs
+ to be modified)
+- remove some dead code
+- add new timers (bug #302)
+- split gatm out of domain type
+- create simple setgatm_UNITY routine, finemesh capability now disabled,
+ must fix setgatm in future version
+- move wtxy, vegxy, and pctspec to clm_varsur, allocate as local arrays
+ now (begg:endg) and modify surfrd to handle local data only both for
+ I/O and initialization.
+- implement gather/scatter routines in spmdGathScatMod that use gsmaps.
+- update MCT and share
+- port to frost
+- get rid of some of the global decomps use in code, still more to do
+- memory cleanup in STATICEcosysDynMod
+- implement new ncdio methods for reading to local gridcell data using gsmaps
+- clean up atmdrv, use newer low mem datatypes, reduce memory
+- clean up rtm, use newer low mem datatypes, reduce memory
+- remove history "lat/lon" fields
+
+M test/system/test_driver.sh
+M tools/mkgriddata/mkgriddata.F90
+M bld/configure
+M bld/Makefile.in
+A + bld/run-frost.csh
+M src/biogeochem/CASAMod.F90
+M src/biogeochem/STATICEcosysDynMod.F90
+M src/biogeochem/DGVMMod.F90
+M src/main/spmdGathScatMod.F90
+M src/main/abortutils.F90
+M src/main/clm_comp.F90
+M src/main/driver.F90
+M src/main/ncdio.F90
+M src/main/atmdrvMod.F90
+M src/main/subgridMod.F90
+M src/main/initializeMod.F90
+M src/main/pftdynMod.F90
+M src/main/iniTimeConst.F90
+M src/main/histFileMod.F90
+M src/main/program_csm.F90
+M src/main/clm_atmlnd.F90
+M src/main/clm_varsur.F90
+M src/main/clm_csmMod.F90
+M src/main/restFileMod.F90
+M src/main/controlMod.F90
+M src/main/clm_varctl.F90
+M src/main/ndepFileMod.F90
+M src/main/initGridCellsMod.F90
+M src/main/lnd_comp_mct.F90
+M src/main/program_off.F90
+M src/main/surfrdMod.F90
+M src/main/domainMod.F90
+M src/main/decompMod.F90
+M src/main/areaMod.F90
+M src/main/clm_mct_mod.F90
+M src/SVN_EXTERNAL_DIRECTORIES
+M src/riverroute/RtmMod.F90
+
+Summary of testing:
+
+ bluevista: all pass except
+ 004 bl111 TBL.sh 4p_vodsr_dh t31 48 ...............................FAIL! rc= 7
+ 009 bl121 TBL.sh 17p_vodsr_dh t31 48 ..............................FAIL! rc= 7
+ bangkok/lf95: all pass except
+ 004 bl112 TBL.sh 4p_vodsr_dm t31 48 ...............................FAIL! rc= 7
+ 009 bl122 TBL.sh 17p_vodsr_dm t31 48 ..............................FAIL! rc= 7
+ Due to roundoff change in rtm, only rtm fields affected, otherwise bfb
+
+ Also tested version in CCSM vs ccsm3_1_beta45
+ ERS.f45_g35.B.bluevista16
+ ERS.f45_g35.B2.lightning
+ Both PASS and bfb versus beta45 except for rtm roundoff difference and
+ associated error growth through ocean coupling
+
+ Also tested mods merged to clm3_expa_91 with cam3_4_00, all
+ CAM tests pass on bangkok, bluevista, and tempest including scam.
+ Tested on bangkok with cam3_4_01 and updated to clm3_expa_92, all
+ CAM tests pass on bangkok. bluevista and tempest not tested
+ due to time constraints and earlier adequate testing with
+ clm3_expa_91 and cam3_4_00.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_89
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: anything with RTM on
+ - what platforms/compilers: all
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ roundoff change in RTM due to roundoff change in cell area calculation
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? tested in multiple systems, review growth of diffs in
+ stand-alone clm, only rtm fields affected, diffs remain roundoff for
+ 48 timesteps, no coupling to other fields or error growth in system.
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+
+
+===============================================================
+Tag name: clm3_expa_92
+Originator(s): erik,mvertens,mvr
+Date: Mon Feb 26 15:59:16 MST 2007
+One-line Summary: When running with Sequential CCSM -- use date for albedo calculation
+
+Purpose of changes: To work with cam3_4_01
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: erik,mvr,mvertens
+
+List any svn externals directories updated (csm_share, mct, etc.): none
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M src/main/clm_comp.F90
+M src/main/lnd_comp_mct.F90
+
+Have sequential CCSM give CLM the date of the next radiation calculation so that
+it can calculate albedo's for that specific time-step. This is needed to work with
+cam3_4_01.
+
+Summary of testing:
+
+ bluevista: Pass
+ bangkok/lf95: Pass
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_91
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_91
+Originator(s): erik
+Date: Wed Feb 21 13:19:51 MST 2007
+One-line Summary: Fix SCAM mode, add more machines for test_driver, have tools use csm_share,
+ make clmtype private (except for data exporting), fix several bugs
+
+Purpose of changes: Fix SCAM mode so can make a new CAM tag.
+
+Bugs fixed (include bugzilla ID): 252, 310, 370, 377, 385 (partial -- 302, 357, 389)
+
+Describe any changes made to build system: Remove -DNO_R16 from Makefile
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: MSS writes are now synchronous instead of
+ asynchronous
+
+Code reviewed by: slevis
+
+List any externals updated: Update to csm_share3_070220
+
+List all subroutines eliminated:
+
+Remove makdep -- as MkDepends replaces it
+Remove cprlndnc -- as newcprnc replaces it
+
+D tools/makdep
+D tools/makdep/main.c
+D tools/makdep/Makefile
+D tools/makdep/README
+D tools/cprlndnc
+D tools/cprlndnc/cprtps.F
+D tools/cprlndnc/lenchr.F
+D tools/cprlndnc/precision.F
+D tools/cprlndnc/printstats.F
+D tools/cprlndnc/wrap_nf.F
+D tools/cprlndnc/stats.F
+D tools/cprlndnc/ismax.F
+D tools/cprlndnc/initstats.F
+D tools/cprlndnc/nldat.F
+D tools/cprlndnc/cpr.F
+D tools/cprlndnc/prhddiff.F
+D tools/cprlndnc/header.F
+D tools/cprlndnc/Makefile
+D doc/BranchLog
+D doc/ChangeSum
+
+Delete files that tools use that are copied from main src directories (so we don't have
+to maintain separate copies of code)
+
+D Deleting tools/mkgriddata/fileutils.F90
+D Deleting tools/mkgriddata/nanMod.F90
+D Deleting tools/mkgriddata/shr_const_mod.F90
+D Deleting tools/mkgriddata/shr_kind_mod.F90
+D Deleting tools/mkgriddata/shr_sys_mod.F90
+D Deleting tools/mksurfdata/fileutils.F90
+D Deleting tools/mksurfdata/nanMod.F90
+D Deleting tools/mksurfdata/shr_const_mod.F90
+D Deleting tools/mksurfdata/shr_kind_mod.F90
+D Deleting tools/mksurfdata/shr_sys_mod.F90
+D Deleting tools/mksurfdata/shr_timer_mod.F90
+
+List all subroutines added and what they do:
+
+A test/system/TSMtools.sh -- for testing of the tools (not tested yet)
+A test/system/TCBtools.sh -- for build testing of the tools (not tested yet)
+A test/system/tests_pretag_blueice -- for running on blueice (does work)
+A test/system/tests_pretag_jaguar -- for running on jaguar (doesn't work yet)
+A test/system/tests_posttag_lightning -- for running on lightning (doesn't work yet)
+
+Files added so that tools build uses copies of files in main directories rather than separate copies
+
+A tools/mkgriddata/Filepath
+A tools/mkgriddata/Srcfiles
+A tools/mkgriddata/misc.h
+A tools/mkgriddata/preproc.h
+A tools/mksurfdata/Filepath
+A tools/mksurfdata/Srcfiles
+A tools/mksurfdata/misc.h
+A tools/mksurfdata/preproc.h
+
+List all existing files that have been modified, and describe the changes:
+
+Add check for soil energy balance:
+
+M src/biogeophys/BalanceCheckMod.F90
+
+Bigint bug fix (don't copy over static fields with bigint values when copying a domain)
+
+M src/main/domainMod.F90
+
+SCAM fixes (read datasets differently for SCAM)
+
+M src/main/surfrdMod.F90
+M src/main/ndepFileMod.F90
+M src/main/iniTimeConst.F90
+
+Change so that tools use main copies of code rather than own particular copy:
+
+M tools/mkgriddata/Makefile
+M tools/mkgriddata/mkgriddata.namelist
+M tools/mksurfdata/Makefile
+M tools/mksurfdata/domainMod.F90
+
+Make MSS writes synchronous instead of asynchronous: Required for LSF queuing systems
+
+M src/main/fileutils.F90
+
+Landmask bug fix: (landmask now output globally with no missing or fill values)
+
+M src/main/histFileMod.F90
+M src/main/initializeMod.F90
+M src/main/ncdio.F90
+
+Timers
+
+M src/main/program_csm.F90
+M src/main/driver.F90
+
+Make clmtype private -- so it only exports its own data, not data it uses.
+
+M src/biogeochem/CNGapMortalityMod.F90
+M src/biogeochem/VOCEmissionMod.F90
+M src/biogeochem/CNrestMod.F90
+M src/biogeochem/CNC13FluxMod.F90
+M src/biogeochem/CNSetValueMod.F90
+M src/main/atmdrvMod.F90
+M src/main/clmtypeInitMod.F90
+M src/main/pftdynMod.F90
+M src/main/restFileMod.F90
+M src/main/clmtype.F90
+M src/biogeophys/SnowHydrologyMod.F90
+M src/biogeophys/SurfaceAlbedoMod.F90
+M src/biogeophys/BiogeophysRestMod.F90
+M src/biogeophys/DriverInitMod.F90
+
+Miscellaneous:
+
+M bld/Makefile.in --- Remove NO_R16 CPP token, some changes to start work on jaguar
+M test/system/test_driver.sh -- add more machines
+M test/system/CLM_runcmnd.sh -- add more machines
+
+Summary of testing:
+
+ bluevista: All PASS -- except TBL tests because of csm_share shr_const_mod TKFRZ change
+ bangkok/lf95: All PASS -- except TBL tests because of csm_share shr_const_mod TKFRZ change
+ blueice: All PASS -- except TBL tests because of csm_share shr_const_mod TKFRZ change
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_90
+
+ Summarize any changes to answers: larger than roundoff (all config/all machines)
+
+ (No simulations were performed as CCSM scientists deemed the change to be
+ insignificant)
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_90
+Originator(s): nanr
+Date: Tue Feb 6 13:17:55 MST 2007
+One-line Summary: Changed creategridMod.F90 to read variables from 10min USGS file.
+
+Purpose of changes:
+Added htopo and landfract to retrieve landfrac and topography for processing USGS-gtopo30_10min_c050419.nc
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: nanr
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M creategridMod.F90
+
+ Added lines to creategridMod.F90 to read variables from USGS-gtopo30-10min_c050419.nc
+
+ ier = nf_inq_varid (ncid, 'landfract', varid)
+ if (ier == NF_NOERR) then
+ if (landfracset) write(6,*) trim(subname),' WARNING, overwriting frac'
+ landfracset = .true.
+ write(6,*) trim(subname),' read landfract'
+ call check_ret(nf_inq_varid (ncid, 'landfract', varid), subname)
+ call check_ret(nf_get_var_double (ncid, varid, domain%frac), subname)
+ endif
+
+ ier = nf_inq_varid (ncid, 'htopo', varid)
+ if (ier == NF_NOERR) then
+ if (toposet) write(6,*) trim(subname),' WARNING, overwriting topo'
+ toposet = .true.
+ write(6,*) trim(subname),' read htopo'
+ call check_ret(nf_inq_varid (ncid, 'htopo', varid), subname)
+ call check_ret(nf_get_var_double (ncid, varid, domain%topo), subname)
+ endif
+
+
+Summary of testing: none. Affects tools/mkgriddata only.
+
+ bluesky:
+ tempest:
+ bangkok/lf95:
+
+CLM tag used for the baseline comparison tests if applicable:
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_89
+Originator(s): erik,oleson
+Date: Feb/02/2007
+One-line Summary:
+
+Purpose of changes: Use new water table rise calculation in SoilHydrology
+
+Bugs fixed (include bugzilla ID): 345, 353
+
+Describe any changes made to build system: None (although added Darwin to mksrfdat build Makefile)
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: Keith Oleson
+
+List all subroutines eliminated: None
+
+Remove bld/offline directory tree
+
+List all subroutines added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+M bld/Makefile.in ---- Add -DFORTRANUNDERSCORE so can compile mpi-serial on IRIX
+
+Change run scripts so that spmd and smp settings work for both on AND off
+
+M bld/run-pc.csh ----------------------- Also add LD_LIBRARY_PATH setting
+M bld/run-sgi.csh
+M bld/run-lightning.csh
+
+M src/main/spmdMod.F90 ------------------ Remove #ifdef around #include so will
+ run serial (this is a partial fix to bug 337. The longer term fix is to remove all
+ #ifdef SPMD as we can use the mpi-serial code to make the serial and SPMD code the same.
+
+M src/biogeophys/SoilHydrologyMod.F90 --- New drainage formulation from Keith Oleson
+
+Summary of testing:
+
+ bluesky: -- All but comparison to previous version
+ tempest: -- All but comparison to previous version and the following restart tests
+ (These tests fail on previous versions as well -- documented as bug 361)
+002 er111 TER.sh 4p_vodsr_dh t31 10+38 ............................FAIL! rc= 11
+003 br111 TBR.sh 4p_vodsr_dh t31 24+24 ............................FAIL! rc= 11
+005 sm116 TSM.sh 4p_vodsr_o t31 48 ................................FAIL! rc= 4
+007 er121 TER.sh 17p_vodsr_dh t31 10+38 ...........................FAIL! rc= 11
+008 br121 TBR.sh 17p_vodsr_dh t31 24+24 ...........................FAIL! rc= 11
+012 er211 TER.sh 17p_cnn_dh t31_cnall 10+38 .......................FAIL! rc= 11
+013 br211 TBR.sh 17p_cnn_dh t31_cnall 24+24 .......................FAIL! rc= 11
+016 er311 TER.sh 4p_casa_dh t31_casa 10+38 ........................FAIL! rc= 11
+017 br311 TBR.sh 4p_casa_dh t31_casa 24+24 ........................FAIL! rc= 11
+020 er411 TER.sh 10p_dgvm_dh t31_dgvm 10+38 .......................FAIL! rc= 11
+021 br411 TBR.sh 10p_dgvm_dh t31_dgvm 24+24 .......................FAIL! rc= 11
+ (We are going to remove tempest as a standard test for CLM)
+
+ bangkok/lf95: -- All but comparison to previous version
+
+CLM tag used for the baseline comparison tests if applicable: none
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: All
+ - what platforms/compilers: All
+ - nature of change (similar climate)
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers: Cray-XT3/jaguar
+ - configuration (CPP ifdefs): default
+ - build-namelist command (or complete namelist):
+ - MSS location of output: /CCSM/csm/b31.020ws/lnd/hist
+
+ URL for LMWG diagnostics output used to validate new climate: Not yet prepared
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_88
+Originator(s): nanr
+Date: Thu Jan 11 12:31:51 MST 2007
+One-line Summary: Minor formatting change in tools.
+ Corrections to ChangeLog
+
+
+Purpose of changes:
+1. Update formatted write in tools/ mkgriddata.F90 and tools/mksrfdat.F90
+ to accommodate 4 digit lat/lons.
+2. Add note to ChangeLog to explain commit by nanr (10/27) that was not tagged.
+3. correct Changelog for tag clm3_expa_80. The changes listed below never happened.
+ surfFileMod.F90 was actually removed from the trunk in a previous tag (clm3_expa_66)
+ and renamed surfrdMod.F90. So this modification probably reflects the status of the branch
+ Keith Oleson was working on.
+ M src/main/surfFileMod.F90
+
+ Removed statements contained within CN ifdef (OK'd by P. Thornton) that:
+
+ ! the following test prevents the assignment of temperate deciduous
+ ! vegetation types in the tropics
+ ! 1. broadleaf deciduous temperate tree -> broadleaf deciduous tropical tree
+ ! 2. broadleaf deciduous temperate shrub -> broadleaf deciduous tropical tree
+ ! this reassignment from shrub to tree is necessary because there is currently no
+ ! tropical deciduous broadleaf shrub type defined.
+
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: nanr
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M tools/mksurfdata/mksrfdat.F90
+M tools/mkgriddata/mkgriddata.F90
+
+ Changed formatting strings to accommodate 4 char lat/lons.
+ OLD: write (resol,'(i3.3,"x",i3.3)') lsmlat,lsmlon
+ NEW: write (resol,'(i4.4,"x",i4.4)') lsmlat,lsmlon
+
+Summary of testing: none. Changes only to tools and ChangeLog
+
+ bluesky:
+ tempest:
+ bangkok/lf95:
+
+CLM tag used for the baseline comparison tests if applicable: none
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in: none
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate: NA
+
+ URL for LMWG diagnostics output used to validate new climate: NA
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_87
+Originator(s): tcraig, jet
+Date: Wed Dec 27 05:03:01 GMT 2006
+One-line Summary: merge fme branch, merge refactor_scam branch
+
+Purpose of changes: bug fixes, reduce memory usage, improve memory scaling,
+ add mct package, update scam
+
+Bugs fixed (include bugzilla ID):
+ #133 adomain,ldomain compare
+ #290 time bounds problem in history file
+ #291 fix rtm history bug on bangkok
+ #301 modify decomp info in i/o
+ #321 merge refactor_scam branch
+
+Describe any changes made to build system: none
+
+ consisting of src, dst, S (COL, ROW, S). update the internal clm
+ atm/lnd mappings to use new datatype. lnd/rtm and driver/atm
+ mapping still using gridmap_type. this will be updated in future
+ versions.
+- convert domain from 2d global to 1d global arrays. add glo
+ decomp which is global 1d indexing like ij to 1d or gsn
+ uncompressed.
+- convert wtxy, vegxy, pctspec from 2d to 1d arrays. migrate many other arrays
+ from 2d global (i,j) to 1d global. this is for nesting and to eventually
+ cut down on number of index mappings in decomp_type
+- reorganize order of initialization calls to start thinking about nesting
+- add gatm array to domain datatype
+- clean up dead code.
+- rename initSubgridMod to subgridMod
+- move map_indexes to subgridMod, rename get_subgrid_indexees
+- rearrange a few subroutines to improve filename hierarchy and use logic
+- delete get_sn routines, no longer needed
+- update indexing in clm_atmlnd, remove hardwire indexes
+- rewrite i/o as needed for 1d global arrays, not 2d
+- delete gatherWeightsDGVM from DGVMMod.F90, no longer needed
+- rename surfFileMod to surfrdMod.F90
+- reduce size of subgrid_type and redefine gcelldc and gcellsn
+
+
+Summary of testing:
+
+ bluesky: clm test passes, cam pretag passes except ccsm
+ tempest: cam pretag passes
+ bangkok/lf95: cam pretag passes except bl153, bl353, bl553 due to code
+ changes and resulting binary produced by compiler optimizations.
+ also scam fails.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_65, cam3_3_16
+
+ Summarize any changes to answers: NONE bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_65
+Originator(s): Erik Kluzek
+Date: Mon Jul 10 13:52:20 MDT 2006
+One-line Summary: Use share clocks and inputinfo object at driver level
+
+Purpose of changes: Use new version of esmf_wrf and csm_share as next step
+ in sequential CCSM development.
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: Add ability to use Darwin, add eshr to
+Filepath
+
+Describe any changes made to the namelist: Instead of directing namelist from stdin
+ explicitly open namelist filename. Change namelist name from clmexp to clm_inparm.
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: Mariana Vertenstein, Tony Craig, Forrest Hoffman
+
+Externals changed: csm_share to share3_060710
+ esmf_wrf_timemgr to esmf_wrf_timemgr_060616
+
+List all subroutines eliminated: None
+
+List all subroutines added and what they do: control_setNL (controlMod.F90) sets the
+ namelist filename.
+
+List all existing files that have been modified, and describe the changes:
+
+tools/newcprnc/Makefile
+bld/offline/tests/CLM_namelist.pm
+bld/offline/tests/CLM_lab.pm
+bld/offline/tests/CLM.pm
+bld/offline/tests/model_specs.csh
+bld/offline/tests/configure.csh
+bld/offline/tests/config_machine_specs.csh
+bld/offline/tests/CLM_run.pm
+bld/offline/tests/test_batch.csh
+bld/offline/tests/Makefile
+bld/offline/jobscript.csh
+
+ Add eshr to Filepath, add Darwin as a valid platform, don't redirect unit 5 for
+namelist. Set MODEL_DATDIR explicitly. Use lnd.stdin as default namelist name.
+Add "-g" to Makefile. Change clm namelist from clmexp to clm_inparm. Get test_batch.csh
+to work both on bangkok for Linux/Lahey and tempest for SGI. Write out Rootdir file
+when configuring build directory.
+
+src/main/time_manager.F90 -- Use dayOfYear_r8 for calc_calday.
+src/main/clm_comp.F90 ------ Pass CCSMInit in.
+src/main/fileutils.F90 ----- Small changes to how using shr_file_mod.
+src/main/initializeMod.F90 - Pass clock in.
+src/main/program_csm.F90 --- Change where ESMF_Initialize is done.
+src/main/controlMod.F90 ---- Add method to set namelist name, pass clock in and use it.
+src/main/clm_varctl.F90 ---- Get rid of cam_ variables.
+src/main/lnd_comp_mct.F90 -- Pass in clock and CCSMInit object.
+
+Summary of testing:
+
+ bluesky: test-batch.csh -- PASS and CAM and CAM CCSM tests pass.
+ tempest: test-batch.csh -- PASS and CAM tests pass.
+ bangkok/lf95: test-batch.csh -- PASS and CAM tests pass.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_64
+
+ Summarize any changes to answers: NONE bit-for-bit
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_64
+Originator(s): Dani Bundy Coleman
+Date: Thu Jun 29 14:44:07 MDT 2006
+One-line Summary: dust modifications from Natalie Mahowald
+
+Purpose of changes: update dust code
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Mariana Vertenstein
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+
+M src/biogeochem/DUSTMod.F90
+ OLD dmt_vma = 2.524e-6_r8 ! [m] Mass median diameter analytic She84 p.75 Table1
+ NEW dmt_vma = 3.500e-6_r8 ! [m] Mass median diameter analytic
+
+M src/main/clm_atmlnd.F90
+ add land-to-atmosphere communication of fv,ram1 & dust fluxes
+ (only active if defined DUST or PROGSEASALT )
+M src/main/lnd_comp_mct.F90
+ add land-to-atmosphere communication of fv,ram1 & dust fluxes
+ (only active if defined DUST or PROGSEASALT )
+
+Summary of testing:
+
+ bluesky: tested with cam, bfb when DUST and PROGSEASALT not defined
+ tempest:
+ bangkok/lf95: tested with cam, bfb when DUST and PROGSEASALT not defined
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_63
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_63
+Originator(s): Mariana Vertenstein
+Date: Fri May 12 16:08:03 MDT 2006
+One-line Summary: introduced mct domains in COUP_CAM mode
+
+Purpose of changes: To introduce generalized mct domains
+in COUP_CAM mode for the purposes of generating a sequential
+ccsm
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Tony Craig
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+ lnd_comp_mct.f90
+ removed lnd_CheckGrid_mct routine and replaced it with lnd_domain_mct
+ each processor sends it local domain information stored in an MCT
+ GeneralGrid data structure back to the top level application driver.
+ A global gather is done for the GeneralGrid and domain comparison is
+ performed on the master processor.
+
+Summary of testing:
+
+ bluesky: only cam test suite was run successfully
+ tempest: only cam test suite was run successfully
+ bangkok/lf95: only cam test suite was run successfully
+
+CLM tag used for the baseline comparison tests if applicable:
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_62
+Originator(s): erik, tcraig
+Date: Wed May 10 00:06:39 MDT 2006
+One-line Summary: merge shrgetput08_clm3_expa_61, fix finemesh bugs
+
+Purpose of changes: changes required for sequential ccsm. validate
+ finemesh is running properly.
+
+Bugs fixed (include bugzilla ID):
+ a couple finemesh bugs, not documented.
+
+Describe any changes made to build system: modified makefile slightly
+ to set HIDE_MPI when SPMD is FALSE, remove HIDE_SHR_MSG
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: erik, tcraig
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do: none
+
+List all existing files that have been modified, and describe the changes:
+Makefile:
+ - remove HIDE_SHR_MSG, not needed anymore
+ - add HIDE_MPI if SPMD is FALSE
+SVN_EXTERNALS:
+ - change csm_share version from share3_051205 to share3_060428
+ - change emsf_wrf_timemgr version from esmf_wrf_timemgr_051212 to esmf_wrf_timemgr_060501
+fileutils.F90:
+ - uses shr_file_mod.F90 routines
+ - use shr_file_mod syntax for archive_dir (using mss: prefix)
+initializeMod.F90:
+ - add pnamer_bin get
+program_csm.F90:
+ - add ESMF_Initialize call
+clm_atmlnd.F90:
+ - fix bug in call to grid_maparray for finemesh mapping, only affects finemesh runs.
+controlMod.F90:
+ - use shr_file_mod syntax for archive_dir (using mss: prefix)
+lnd_comp_mct.F90:
+ - change call to get_proc_bounds to get_proc_bounds_atm (bug for finemesh runs).
+program_off.F90:
+ - add calls to ESMF_Initialize and ESMF_Finalize
+
+
+Summary of testing:
+ bluesky: cam full suite bfb
+ clm full suite not bfb (TS is bfb for 2 days, history file not bfb
+ after ~1.5 days probably due to new esmf time manager, likely roundoff)
+ tempest: cam full suite bfb, ccsm build test
+ bangkok/lf95: cam full suite bfb
+ bluevista : ccsm TER.01a.1.9x2.5_gx1v3.B.bluevista bfb
+ cam finemesh T42half.clim0 test, bfb for 42 timesteps vs cam3_2_49
+ lightning : ccsm TER.01a.4x5_gx3v5.B.bluevista bfb
+
+CLM tag used for the baseline comparison tests if applicable:
+ clm3_expa_61, cam3_3_4, ccsm3_1_beta27
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: clm standalone only
+ - what platforms/compilers: only bluesky tested
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+ assume roundoff. it's bfb for at least a day. in cam and ccsm mode they
+ are bfb.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? guess
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+
+===============================================================
+Tag name: clm3_expa_61
+Originator(s): T Craig
+Date: Thu Apr 27 01:10:40 MDT 2006
+One-line Summary: merge cammct05_clm3_expa_58 onto main trunk,
+ modify surface dataset input
+
+Purpose of changes: merge branch
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the boundary datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: myself, developed by MV
+
+List all subroutines eliminated:
+ clm_camMod.F90 - interface to cam
+ MCT_atmlnd_cpl.F90 - mct migrated to sequential driver
+ MCT_lnd_comp.F90 - mct migrated to sequential driver
+
+List all subroutines added and what they do:
+ lnd_comp_mct.F90 - interface to sequential driver using mct coupling
+
+List all existing files that have been modified, and describe the changes:
+ clm_comp.F90 - separate init method into init1, init2
+ initializeMod.F90 - separate initialize into initialize1 and 2
+ program_csm.F90 - add call to clm_init0
+ clm_atmlnd.F90 - PWorley's changes to improve phoenix performance,
+ packed arrays in clm_mapa2l and clm_mapl2a interpolation.
+ program_off.F90 - add call to clm_init0
+ areaMod.F90 - PWorley's changes to improve phoenix performance,
+ interpolate packed arrays in gridmap_maparray
+ Hydrology2Mod.F90 - remove use of iam
+
+ mksurfdata, several files changed to convert
+ mksrf_fgrid_global/regional to mksrf_fgrid and mksrf_gridtype
+
+Summary of testing:
+
+ bluesky: clm full suite bfb, cam full suite bfb
+ bluevista: ccsm bfb TER.01a.T31_gx3v5.B.bluevista, TER.01a.4x5_gx3v5.B.bluevista
+ tempest: cam full suite bfb
+ bangkok/lf95: cam full suite bfb
+
+ mksrf tested on bluevista, 7 cases, bfb
+
+CLM tag used for the baseline comparison tests if applicable:
+ clm3_expa_60, cam3_3_2, ccsm3_1_beta25
+
+changes are bit-for-bit
+
+===============================================================
+
+
+===============================================================
+Tag name: clm3_expa_60
+Originator(s): Forrest Hoffman
+Date: Fri Apr 14 11:03:34 EDT 2006
+One-line Summary: Rearranged physiology fields, changed CO2 constants, fixed Bug #43
+
+Purpose of changes: Make radiation-related physiology fields standard, prepare code for C-LAMP experiments, and make test-model run on Cray X1E (phoenix)
+
+Bugs fixed (include bugzilla ID): Bug #43
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: Myself
+
+List all subroutines eliminated: None
+
+List all subroutines added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+nfwrappers.f90:
+Changed intent for ncid from intent(in) to intent(out) since it must be
+returned to the calling routines. This fixed Bug #43 which was discovered
+on the Cray X1E (phoenix).
+
+ChangeLog:
+Added this log entry.
+
+clm_varcon.F90:
+Changed co2_ppmv_const to 283.1878_r8 for both CASA' and CN in preparation
+for Experiment 1 of the C-LAMP.
+
+histFldsMod.F90
+Moved LAISUN, LAISHA, TLAI, TSAI, SLASUN, and SLASHA out of the CN-only
+section of the code so that they appear on the regular CLM output files
+since the two-leaf radiation code is now standard. In addition, TLAI and
+TSAI were removed from the DGVM-only section of the code since these
+output fields are now standard.
+
+Summary of testing:
+
+ cheetah:
+Ran test-model for T31, T31cn, T31cnall, T31casa, and T31dgvm with
+baseline clm3_expa_59. T31 and T31dgvm passed all tests. The others
+passed tests 01-05, but not the 06_control test because of the change
+in co2_ppmv_const.
+
+ phoenix:
+Ran test-model for T31, T31cn, T31cnall, T31casa, and T31dgvm with
+baseline clm3_expa_59. T31 and T31dgvm passed all tests. The others
+passed tests 01-05, but not the 06_control test because of the change
+in co2_ppmv_const.
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_59
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: co2_ppmv_const modification changes answers
+ - what platforms/compilers: IBM (cheetah) and Cray X1E (phoenix)
+ - nature of change (roundoff; larger than roundoff/same climate; new climate): larger than roundoff because of changes in carbon pools
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? cprnc
+
+ * There is no validated climate in these model configurations. *
+
+===============================================================
+===============================================================
+Tag name: clm3_expa_59
+Originator(s): Tony Craig
+Date: Wed Apr 5 18:03:23 MDT 2006
+One-line Summary: add fatmlndfrc capability
+
+Purpose of changes: Support new datasets, other minor improvements,
+ update mkgrid and mksurf tools for new dataset generation.
+
+Bugs fixed (include bugzilla ID):
+
+Describe any changes made to build system: jobscript.csh modified for
+ new surface datasets, change baseline resolution to T31.
+
+Describe any changes made to the namelist: added optional fatmlndfrc
+ namelist input for landfrac file on atm grid.
+
+List any changes to the defaults for the boundary datasets: all new
+ grid, frac, and surf datasets generated, located in
+ /fs/cgd/csm/inputdata/lnd/clm2/[griddata,surfdata]
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: myself
+
+List all subroutines eliminated: None
+
+List all subroutines added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+tools/mksurfdata/mksrfdat.F90 - change default output filenames
+tools/mksurfdata/mklaiMod.F90 - removed masking since there is no mask anymore
+tools/mksurfdata/mkfileMod.F90 - removed read of LANDMASK and LANDFRAC
+tools/mksurfdata/creategridMod.F90 - removed write of LANDMASK and LANDFRAC
+tools/mksurfdata/Makefile - fix clean bug
+tools/mkgriddata/mkgriddata.F90 - add generation of ffracdat file
+ force area calculation for ccsm domain files due to noise in scrip areas
+ change default output filenames
+tools/mkgriddata/mkvarctl.F90 - add support for area recomputation
+tools/mkgriddata/mkfileMod.F90 - removed, merged into creategridMod.F90
+tools/mkgriddata/areaMod.F90 - add flush(6)
+tools/mkgriddata/creategridMod.F90 - add mkfile subroutine
+ fix bug in setting of corner points
+ handle wrap-around points better with corner points
+ add ability to adjust units of area (not automatic)
+ add checks for area
+ add ability to write either grid or frac file in write_domain
+bld/offline/tests/test_batch.csh - change default version from 53 to 58
+ turn on dgvm testing by default
+bld/offline/jobscript.csh - change to share queue on bluesky
+ run mixed mpi/openmp by default, 2x2
+ change default resolution to T31 (was T42)
+ update to use new surface datasets
+ change default, turn on DUST, RTM, VOC, turn off CN, SUPLN, SUNSHA, STOMATA2
+ add unlimit unlimited for AIX
+ fix redirection to compile_log.clm output file
+src/main/initializeMod.F90 - add fatmlndfrc stuff
+ add computation of ldomain%frac and ldomain%mask
+src/main/controlMod.F90 - add fatmlndfrc stuff
+src/main/clm_varctl.F90 - add fatmlndfrc stuff
+src/main/driver.F90 - remove redundant definition of caldayp1
+src/main/clmtypeInitMod.F90 - remove landfrac variable for clm3 gridcell_type
+src/main/histFileMod.F90 - add indxupsc, jndxupsc indices for upscaling
+src/main/program_csm.F90 - move shr_msg_stdio to after MPI_INIT, change
+ call so it only redirects log file for masterproc. this will clean
+ up the log file significantly but may lead to error messages ending
+ up in stdout.
+src/main/surfFileMod.F90 - add fatmlndfrc stuff
+src/main/initGridCellsMod.F90 - remove landfrac variable for clm3 gridcell_type
+src/main/domainMod.F90 - reorder domain data slightly (nothing changed)
+src/main/areaMod.F90 - change default of i_ovr and j_ovr from bigint to -1,
+ allows for cleaner writing of indxupsc and jndxupsc in history file.
+ change gridmap_setmapsFM to use _a and _l notation instead of _i and _o
+src/main/clmtype.F90 - remove landfrac from clm3 gridcell_type
+
+
+Summary of testing:
+
+ Baseline versions, clm3_expa_58, cam3_2_56, ccsm3_1_beta24
+
+ bluesky: clm full suite passes, cam full suite passes
+ tempest: cam full suite passes
+ bangkok/lf95: cam full suite passes
+ bluevista: ccsm TER.01a B passes for several resolutions, new datasets
+ in scripts
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_58
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Code changes are bfb, but answers may change when using new surface datasets
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: CCSM answers will change with new datasets as
+ default datasets are changing. clm default test is bfb at T31 with new datasets,
+ cam will be bfb as current default datasets are not being updated.
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+
+
+===============================================================
+Tag name: clm3_expa_58
+Originator(s): Forrest Hoffman
+Date: Thu Mar 9 17:04:27 EST 2006
+One-line Summary: Updates for the Cray X1E and a forcing height error check.
+
+Purpose of changes: Improvements on the Cray X1E and avoiding arithmetic exceptions when the forcing height is below the canopy height.
+
+Bugs fixed (include bugzilla ID): Bug #36
+
+Describe any changes made to build system: jobscript.csh modified for Cray X1E
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: None
+
+Code reviewed by: myself and (for forcing height check code) Mariana Vertenstein
+
+List all subroutines eliminated: None
+
+List all subroutines added and what they do: None
+
+List all existing files that have been modified, and describe the changes:
+
+ jobscript.csh - Modified for Cray X1E and cross-compiler
+ inicFileMod.F90 - Added compiler directives for vectorization
+ driver.F90 - Commented out CSDs to avoid model hangs caused by write
+ statements in science routines within the associated loops
+ controlMod.F90 - Changed default clump_pproc to 1 for the Cray (since CSDs
+ are not used in driver.F90)
+ histFldsMod.F90 - Changed type2d='levlak' for the TLAKE field
+ CanopyFluxesMod.F90 - Inserted code to check if the forcing height
+ is below the canopy height for any pft. Model will now abort when this
+ occurs instead of taking the log() of a negative number. See Bug #36
+
+Summary of testing:
+
+ bluesky: test-model ran as follows
+01_debug_run_SPMD: T31 ran
+02_debug_run_nonSPMD: T31 ran
+03_start: T31 ran
+04_restart: T31 ran
+05_norestart_compare_to_restart: T31 ran
+06_control: T31 ran
+01_debug_run_SPMD: T31cn ran
+02_debug_run_nonSPMD: T31cn ran
+03_start: T31cn ran
+04_restart: T31cn ran
+05_norestart_compare_to_restart: T31cn ran
+06_control: T31cn ran
+01_debug_run_SPMD: T31cnall ran
+02_debug_run_nonSPMD: T31cnall ran
+03_start: T31cnall ran
+04_restart: T31cnall ran
+05_norestart_compare_to_restart: T31cnall ran
+06_control: T31cnall ran
+01_debug_run_SPMD: T31casa ran
+02_debug_run_nonSPMD: T31casa ran
+03_start: T31casa ran
+04_restart: T31casa ran
+05_norestart_compare_to_restart: T31casa ran
+06_control: T31casa ran
+ cheetah:
+01_debug_run_SPMD: T31cnall ran
+02_debug_run_nonSPMD: T31cnall ran
+03_start: T31cnall ran
+04_restart: T31cnall ran
+05_norestart_compare_to_restart: T31cnall ran
+06_control: T31cnall ran
+01_debug_run_SPMD: T31 ran
+02_debug_run_nonSPMD: T31 ran
+03_start: T31 ran
+04_restart: T31 ran
+05_norestart_compare_to_restart: T31 ran
+06_control: T31 ran
+01_debug_run_SPMD: T31cn ran
+02_debug_run_nonSPMD: T31cn ran
+03_start: T31cn ran
+04_restart: T31cn ran
+05_norestart_compare_to_restart: T31cn ran
+06_control: T31cn ran
+01_debug_run_SPMD: T31casa ran
+02_debug_run_nonSPMD: T31casa ran
+03_start: T31casa ran
+04_restart: T31casa ran
+05_norestart_compare_to_restart: T31casa ran
+06_control: T31casa ran
+ tempest:
+ bangkok/lf95:
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_57
+
+===============================================================
+Tag name: clm3_expa_57
+Originator(s): Peter Thornton
+Date: 31 Jan 2006
+One-line Summary: Mods to allow switching between 3 and 4 soil
+ organic matter pools
+
+Purpose of changes: New science.
+
+Bugs fixed (include bugzilla ID): bugs in pftdynMod.F90 and ndepfileMod.F90
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist: Added SOM4 as new CPP directive
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: myself
+
+List all subroutines eliminated:
+
+List all subroutines added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+main/clmtype.F90: added soil4c (and 13C equiv.), soil4n states and associated
+ flux variables. Also added long name commenting for many previously defined
+ variables.
+main/clmtypeInitMod.F90: added initialization for new variables
+main/CNiniTimeVar.F90: initialize new state variables.
+main/histFldsMod.F90: added new variables, and also added long names for
+ many previously defined variables.
+main/pftdynMod.F90: bug fix in mpi_bcast, change MPI_REAL8 to MPI_INTEGER.
+main/ndepFileMod.F90: bug fix for mpi_bcast, change MPI_REAL8 to MPI_INTEGER.
+biogeochem/CNSetValueMod.F90: add code for new state and flux variables.
+biogeochem/CNDecompMod.F90: add code to allow either 3 or 4 SOM pools. Default
+ behavior is 3 pools, 4-pool behavior triggered by SOM4 CPP directive.
+biogeochem/CNCStateUpdate1Mod.F90: handling for new variables.
+biogeochem/CNNStateUpdate1Mod.F90: handling for new variables.
+biogeochem/CNSummaryMod.F90: handling for new variables.
+biogeochem/CNBalanceCheckMod.F90: handling for new variables
+biogeochem/CNPrecisionControlMod.F90: handling for new variables
+biogeochem/CNC13FluxMod.F90: handling for isotope version of new variables
+biogeochem/C13StateUpdate1Mod.F90: handling for new variables
+biogeochem/C13SummaryMod.F90: handling for new variables
+biogeochem/CNrestMod.F90: handling for new variables, and modify EXIT_SPINUP
+ controls
+
+Summary of testing:
+
+ bluesky:
+ tempest:
+ bangkok/lf95:
+
+CLM tag used for the baseline comparison tests if applicable: in 3-pool mode
+(SOM4 not set), results are bfb with clm3_expa_55. in 4-pool mode, changes
+answers, as expected.
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+bfb under 3-pools is demonstrated at:
+http://www.cgd.ucar.edu/tss/clm/diagnostics/clm3cn/c13/ccsm3_bgc31_I_5a-ccsm3_bgc31_I_2b/setsIndex.html
+
+===============================================================
+Tag name: clm3_expa_56
+Originator(s): Tony Craig
+Date: 31 Jan 2006
+One-line Summary: Final changes for finemesh implementation.
+
+Purpose of changes: Integrate final changes for finemesh implementation. These
+changes are 100% backward compatible and bfb, but also support use of a finemesh grid. finemesh implementation has been tested in clm and cam.
+
+Bugs fixed (include bugzilla ID):
+
+Describe any changes made to build system: Update Makefile so Depends are regenerated if any code is changed.
+
+Describe any changes made to the namelist: Added one new optional namelist, fatmgrid. This is a dataset for the coarse grid in clm. The format is the same as the surface dataset but only needs to include grid variables.
+
+List any changes to the defaults for the boundary datasets: Generated some new datasets with filled wetland and higher resolution. Not required and not yet added to default suite of datasets.
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by:
+
+List all subroutines eliminated:
+
+List all subroutines added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+tools/mksurfdata:
+ mksrfdat.F90,areaMod.F90,creategridMod.F90
+bld/offline/Makefile
+doc/ChangeLog
+ src/main:
+clm_comp.F90,driver.F90,clm_camMod.F90,atmdrvMod.F90,clmtypeInitMod.F90,initializeMod.F90,histFileMod.F90,program_csm.F90,clm_atmlnd.F90,clm_csmMod.F90,surfFileMod.F90,controlMod.F90,clm_varctl.F90,initGridCellsMod.F90,MCT_lnd_comp.F90,program_off.F90,domainMod.F90,decompMod.F90,areaMod.F90,clmtype.F90
+src/biogeophys/SurfaceAlbedoMod.F90
+
+Code changes:
+add pftm to domain datatype and history file
+modify program_off and program_cs to use clm_init[1,2], clm_run[1,2]
+modify coupling to handle coarse <-> finemesh for standalone, cam, and
+ ccsm.
+modify Makefile so depends file is reset whenever there is a code change
+add normalized area to history files
+add lat_a, lon_a, latdeg_a, londeg_a to clm3 datatype for atm lats/lons.
+ required in SurfaceAlbedo computation where the zenith angle has
+ to be based on the atm (coarse) grid, not the fine clm grid.
+merge with clm3_expa_53_brnchT_cam01 tag
+
+Summary of testing:
+
+ bluesky: clm full suite bfb, cam full suite bfb, ccsm bfb
+ tempest: cam full suite bfb
+ bangkok/lf95: cam full suite bfb
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_55
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in: bfb
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? bfb
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+Tag name: clm3_expa_55
+Originator(s): Peter Thornton
+Date: 24 Jan 2006
+One-line Summary: Fixes for 13C isotope code, migration from cvs
+
+Purpose of changes:
+
+Bugs fixed (include bugzilla ID):
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist:
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by: myself
+
+List all subroutines eliminated:
+
+List all subroutines added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+Files modified:
+1. CNFireMod.F90
+Added initialization of variable mep.
+
+2. CNrestMod.F90
+Added EXIT_SPINUP controls on column-level 13C pools
+
+3. C13SummaryMod.F90
+Added current and excess maintenance respiration terms to summary MR variable.
+
+4. CNC13FluxMod.F90
+Added new routines to calculate 13C fluxes. Litter to column, non-mortality fluxes
+at the column level, pft-level gap mortality fluxes, pft and column level fire mortality fluxes,
+
+5. CNEcosystemDynMod.F90
+Added calls for C13Flux2, C13Flux3, C13StateUpdate2, and C13StateUpdate3.
+
+6. clm_varcon.F90
+Added parameters to define a fixed pre_industrial del13C (set to -6 permil)
+
+Summary of testing:
+
+ bluesky:
+ tempest:
+ bangkok/lf95:
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_40
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+Changes answers for CN only, and then only for the isotope prognostics.
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+http://www.cgd.ucar.edu/tss/clm/diagnostics/clm3cn/c13/ccsm3_bgc31_I_2a-ccsm3_bgc26_I_1d/setsIndex.html
+===============================================================
+Tag name: clm3_expa_54
+Originator(s): Tony Craig
+Date: 17 Jan 2006
+One-line Summary: Update infrastructure in support of finemesh, migration from cvs
+
+Purpose of changes: bfb infrastructure changes committed, partial step towards finemesh implementation
+
+Bugs fixed (include bugzilla ID): None
+
+Describe any changes made to build system: None
+
+Describe any changes made to the namelist: None
+
+List any changes to the defaults for the boundary datasets: None
+
+Describe any substantial timing or memory changes: Some memory added for extra coarse grid, atmosphere domain, as well as changes to interpolation datatypes and code. Redundant memory deleted from some datasets.
+
+Code reviewed by: Mariana Vertenstein
+
+List all subroutines eliminated:
+
+List all subroutines added and what they do:
+
+List all existing files that have been modified, and describe the changes:
+
+src/main:
+initGridIndexMod.F90,lnd2atmMod.F90,CNiniTimeVar.F90,abortutils.F90,clm_comp.F90,driver.F90,clm_camMod.F90,atmdrvMod.F90,subgridRestMod.F90,accFldsMod.F90,clmtypeInitMod.F90,initializeMod.F90,pftdynMod.F90,iniTimeConst.F90,histFileMod.F90,program_csm.F90,clm_atmlnd.F90,clm_varsur.F90,clm_csmMod.F90,restFileMod.F90,surfFileMod.F90,controlMod.F90,initSurfAlbMod.F90,initSubgridMod.F90,clm_varctl.F90,ndepFileMod.F90,initGridCellsMod.F90,MCT_lnd_comp.F90,program_off.F90,domainMod.F90,decompMod.F90,areaMod.F90,clmtype.F90,histFldsMod.F90
+
+src/riverroute:
+RtmMod.F90
+
+src/biogeochem:
+CASAMod.F90,DUSTMod.F90,CNPhenologyMod.F90,STATICEcosysDynMod.F90,DGVMMod.F90,CNrestMod.F90,VOCEmissionMod.F90,CNNDynamicsMod.F90,CNVegStructUpdateMod.F90
+
+src/biogeophys:
+BalanceCheckMod.F90,SurfaceRadiationMod.F90,SoilTemperatureMod.F90,Biogeophysics1Mod.F90,Biogeophysics2Mod.F90,FrictionVelocityMod.F90,Hydrology1Mod.F90,Hydrology2Mod.F90,BiogeophysicsLakeMod.F90,HydrologyLakeMod.F90,BareGroundFluxesMod.F90,CanopyFluxesMod.F90
+
+bld/offline/tests:
+CLM_lab.pm,test_batch.csh
+
+tools/mksurfdata:
+mkdynpftMod.F90,mkgridMod.F90,shr_timer_mod.F90,mklaiMod.F90,mkglacier.F90,mkurban.F90,fileutils.F90,mksoitex.F90,mkfileMod.F90,domainMod.F90,areaMod.F90,creategridMod.F90,mkvarsur.F90,mksrfdat.F90,nanMod.F90,mklanwat.F90,mksoicol.F90,Makefile,mkpftMod.F90
+
+tools/mkgriddata:
+mkvarctl.F90,fileutils.F90,mkgriddata.F90,mkfileMod.F90,domainMod.F90,areaMod.F90,creategridMod.F90,mkvarsur.F90,nanMod.F90,Makefile
+
+Code changes:
+Merge atm2lnd_state_type, atm2lnd_flux_type. Same for lnd2atm state/flux.
+ Related changes in clm3 and elsewhere in code.
+Add domainMod.F90 and domain_type. Migrate grid data into domain type.
+ Instantiate adomain(atm/coarse), ldomain(lnd/finemesh), rdomain(rtm),
+ ddomain(atmdrv external data) in model.
+Add lats, latn, lonw, lone 2d arrays and associated code changes.
+Cleanup areaMod.F90; merging subroutines, removing redundant code, eliminate
+ *_point routines.
+Remove numlon
+Add decomp_type for gcelldc and gcellsn. Remove redundant data in other
+ arrays related to addressing physical space and logical space.
+Clean up interface in set_landunit subroutines. Remove redundant code.
+Clean up procs and clumps datatypes, removing redundant data.
+Migrate clm3 topology data to pointers from copies
+Add gridmap_type for interpolation and associated code and routines to
+ support the type.
+Add clm_atmlnd.F90 file for upscale/downscale code. Add clm_mapa2l
+ and clm_mapl2a to carry out mapping associated with upscale/downscale.
+Add gridmap_setmapsFM for generation of weights for downscale/upscale
+ routines.
+forc_ndep should not be in atm2lnd_type.
+Reuse code as much as possible throughout.
+Update mksurfdata, mkgriddata. Speed code up, bfb, new fields added,
+ new input options for files. Fill with wetland, add PFTDATA_MASK
+ field for real/fake land.
+Update code to clm3_expa_53
+Rename latixy and longxy to latc and lonc.
+Remove fullgrid attribute.
+
+Summary of testing:
+
+ bluesky: full clm test, full cam test
+ tempest: full cam test
+ bangkok/lf95: full cam test
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_53
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in: bfb
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? bfb
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+
+Tag name: clm3_expa_53
+Originator(s): Mariana Vertenstein
+Date: Fri Dec.16 2005
+One-line Summary: Put in MCT communication for cam-clm coupling
+
+Purpose of changes: removed lp_coupling communication and put in
+MCT communication interfaces
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the input datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: Mariana Vertenstein, Rob Jacob
+
+List all subroutines eliminated: none
+
+List all subroutines added and what they do:
+ main/MCT_atmlnd_cpl.F90
+ clm/cam MCT coupling interface - will be moved out of clm code in
+ near future
+ main/MCT_lnd_comp.F90
+ clm MCT wrapper layer
+ main/clm_comp.F90
+ module containing wrapper routines that separate clm into chunks of
+ code that contain no communication (e.g. clm_run1, clm_run2 has no
+ communication). This is needed to satisfy requirement for implementing
+ multiple coupling interfaces (e.g. concurrent/MCT, sequential/MCT,
+ sequential/ESMF) within ccsm.
+ main/clm_varorb.F90
+ module for orbital parameters
+ (this will be added to program_off.F90 and program_csm.F90)
+
+List all existing files that have been modified, and describe the changes:
+ clm_camMod.F90 - all coupling still exists here
+
+Summary of testing:
+
+ bluesky:
+ test-model.pl -res T31
+ test-model.pl -res T31cn
+ test-model.pl -res T31cnall
+ test-model.pl -res T31casa
+ test-model.pl -res T31dgvm
+
+ tempest: No testing
+
+ bangkok/lf95: No testing
+
+CLM tag used for the baseline comparison tests if applicable: clm3_expa_48
+ results were bfb with clm3_expa_48
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff?
+
+ If this tag changes climate describe the run(s) done to evaluate the new
+ climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+ MSS location of control simulations used to validate new climate:
+
+ URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+
+Tag name: clm3_expa_52
+Originator(s): Mariana Vertenstein
+Date: Tues Dec.12 2005
+One-line Summary: Put in scam fix needed in CAM mode
+
+Purpose of changes: clean up svn clm structure
+
+Bugs fixed (include bugzilla ID):
+
+Describe any changes made to build system:
+None
+
+Describe any changes made to the namelist:
+None
+
+List any changes to the defaults for the input datasets:
+None
+
+Describe any substantial timing or memory changes:
+None
+
+Code reviewed by:
+Mariana Vertenstein
+
+List all subroutines eliminated:
+None
+
+List all subroutines added and what they do:
+None
+
+List all existing files that have been modified, and describe the changes:
+clm_camMod.F90 (this change was put into cam3_2_41)
+
+Summary of testing:
+No testing done
+
+CLM tag used for the baseline comparison tests if applicable:
+NA
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+If bitwise differences were observed, how did you show they were no worse
+than roundoff?
+
+If this tag changes climate describe the run(s) done to evaluate the new
+climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+MSS location of control simulations used to validate new climate:
+
+URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+
+Tag name: clm3_expa_51
+Originator(s): mvr
+Date: Tues Dec.12 2005
+One-line Summary: removed src/utils dir
+
+Purpose of changes: should've been done with external setup in prev tag
+
+Bugs fixed (include bugzilla ID): none
+
+Describe any changes made to build system: none
+
+Describe any changes made to the namelist: none
+
+List any changes to the defaults for the input datasets: none
+
+Describe any substantial timing or memory changes: none
+
+Code reviewed by: mvr, mvertens
+
+List all subroutines eliminated:
+D src/utils
+
+List all subroutines added and what they do:
+none
+
+List all existing files that have been modified, and describe the changes:
+none
+
+Summary of testing:
+
+bluesky: none
+tempest: none
+bangkok/lf95: none
+
+CLM tag used for the baseline comparison tests if applicable:
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+If bitwise differences were observed, how did you show they were no worse
+than roundoff? b4b
+
+If this tag changes climate describe the run(s) done to evaluate the new
+climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+MSS location of control simulations used to validate new climate:
+
+URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+Tag name: clm3_expa_50
+Originator(s): Mariana Vertenstein
+Date: Tues Dec.12 2005
+One-line Summary: Updated external definitions for utils
+
+Purpose of changes: clean up svn clm structure
+
+Bugs fixed (include bugzilla ID):
+
+Describe any changes made to build system:
+None
+
+Describe any changes made to the namelist:
+None
+
+List any changes to the defaults for the input datasets:
+None
+
+Describe any substantial timing or memory changes:
+None
+
+Code reviewed by:
+NA
+
+List all subroutines eliminated:
+None
+
+List all subroutines added and what they do:
+None
+
+List all existing files that have been modified, and describe the changes:
+None
+
+Summary of testing:
+No testing done
+
+CLM tag used for the baseline comparison tests if applicable:
+NA
+
+IF tag changes answers relative to baseline comparison the
+following should be filled in:
+
+Summarize any changes to answers, i.e.,
+ - what code configurations:
+ - what platforms/compilers:
+ - nature of change (roundoff; larger than roundoff/same climate; new climate):
+
+If bitwise differences were observed, how did you show they were no worse
+than roundoff?
+
+If this tag changes climate describe the run(s) done to evaluate the new
+climate in enough detail that it(they) could be reproduced, i.e.,
+ - source tag (all code used must be in the repository):
+ - platform/compilers:
+ - configuration (CPP ifdefs):
+ - build-namelist command (or complete namelist):
+ - MSS location of output:
+
+MSS location of control simulations used to validate new climate:
+
+URL for LMWG diagnostics output used to validate new climate:
+
+===============================================================
+
+Tag name: clm3_expa_49
+Originator(s): Mariana Vertenstein
+Date: Mon Dec 12 2005
+One-line Summary: Updated clm with changes put into cam3_2_38
+
+Purpose of changes:
+Cray-X1E OpenMP/CSD compatibility modifications.
+
+Bugs fixed (include bugzilla ID):
+None
+
+Describe any changes made to build system:
+None
+
+Tested that restarts are bit-for-bit: yes
+Tested that different domain decompositions match bit-for-bit: yes
+Code reviewed by: myself
+Changes answers: no (bit-for-bit)
+Changes made:
+
+This is the start of the clm_exp experimental development
+branch. This version is the same as clm3_deva_10 which was checked
+into clm_dev by Mariana Vertenstein on April 27th, 2004.
+===============================================================
+===============================================================
+===============================================================
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ChangeSum b/ChangeSum
new file mode 100644
index 0000000000..505427874e
--- /dev/null
+++ b/ChangeSum
@@ -0,0 +1,335 @@
+Tag Who Date Summary
+============================================================================================================================
+ clm4_5_1_r120 andre 08/29/2015 CLM 5 nitrogen models Flexible CN and LUNA
+ clm4_5_1_r119 erik 08/26/2015 Bring hobart/nag bug fixes to trunk, and fix a few bugs
+ clm4_5_1_r118 sacks 08/05/2015 Minor rework of glc coupling fields
+ clm4_5_1_r117 sacks 07/28/2015 Repartition rain vs. snow from atmosphere
+ clm4_5_1_r116 sacks 07/22/2015 Rename some history fields
+ clm4_5_1_r115 sacks 07/15/2015 Remove redundant code, rename a variable
+ clm4_5_1_r114 sacks 07/10/2015 Update cime external, remove genf90-generated files
+ clm4_5_1_r113 sacks 07/09/2015 Support backwards compatibility of restart variable names
+ clm4_5_1_r112 oleson 07/01/2015 Justin Perket snow on vegetation
+ clm4_5_1_r111 sacks 06/12/2015 Remove temporary hack to get bfb results in InitSnowLayers
+ clm4_5_1_r110 sacks 06/12/2015 Add flexibility to have more snow layers
+ clm4_5_1_r109 sacks 06/06/2015 Fix bug in DivideSnowLayers
+ clm4_5_1_r108 andre 05/29/2015 Crop changes from Sam Levis
+ clm4_5_1_r107 andre 05/19/2015 Update externals to use github version of cime1.0.7.
+ clm4_5_1_r106 erik 05/14/2015 Fix CO2 forcing for MEGAN
+ clm4_5_1_r105 erik 04/16/2015 Move test lists to beneath active components, change build scripts from cshell to perl, move to new cime directory structure
+ clm4_5_1_r104 erik 01/27/2015 Update externals to latest cesm beta tag + bring in shared build for clm4_5/clm5_0 for testing
+ clm4_5_1_r103 sacks 01/01/2015 enable transient crops
+ clm4_5_1_r102 sacks 12/27/2014 make new input datasets to support transient crops
+ clm4_5_1_r101 sacks 12/09/2014 rework cold start initialization for transient runs
+ clm4_5_1_r100 sacks 12/03/2014 update pio calls to pio2 API
+ clm4_5_1_r099 sacks 12/02/2014 add ozone stress code from Danica Lombardozzi
+ clm4_5_1_r098 sacks 11/29/2014 update externals to cesm1_3_beta14 or beyond
+ clm4_5_1_r097 mvertens 11/24/2014 major refactorization to introduce new soilbiogeochem data types and routines that are independent of either ED or CN datatypes
+ clm4_5_1_r096 erik 11/19/2014 Several answer changing bug-fixes: snow grain size, lake hydrology, default settings, organic soil
+ clm4_5_1_r095 andre 11/10/2014 N comp refactoring by Jinyun Tang (LBL) and transpiration sink isolation by Gautam Bisht (LBL)
+ clm4_5_1_r094 sacks 11/07/2014 misc. glacier-related updates
+ clm4_5_1_r093 sacks 11/07/2014 change cold-start snow initialization, update cism external
+ clm4_5_1_r092 muszala 11/04/2014 bug fixes from santos that address valgrind problems. update rtm external
+ clm4_5_1_r091 muszala 10/27/2014 update externals. fix bug so CLM runs with Intel 14x.
+ clm4_5_1_r090 sacks 10/16/2014 modularize irrigation; do some unit test rework
+ clm4_5_1_r089 erik 10/13/2014 Bring new urban building temperature to trunk as a clm5.0 feature as well as human-stress index calculations
+ clm4_5_1_r088 muszala 10/01/2014 pull out ED deps. in TemperatureTypeMod, can now compile with pgi 14.7
+ clm4_5_1_r087 erik 09/30/2014 Fix two balance check errors, and turn abort for balance check back on to appropriate levels
+ clm4_5_1_r086 muszala 09/25/2014 critical ED modifications from r fisher, fix bug 2043
+ clm4_5_1_r085 sacks 09/19/2014 replace conditionals with polymorphism for soil water retention curve
+ clm4_5_1_r084 sacks 09/18/2014 make glc_dyn_runoff_routing spatially-varying, based on input from glc
+ clm4_5_1_r083 muszala 09/17/2014 only update scripts and run new baselines. this due to an error in yellowstone pgi test naming (clm_aux45 changed to aux_clm45)
+ clm4_5_1_r082 muszala 09/11/2014 Merge in a number of ED changes to address science bugs and infrastructure (particularly restarts)
+ clm4_5_1_r081 mvertens 08/24/2014 major infrastructure changes and directory reorganization under src
+ clm4_5_1_r080 erik 08/16/2014 Update externals to trunk version, allow eighth degree as a valid resolution
+ clm4_5_1_r079 andre 07/31/2014 G. Bisht (LBL) soil temperature refactor; machines update for goldbach-intel
+ clm4_5_1_r078 muszala 07/23/2014 add lai stream capability and the ability to run with V5 cruncep data
+ clm4_5_1_r077 andre 07/10/2014 Refactor from Jinyun Tang (LBL) to make hydrology more modular.
+ clm4_5_1_r076 erik 07/07/2014 Answer changes for fire code from Fang Li
+ clm4_5_75 muszala 05/30/2014 update externals to rtm1_0_38 and esmf_wrf_timemgr_140523
+ clm4_5_74 sacks 05/28/2014 misc. bfb changes - see detailed summary below
+ clm4_5_73 erik 05/28/2014 Add the stub ability for clm5_0 physics to CLM build system
+ clm4_5_72 muszala 05/05/2014 Introduce code for Ecosystem Demography (CLM(ED)) Model
+ clm4_5_71 sacks 05/02/2014 2-way feedbacks for glacier, veg columns compute glacier SMB, and related changes
+ clm4_5_70 muszala 04/18/2014 bring in SHR_ASSERT macros
+ clm4_5_69 andre 03/18/2014 start unit testing build-namelist
+ clm4_5_68 erik 03/07/2014 Update scripts to version that turns on transient CO2 streams for transient compsets, and update CISM (changes answers)
+ clm4_5_67 mvertens 03/06/2014 removed initSurfAlb as part of the initialization
+ clm4_5_66 mvertens 03/03/2014 refactoring of initialization and introduction of run-time finidat interpolation
+ clm4_5_65 mvertens 02/25/2014 Turn off MEGAN vocs when crops is running
+ clm4_5_64 muszala 02/19/2014 fix and clean ncdio_pio.F90.in. clean clm_time_manager. update externals.
+ clm4_5_63 sacks 02/14/2014 add some code needed for dynamic landunits; activate 0-weight veg landunit sometimes
+ clm4_5_62 erik 02/10/2014 Get PTCLM working robustly, US-UMB test working, add CO2 streams to datm, add more consistency testing between compsets and user settings
+ clm4_5_61 sacks 02/04/2014 add 3-d snow history fields; continue harvest past end of pftdyn timeseries
+ clm4_5_60 andre 01/30/2014 refactor build-namelist
+ clm4_5_59 sacks 01/22/2014 use new get_curr_yearfrac function in clm_time_manager
+ clm4_5_58 sacks 01/22/2014 major refactor of transient pft code, in prep for dynamic landunits
+ clm4_5_57 sacks 01/07/2014 change CNDV water conservation to use the pftdyn method
+ clm4_5_56 sacks 01/02/2014 update scripts external to fix I20TRCLM45BGC compset
+ clm4_5_55 sacks 12/27/2013 add hooks to Sean Santos's unit test frameworks, and begin to add CLM unit tests
+ clm4_5_54 sacks 12/27/2013 update externals to cesm1_3_beta06
+ clm4_5_53 muszala 12/19/2013 refactor restart interfaces
+ clm4_5_52 sacks 11/26/2013 turn on longwave radiation downscaling for glc_mec by default
+ clm4_5_51 sacks 11/26/2013 rework downscaling of atm fields for glc_mec
+ clm4_5_50 erik 11/24/2013 Bring in a bunch of b4b bugfixes, fix getregional script, start move of PTCLM to PTCLMmkdata tool
+ clm4_5_49 muszala 11/16/2013 swenson anomaly forcing - part 1
+ clm4_5_48 muszala 11/14/2013 bug fixes for CLM dry deposition and MEGAN VOC emissions
+ clm4_5_47 muszala 11/12/2013 fix Bug 1858 - AGDD now reset annually
+ clm4_5_46 sacks 11/08/2013 remove zeroing out of slope for special landunits
+ clm4_5_45 sacks 11/08/2013 refactor daylength calculation, and other minor changes
+ clm4_5_44 sacks 11/08/2013 temporary hack to daylength initialization to provide baselines for the next tag
+ clm4_5_43 sacks 11/06/2013 allocate memory for most landunits in every grid cell (needed for dynamic landunits)
+ clm4_5_42 sacks 11/04/2013 fix bug 1857 for CLM4.5 - CNDV running temperature means are incorrect
+ clm4_5_41 andre 10/30/2013 update scripts to convert clm4_5 CPP flags to namelist variables.
+ clm4_5_40 muszala 10/24/2013 fix Bug 1752 - urban conductances depend on weights in an undesirable way
+ clm4_5_39 muszala 10/23/2013 bug fix from santos - h2osoi_vol not passed to atmosphere model on restart
+ clm4_5_38 sacks 10/18/2013 change irrigation variables to be pft-level
+ clm4_5_37 muszala 10/10/2013 Modifications to bring clm up to date with major driver refactoring in drvseq5_0_01
+ clm4_5_36 sacks 10/04/2013 new surface datasets, and other minor fixes
+ clm4_5_35 sacks 10/01/2013 get CLM running on edison
+ clm4_5_34 erik 09/30/2013 Get PTCLM working, fix a few small bugs
+ clm4_5_33 muszala 09/26/2013 clean up from mistakes in previous tag
+ clm4_5_32 muszala 09/26/2013 bug fix tag - 1798, 1810
+ clm4_5_31 sacks 09/25/2013 fix bug 1820: incomplete conditional in CNSoyfix leads to buggy results and decomposition dependence
+ clm4_5_30 sacks 09/24/2013 fix performance bug in decomposition initialization
+ clm4_5_29 sacks 09/24/2013 fix threading in CLM4.5, and other misc fixes
+ clm4_5_28 sacks 09/20/2013 fix FracH2oSfc bug
+ clm4_5_27 sacks 09/20/2013 fix crop nyrs bug
+ clm4_5_26 muszala 09/19/2013 water balance and SMS_Ly1.f19_g16.ICLM45BGCCROP fix
+ clm4_5_25 erik 09/13/2013 Bring in Tony's changes to kick sno all the way up to the coupler layer, makes all
+ CESM components more similar to each other
+ clm4_5_24 sacks 09/03/2013 update externals to cesm1_3_beta02 or later
+ clm4_5_23 muszala 08/22/2013 refactor to allow CH4 params. to be read from netcdf file and clean up clm4_5_20
+ clm4_5_22 muszala 07/30/2013 aux_clm testlist reorganization
+ clm4_5_21 muszala 07/24/2013 ifdef and bounds refactor
+ clm4_5_20 muszala 07/20/2013 refactor to allow CN and BGC params. to be read from netcdf file
+ clm4_5_19 sacks 07/17/2013 fix setting of bd in iniTimeConst
+ clm4_5_18 sacks 07/09/2013 rework urban indexing
+ clm4_5_17 sacks 07/03/2013 misc cleanup and bug fixes
+ clm4_5_16 sacks 07/02/2013 only run filters over 'active' points
+ clm4_5_15 muszala 07/01/2013 complete associate refactor for pointers in clm4_5 source
+ clm4_5_14 muszala 06/20/2013 preparation for associate refactor in clm4_5_15
+ clm4_5_13 andre 06/14/2013 hydrology reordering from Jinyun Tang
+ clm4_5_12 muszala 06/13/2013 NoVS test, NAG mods and remove TWS from restart file
+ clm4_5_11 sacks 06/11/2013 Change pct_pft and related surface dataset variables to be % of landunit
+ clm4_5_10 muszala 06/10/2013 refactor clmtype
+ clm4_5_09 muszala 06/04/2013 volr and vic fix, update mct and rtm
+ clm4_5_08 muszala 06/03/2013 port for NAG compiler
+ clm4_5_07 erik 05/31/2013 New spinup files for CLM45 AND RTM, work on PTCLM, turn drydep off by default, update externals
+ clm4_5_06 erik 05/15/2013 A few small bug fixes, more updates to README files
+ clm4_5_05 muszala 05/14/2013 hcru bug fixes
+ clm4_5_04 erik 05/13/2013 Fix the previous broken tag
+ clm4_5_03 erik 05/10/2013 Several bug fixes for release, urban and test single point surface datasets
+ clm4_5_02 sacks 05/07/2013 make 'shared' tools directory, and other minor tools fixes
+ clm4_5_01 muszala 05/06/2013 update externals
+ clm4_5_00 erik 05/02/2013 Official end to CLM4.5 development for CLM offline
+ clm4_0_81 bandre 04/29/2013 Charlie Koven's variable consolidation, cryoturbation and BSW CPP changes
+ clm4_0_80 erik 04/26/2013 Bring Fang Li fire model into CLM4.5 science, update ALL CLM4.5 surface datasets,
+ provide a working initial condition file for CLM45BGC@f19_g16-1850
+ clm4_0_79 muszala 04/24/2013 pftdyn, pft-phys*.nc and datm8 update
+ clm4_0_78 muszala 04/23/2013 MEGAN fixes
+ clm4_0_77 sacks 04/23/2013 fix carbon balance bug in transient runs with VERTSOI, and fix Soil Hydrology bug
+ clm4_0_76 muszala 04/22/2013 spinup changes from Charlie Koven (part 1)
+ clm4_0_75 muszala 04/19/2013 run propset
+ clm4_0_74 muszala 04/17/2013 snow_depth changes, major scripts overhaul, bug fix for tools
+ clm4_0_73 sacks 04/15/2013 update mksurfdata_map for CLM4.5, and other misc. updates, mainly to tools
+ clm4_0_72 muszala 04/11/2013 maoyi bug fix for vic hydro
+ clm4_0_71 muszala 04/10/2013 compsets refactoring by mvertens
+ clm4_0_70 muszala 04/01/2013 bring in vic hydrology
+ clm4_0_69 muszala 03/26/2013 remove hydro reorder, volr and esmf mods
+ clm4_0_68 erik 03/16/2013 Fix some issues in mksurfdata_map for generation of ne120np surface data file.
+ Put error back in CLM if weights don't sum to 100. Add in Keith's photosynthesis change for CLM45.
+ clm4_0_67 muszala 03/12/2013 Jinyun photosynthesis and hydrology reorder
+ clm4_0_66 sacks 03/07/2013 turn off subgrid topography snow parameterization for glc_mec landunits
+ clm4_0_65 sacks 03/07/2013 back out Machines external to get more tests to pass, especially IG
+ clm4_0_64 muszala 03/06/2013 update externals. fixes 40/45 initial condition problem
+ clm4_0_63 muszala 03/04/2013 bug 1635 fix - 4_0 CN bug
+ clm4_0_62 sacks 02/24/2013 add active flags, change subgrid weighting convention, other misc fixes
+ clm4_0_61 muszala 02/20/2013 rtm, drv and clm mods: tws, olr, r01 rdirc file and SoilHydroMod
+ clm4_0_60 erik 02/11/2013 Bring CLM4.5 code from clm45sci branch to trunk as an option set at configure time
+ clm4_0_59 mvertens 12/20/2012 restructure clmtype and all pointer references, new directory structure
+ clm4_0_58 erik 12/14/2012 Uncomment us20 and wus12 datasets, more testing to: bluefire, yellowstone, frankfurt
+ clm4_0_57 muszala 11/30/2012 update trunk with release mods, some rtm fixes
+ clm4_0_56 sacks 11/27/2012 fix s2x tsrf, add s2x diagnostics
+ clm4_0_55 muszala 11/14/2012 bring in flooding capability
+ clm4_0_54 erik 10/09/2012 Fix esmf for carma field, fix some CLM_USRDAT issues
+ clm4_0_53 erik 10/03/2012 Update to fsurdat, fpftdyn, finidat datasets, new high resolution organic/fmax/glacier raw datasets
+ clm4_0_52 sacks 09/27/2012 new pct_glacier raw data file
+ clm4_0_51 muszala 09/26/2012 bug fixes, pio performance and SCRIP files
+ clm4_0_50 muszala 09/21/2012 testing of clm and new rof component
+ clm4_0_49 erik 09/16/2012 Move clm testing to use CESM test framework
+ clm4_0_48 muszala 09/11/2012 bug fixes, xFail to tests and normalize test output for CLM
+ clm4_0_47 muszala 08/23/2012 bug fixes
+ clm4_0_46 muszala 08/08/2012 R01 support and update externals
+ clm4_0_45 sacks 07/20/2012 fix virtual columns; new urban mksurfdata_map
+ clm4_0_44 erik 07/09/2012 Add wrf resolutions, update externals to cesm1_1_beta15, all components use build-namelist now
+ clm4_0_43 sacks 04/06/2012 Add diagnostic fields, modify some existing history fields
+ clm4_0_42 erik 03/27/2012 Bring in Francis Vitt's MEGAN changes.
+ clm4_0_41 erik 03/13/2012 Bring rmfmesh/rtmmap branches to trunk
+ clm4_0_40 erik 02/16/2012 Back out update to new T31 surface datasets
+ clm4_0_39 erik 02/01/2012 Bring newgrid branch to trunk
+ clm4_0_38 erik 01/23/2012 Fix f09 surface datasets
+ clm4_0_37 erik 09/26/2011 Fix unstructured grids history files
+ clm4_0_36 erik 09/22/2011 Comment out RTM mapping files for f09 and f19
+ clm4_0_35 erik 09/13/2011 Bring in Mariana's non2D grid branch to trunk, enabling HOMME grids: ne30np4/ne120np4
+ clm4_0_34 erik 08/18/2011 Bring tcens branch to trunk, fix a few issues
+ clm4_0_33 erik 07/25/2011 Move changes on release branch over to trunk
+ clm4_0_32 erik 05/19/2011 Make I1850SPINUPCN compset use MOAR data, various bug fixes, work on test lists
+ clm4_0_31 erik 05/13/2011 Fix answers for transient_CN, fix interpinic
+ clm4_0_30 erik 05/11/2011 New finidat/fsurdat files for T31
+ clm4_0_29 erik 05/05/2011 Backout interpinic changes to one that works
+ clm4_0_28 erik 05/03/2011 Remove DUST/PROGSSLT in land coupler layer, update driver and scripts
+ clm4_0_27 erik 05/02/2011 Move crop branch over to trunk
+ clm4_0_26 erik 03/23/2011 Update externals, driver update changes answers, drydep changes from fvitt, fix bugs
+ clm4_0_25 erik 03/22/2011 Always output restart-history files add more meta-data to them, fix urbanc_alpha and 2.5x3.33 datasets, Changes from Keith O on SNOWLIQ/SNOWICE
+ clm4_0_24 erik 02/09/2011 Fix mksurfdata and add ability to override soil_fmax
+ clm4_0_23 erik 02/03/2011 Add in new glacier-MEC use-cases
+ clm4_0_22 erik 01/20/2011 Move coupler field indicies to clm, move cpl_* directories up a level, add the cpl_share directory
+ clm4_0_21 jedwards 01/12/2011 Remove includes, finish PIO transition
+ clm4_0_20 erik 01/11/2011 Update for ESMF metadata, update doc. from release branch, bug fixes (doc of qflx_evap_tot, threading CNDV, aer/ndepregrid)
+ clm4_0_19 erik 12/08/2010 Bring irrigation branch to the trunk
+ clm4_0_18 erik 11/21/2010 Fix a problem with the clm template, update scripts version to fix bug with linking with ESMF
+ clm4_0_17 erik 11/20/2010 Update to externals that change answers to roundoff, use drv pio namelist, add in T341 datasets
+ clm4_0_16 erik/mverten 10/27/2010 Fix downscaling roundoff difference for same-grids by copying scale factor when needed
+ clm4_0_15 erik/mverten 10/24/2010 Move pio branch to trunk
+ clm4_0_14 erik 10/19/2010 Fix finidat file for T31 sim_year=2000 cases
+ clm4_0_13 erik 10/16/2010 Bring in PTCLM branch, add in T31 finidat file and turn off ice_runoff for T31
+ clm4_0_12 erik 09/10/2010 Add U10 to history, cesm1_0_rel06 updates, PTCLM02 updates (except mksurfdata), remove ndepdat/dyn/faerdep
+ clm4_0_11 erik 08/27/2010 New files for rcp6, fix MPI bug, update externals
+ clm4_0_10 erik 08/04/2010 Update doc to cesm_rel05, bug-fixes, fix issues for single-point, mksurfdata/getregional scripts
+ clm4_0_09 erik 06/14/2010 Fix some small issues, update documentation, and externals
+ clm4_0_08 erik 06/04/2010 Snow hydrology bug fix from Keith and Dave
+ clm4_0_07 erik 06/03/2010 Some cleanup/fix bugs, add RTM var, add albice to namelist, allow last-millenium in mksurfdata, allow setting of datm_presaero in clm test-suite
+ clm4_0_06 erik 05/26/2010 Update gglc to cism
+ clm4_0_05 erik 05/25/2010 Move Nitrogen deposition stream branch to trunk
+ clm4_0_04 erik 05/20/2010 New namelist items: ice_runoff, scaled_harvest, carbon_only,
+ new RTM hist vars, new finidat files, update esmf interface, turn off aerosol read quicker
+ clm4_0_03 erik 05/17/2010 Changes from Francis for VOC and drydep
+ clm4_0_02 erik 05/13/2010 Make sure dtime is initialized, so that answers are consistently the same as clm4_0_00
+ clm4_0_01 erik 05/11/2010 Move glacier multiple elevation class branch to the trunk so that we can work with the active glacier model
+ clm4_0_00 erik 05/04/2010 Update to datm8, redirect aquifer overflow
+ to drainage, add gx3v7 masks, script to extract regional
+ datasets, add harvesting for CN, modify shrubs, include urban
+ model, ice stream for snowcapping, new build-namelist system,
+ scale solar by solar zenith angle in datm, deep soil with
+ bedrock at bottom, organic matter in soils, SNICAR for snow
+ radiation, sparse dense aero, snow cover changes
+ clm3_8_00 erik 05/04/2010 Get future scenarios working, finalize
+ documentation, bring in MEGAN VOC and CNDV, simplify,
+ mksurfdata optimization, fix bugs: snow enthalpy, BMOZ, pergro,
+ use pft weights from fsurdat NOT finidat
+ clm3_7_15 erik 04/27/2010 Finish User's Guide, surfdata files for urban-1pt, fix mksurfdata ifort bugs, work with testing
+ clm3_7_14 erik 04/08/2010 Fix rcp=2.6/4.5 1-degree fndepdyn filenames
+ clm3_7_13 erik 04/08/2010 Add in missing rcp=2.6/6 use-cases, and fix syntax errors in the namelist_defaults file
+ clm3_7_12 erik 04/08/2010 rcp=2.6/4.5 datasets for fndepdyn and aerdepdat, fix some minor issues, new 1pt urban surfdata files
+ clm3_7_11 erik 04/07/2010 qtr-degree and T85 surfdata, rcp=2.6/4.5 datasets, doc updates
+ clm3_7_10 erik 03/22/2010 Fix drydep so that BMOZ case will work
+ clm3_7_09 erik 03/21/2010 Fix snow enthalpy bug, cndv datasets, various fixes
+ clm3_7_08 mvertens 03/12/2010 Removal of check for weights if dynamic land use is
+used
+ clm3_7_07 erik 03/10/2010 New finidat datasets for 1-deg, 2-deg, and abort if weights from finidat/fsurdat files are too different, and use fsurdat files as truth
+ clm3_7_06 erik 03/10/2010 Bring cndv branch to trunk
+ clm3_7_05 erik 02/24/2010 Bring VOC branch source code to trunk
+ clm3_7_04 erik 02/17/2010 Bring VOC branch (vocemis-drydep18_clm3_7_03) tools, testing, and build to trunk (everything other than VOC code changes)
+ clm3_7_03 erik 02/10/2010 Add in more future scenario datasets, new history fields from Keith
+ clm3_7_02 erik 02/06/2010 Start adding in new rcp=8.5 datasets, remove some junk, change some env_conf variables, add user_nl_clm
+ clm3_7_01 erik 01/29/2010 OpenMP fix for pftdyn, start adding in rcp's, update ndeplintInterp.ncl script
+ clm3_7_00 erik 01/22/2010 Update to datm8, redirect aquifer overflow to drainage, add gx3v7 masks, script to extract regional datasets, add harvesting for CN, modify shrubs, include urban model, ice stream for snowcapping, new build-namelist system, scale solar by solar zenith angle in datm, deep soil with bedrock at bottom, organic matter in soils, SNICAR for snow radiation, sparse dense aero, snow cover changes
+ clm3_6_64 erik 01/22/2010 Update documentation and README/Quickstart files, set NetCDF large-file format on by default in template, update pio, update some fsurdat files to vocemis-drydep versions, add 2.5x3.33_gx3v7 frac file, make gx3v7 default for 4x5 res
+ clm3_6_63 erik 01/09/2010 Get answers to be identical with ccsm4_0_beta38 for 1 and 2 degree transient cases
+ clm3_6_62 erik 01/08/2010 Fix startup of PFT transient cases so properly use data from pftdyn file rather than finidat file
+ clm3_6_61 erik 01/07/2010 Comment out endrun on finidat and fsurdat weights being incompatible, and go back to using finidat weights
+ clm3_6_60 erik 01/05/2010 Fix clm template
+ clm3_6_59 erik 01/05/2010 Update to datm8, fix so wts used are from fsurdat file NOT finidat file
+ clm3_6_58 erik 12/08/2009 Fix rpointer, correct units for export of nee, start adding testing for intrepid
+ clm3_6_57 erik 11/20/2009 Redirect aquifer overflow to drainage, so doesn't end up in ocean
+ clm3_6_56 erik 11/10/2009 New ndepdat and ndepdyn datasets
+ clm3_6_55 erik 11/05/2009 Fix tool to create Nitrogen deposition datasets, and change configure to use CCSM Makefile as source for TopLevel Makefile
+ clm3_6_54 erik 10/28/2009 Allow comp_intf to change on ccsm build, reduce default hist fields, start adding 2.5x3.33, start adding VOC fsurdat datasets, new finidat files for f09 and f19
+ clm3_6_53 erik 09/22/2009 Fix so that T31_gx3v7 file is actually included
+ clm3_6_52 erik 09/17/2009 Add T31_gx3v7 support, remove forganic, read from fsurdat, add script to extract regional datasets, work with CN output, add more urban/rural fields
+ clm3_6_51 erik 09/01/2009 Update fndepdyn and aerdep datasets (f02,f05,f09,f10) (1850,2000) and f09, f10 transient (1850-2000)
+ clm3_6_50 erik 08/28/2009 Fix ncl regridding scripts so that NO missing values are allowed for aerosol and nitrogen deposition
+ clm3_6_49 erik 08/25/2009 Fix ncl interpolation scripts, update externals, turn on CLM_CCSM_BLD for bluefire,jaguar, ESMF3 compliance
+ clm3_6_48 erik 08/12/2009 New aerosol/nitrogen deposition datasets, mksurfdata work, scm work, clm_usr_name option to build-namelist
+ clm3_6_47 erik 08/03/2009 Fix hybrid bug for dynpft case, update externals
+ clm3_6_46 erik 07/22/2009 Get more tests to work/document them, add use cases for 1850_control, 2000_control, and
+ 20thC_transient, straighten out single-point grids, Listen to LND_CDF64 env variable from
+ template, remove CLM_ARB_IC.
+ clm3_6_45 erik 07/10/2009 Remove inconsistent finidat file in clm3_6_44
+ clm3_6_44 erik 07/09/2009 Fix C13 bug, update scripts, drv, datm. Add domain files for idmap atm-ocn grids for datm. Remove SEQ_MCT, add new ESMF env vars to template. Work with ndeplintInterp
+ clm3_6_43 erik 06/10/2009 Fix pftdyn bug, enable 1D primary hist files, fix time-const3D output, fix template bug, enable cpl_esmf/cpl_mct
+ clm3_6_42 erik 06/02/2009 Bring CN Harvest branch to trunk
+ clm3_6_41 kauff 05/29/2009 shrub mods, abort if nthreads > 1 (temporary, wrt bugz #965)
+ clm3_6_40 erik 05/28/2009 Fix openMP bug, add fndepdyn ncl script, fix interpinic for urban, add mkharvest to mksurfdata, new spinups, turn CLAMP on for CASA or CN
+ clm3_6_39 erik 05/07/2009 Bug fix for script version and maxpatchpft back to numpft+1
+ clm3_6_38 erik 05/06/2009 New fsurdat for other resolutions, bug-fixes, deep wetlands to bedrock, new spinups for 1.9x2.5 1850, 2000
+ clm3_6_37 erik 04/27/2009 Update faerdep dataset for 1.9x2.5 to point to version cice is using for 1850 and 2000
+ clm3_6_36 erik 04/27/2009 Handle transient aerosol, make maxpatchpft=numpft+1 default, new datasets for 1.9x2.5 and 0.9x1.25, change doalb
+ clm3_6_35 erik 04/20/2009 Fix major logic bug in mksurfdata
+ clm3_6_34 oleson 04/19/2009 Fix bangkok urban bug
+ clm3_6_33 erik 04/16/2009 Bring in dynpft changes from cbgc branch
+ clm3_6_32 erik 04/15/2009 Add irrigation area to mksrfdata, fix high-res and pftdyn problems
+ clm3_6_31 erik 04/01/2009 New surface datasets for 1850,2000, support for 0.9x1.25_gx1v6, urban always on. New pft-physiology file. Update scripts so remove some CLM_ env_conf vars. Fix CN for urban/pftdyn.
+ clm3_6_30 oleson 03/19/2009 Fix urban roof/wall layers
+ clm3_6_29 oleson 03/19/2009 CN SAI, CN testing fix, rad step size fix
+ clm3_6_28 oleson 03/17/2009 Fix permission denied error when reading surface dataset
+ clm3_6_27 oleson 03/16/2009 Urban model changes and FGR12 fix
+ clm3_6_25 dlawren 03/13/2009 Daylength control on Vcmax, 1%Lake,wetland,glacier in mksrfdat, remove ELEVATION in surface data file
+ clm3_6_24 oleson 03/09/2009 Fix urban testing and some history field changes
+ clm3_6_23 oleson 03/08/2009 Prune history fields and change to snowdp threshold for solar radiation penetration into snow
+ clm3_6_21 oleson 03/04/2009 History file changes and finish testing on tags clm3_6_19 and clm3_6_20
+ clm3_6_19 oleson 02/27/2009 Changes to urban model and urban surface data
+ clm3_6_17 oleson 02/26/2009 Urban model changes and mksurfdata changes to incorporate urban data
+ clm3_6_16 erik 02/12/2009 Multiple elevation classes on surface dataset, urban fixes, mpi-serial and testing fixes
+ clm3_6_15 erik 01/19/2009 Bring clm36sci branch to the trunk
+ clm3_6_14 erik 10/10/2008 Fix some global urban issues, fix pftdyn, really get compile-only option
+ working in testing
+ clm3_6_13 erik 10/01/2008 Update to new version of cpl7 scripts and build, update externals for versions
+ needed for clm36sci branch, add new CASA tests
+ clm3_6_12 erik 09/21/2008 Fix restarts for urban, add capability to do global urban experiments,
+ add in new forcing height changes
+ clm3_6_11 dlawren 08/26/2008 Ice stream for snow capped regions
+ clm3_6_10 tcraig 08/15/2008 extend rtm tracer, ascale for tri-grids, AIX O3 to O2
+ clm3_6_09 erik 08/11/2008 Fix clm.cpl7.template to run hybrid and branch cases
+ clm3_6_08 erik 08/06/2008 Fix bugs, and build changes for inputdata repo
+ clm3_6_07 erik 07/08/2008 Implement new build namelist system from Vertenstein/Eaton, bluefire, and BGP updates
+ clm3_6_06 erik 05/30/2008 Small fix needed for ccsm4_alpha30
+ (use gx1v5 for some resolutions when OCN_GRID==ATM_GRID)
+ clm3_6_05 erik 05/27/2008 Fix to compile with PGI-6, update scripts, fix cpl7.template for new scripts LND_GRID,
+ fix 2.65x3.33 frac dataset.
+ clm3_6_04 erik 05/20/2008 Remove all MCT permutes, fix cpl7 script issues, remove offline mode,
+ add ability to run over a range of years
+ clm3_6_03 erik 05/08/2008 Fix so listen to next_swcday to calculate albedo rather than using irad
+ clm3_6_02 erik 03/25/2008 Minor fix in configure remove perl5lib version under models/lnd/clm/bld
+ clm3_6_01 erik 03/20/2008 40 m forcing height changes for clm
+ clm3_6_00 erik 03/20/2008 Fully implement sequential-ccsm mode, upgrade configure, build-namelist and testing,
+ upgrade interpolation tool, add mkdatadomain, write to iulog rather than 6 explicitly,
+ SCAM update, Update datasets, add archiving, and build-streams, add in point version
+ of Urban model, change directory structure to mimic CCSM
+ clm3_5_20 erik 03/17/2008 Bug fixes before spinning off clm3_6_00, put in changes from ccsm4a01_clm3_5_18
+ to ccsm4a04_clm3_5_18
+ clm3_5_19 erik 03/06/2008 Change directory structure to mimic CCSM, fix so no NaNS on BGC interpinic output,
+ new half degree CN clmi dataset
+ clm3_5_18 erik 02/21/2008 Update to latest seq-ccsm4.alpha tag
+ clm3_5_17 erik 02/06/2008 Merge Tony Craig's FMI branch fmi12_clm3_5_16 to the clm trunk
+ clm3_5_16 erik 01/28/2008 Get point version of Urban code onto trunk (urban code can not restart)
+ clm3_5_15 erik 12/10/2007 Fix interpinic for half degree grid, add in large-file support,
+ allow configure to work with ccsm directory structure
+ clm3_5_14 erik 11/27/2007 Use build-streams, and archiving, multiple bug-fixes
+ clm3_5_13 erik 11/16/2007 Update xml file with file needed for ccsm3_5_beta18
+ clm3_5_12 erik 11/08/2007 Tag with new files needed for ccsm3_5_beta17
+ clm3_5_11 erik 09/28/2007 Update datasets in the DefaultCLM file for 0.23x0.31, 0.47x0.63, 0.9x1.25 and
+ add fndepdyn file for 1.9x2.5
+ clm3_5_10 jet 09/18/2007 SCAM update
+ clm3_5_09 erik 08/31/2007 Change configure to NOT have csm_share code for ccsm_con option, and add in 1x1.25 file,
+ and update datm7 and csm_share
+ clm3_5_08 tcraig 08/20/2007 convert 6 to iulog in logfile, updates for I/O
+ clm3_5_07 erik 08/17/2007 Add mkdatadomain tool, add cprnc and perl5lib as externals
+ clm3_5_06 erik 08/10/2007 Update: interpolation, testing, script namelist build, and scripts. Fix bugs,
+ and fix possible
+ clm3_5_05 tcraig 07/11/2007 seq clm mods and first hist refactor mods
+ clm3_5_04 mvertens 06/05/2007 lnd_comp_mct.F90 change to work with sequential diagnostics
+ clm3_5_03 tcraig 05/23/2007 reduce memory, partial I/O refactor, downscaling implementation
+ clm3_5_02 mvertens 05/22/2007 put in hourly coupling with sequential driver
+ clm3_5_01 erik 05/16/2007 Move newcn06 branch to trunk
+ clm3_5_00 erik 05/03/2007 New surface datasets, improved canopy integration, and various improvements to Hydrology
diff --git a/Copyright b/Copyright
new file mode 100644
index 0000000000..1e8c87a1c6
--- /dev/null
+++ b/Copyright
@@ -0,0 +1,59 @@
+--------------------------------------------------------------------------------
+ CESM1.0
+--------------------------------------------------------------------------------
+The Community Earth System Model (CESM) was developed in cooperation with the
+National Science Foundation, the Department of Energy, the National Aeronautics
+and Space Administration, and the University Corporation for Atmospheric
+Research National Center for Atmospheric Research.
+
+Except for the segregable components listed below, CESM is public domain software.
+There may be other third party tools and libraries that are embedded, and they may
+have their own copyright notices and terms.
+
+The following components are copyrighted and may only be used, modified, or
+redistributed under the terms indicated below.
+
+Code ESMF
+Institution University of Illinois/NCSA
+Copyright Copyright 2002-2009, University of Illinois/NCSA Open Source License
+Terms of Use http://www.gnu.org/copyleft/gpl.html
+
+Code POP, SCRIP, CICE
+Institution Los Alamos National Laboratory
+Copyright Copyright 2008 Los Alamos National Security, LLC
+Terms of Use http://oceans11.lanl.gov/trac/CICE/wiki/CopyRight
+
+Code Glimmer-CISM
+Institution LANL/U. Bristol/U. Edinburgh/U. Montana/U. Swansea
+Copyright Copyright 2004-2010, GNU General Public License
+Terms of Use GNU General Public License
+
+Code AER RRTMG
+Institution Atmospheric and Environmental Research, Inc.
+Copyright Copyright 2002-2010, Atmospheric and Environmental Research, Inc.
+Terms of Use http://rtweb.aer.com/rrtm_frame.html
+
+Code MCT
+Institution Argonne National Laboratory
+Copyright Copyright 2000, 2010, University of Chicago.
+Terms of Use http://www.cesm.ucar.edu/models/cesm1.0/copyright_MCT.html
+
+Code ICSSP
+Institution N/A
+Copyright Copyright 2003, 2010, Steve Klein and Mark Webb
+Terms of Use http://gcss-dime.giss.nasa.gov/simulator.html
+
+Code XML/Lite
+Institution Wadsack-Allen Digital Group
+Copyright Copyright 2001,2010 Wadsack-Allen Digital Group
+Terms of Use http://aspn.activestate.com/ASPN/CodeDoc/XML-Lite/Lite.html
+
+Code Inf_NaN_Detection module
+Institution Lahey Computer Systems, Inc.
+Copyright Copyright(c) 2003, Lahey Computer Systems, Inc.
+Terms of Use Copies of this source code, or standalone compiled files derived from
+ this source may not be sold without permission from Lahey Computer Systems.
+ All or part of this module may be freely incorporated into executable
+ programs which are offered for sale. Otherwise, distribution of all or
+ part of this file is permitted, provided this copyright notice and header
+ are included.
diff --git a/README b/README
new file mode 100644
index 0000000000..029a891482
--- /dev/null
+++ b/README
@@ -0,0 +1,113 @@
+components/clm/README 04/07/2015
+
+Community Land Surface Model (CLM) science version 4.5.1 series -- source code, tools,
+offline-build and test scripts. This gives you everything you need
+to run CLM with CESM with datm8 to provide Qian or CRU NCEP forcing data in
+place of a modeled atmosphere.
+
+General directory structure:
+
+components/clm/doc ---- Documentation of CLM.
+components/clm/bld ---- Template, configure and build-namelist scripts for clm.
+components/clm/src ---- CLM Source code.
+components/clm/test --- CLM Testing scripts for CLM offline tools.
+components/clm/tools -- CLM Offline tools to prepare input datasets and process output.
+
+cime/scripts --------------- CPL7 scripts
+
+cime/driver_cpl/driver ---------- CESM top level driver source code.
+cime/driver_cpl/shr ------------- CESM top level driver shared code.
+cime/driver_cpl/shr_esmf -------- CESM top level driver shared code for ESMF.
+cime/components/data_comps/datm - CESM Data model version 8 source code.
+components/cism ----------------- CESM Community land Ice Sheet Model.
+components/rtm ------------------ CESM River Transport Model.
+cime/components/stub_comps/sice - CESM stub sea-ice model source code.
+cime/components/stub_comps/socn - CESM stub ocean model source code.
+cime/components/stub_comps/sglc - CESM stub glacier model source code.
+cime/external ------------------- CESM external utility codes
+ (Model Coupling Toolkit (MCT)
+ (Earth System Model Framework)
+ (timing -- code timing utility)
+ (pio -- Parallel Input/Output)
+
+Top level documentation:
+
+README ------------------- This file
+README_EXTERNALS --------- Information on how to work with subversion externals for clm
+SVN_EXTERNAL_DIRECTORIES - Subversions externals to use
+Copyright ---------------- CESM Copyright file
+UpDateChangeLog.pl ------- Script to add documentation on a tag to the
+ ChangeLog/ChangeSum files
+ChangeLog ---------------- Documents different CLM versions
+ChangeSum ---------------- Summary documentation of different CLM versions
+ChangeLog/ChangeSum ------ Also copied to components/lnd/clm/doc
+
+Documentation of Namelist Items: (view the following in a web browser)
+
+components/clm/bld/namelist_files/namelist_definition.xml --- Definition of all namelist items
+components/clm/bld/namelist_files/namelist_defaults_clm.xml - Default values
+
+=============================================================================================
+Important files in main directories:
+=============================================================================================
+
+components/lnd/clm/doc/Quickstart.GUIDE -------- Quick guide to using cpl7 scripts.
+components/lnd/clm/doc/Quickstart.userdatasets - Quick guide to using your own datasets.
+components/lnd/clm/doc/IMPORTANT_NOTES --------- Some important notes about this version of
+ clm, configuration modes and namelist items
+ that are not validated or functional.
+components/clm/doc/KnownBugs --------------- List of known bugs.
+components/clm/doc/KnownLimitations -------- List of known limitations and workarounds.
+components/clm/doc/ChangeLog --------------- Detailed list of changes for each model version.
+components/clm/doc/ChangeSum --------------- Summary one-line list of changes for each
+ model version.
+components/clm/doc/README ------------------ Documentation similar to this file
+components/clm/doc/UsersGuide -------------- CLM Users Guide
+components/clm/doc/CodeReference ----------- CLM Code Reference Guide
+
+components/clm/bld/configure --------------- Script to prepare CLM to be built.
+
+components/clm/test/tools/test_driver.sh -- Script for general software testing of
+ CLM's offline tools.
+
+components/clm/tools/clm4_5/mksurfdata_map --- Directory to build program to create surface dataset
+ at any resolution.
+components/clm/tools/clm4_5/interpinic ------- Directory to build program to interpolate initial
+ conditions to any resolution.
+components/clm/tools/shared/mkdatadomain ----- Directory to build program to create datm7 or docn7
+ domain files from clm files.
+components/clm/tools/shared/mkprocdata_map --- Process history data from unstructured grids to a gridded
+ format.
+components/clm/tools/shared/ncl_scripts ----- Directory of NCL and perl scripts to do various
+ tasks. Most notably to plot perturbation error growth
+ testing and to extract regional information from
+ global datasets for single-point/regional simulations.
+
+components/clm/bld/README ------------- Description of how to use the configure and
+ build-namelist scripts.
+
+=============================================================================================
+Source code directory structure:
+=============================================================================================
+
+components/clm/src/biogeochem -- Biogeochemisty
+components/clm/src/main -------- Main control and high level code
+components/clm/src/cpl --------- Land model high level MCT and ESMF drivers
+components/clm/src/biogeophys -- Biogeophysics (Hydrology)
+
+=============================================================================================
+ QUICKSTART: using the CPL7 scripts:
+=============================================================================================
+
+ cd cime/scripts
+ ./create_newcase # get help on how to run create_newcase
+ ./create_newcase -case testI -mach bluefire -res f19_g16 -compset I
+ # create new "I" case for bluefire at 1.9x2.5_gx1v6 res
+ # "I" case is clm active, datm8, and inactive ice/ocn
+ cd testI
+ ./cesm_setup # create the $CASE.run file
+ ./testI.bluefire.build # build model and create namelists
+ ./testI.bluefire.submit # submit script
+ # (NOTE: ./xmlchange RESUBMIT=10 to set RESUBMIT to number
+ # # of times to automatically resubmit -- 10 in this example)
+
diff --git a/README_EXTERNALS b/README_EXTERNALS
new file mode 100644
index 0000000000..66afc744f6
--- /dev/null
+++ b/README_EXTERNALS
@@ -0,0 +1,56 @@
+Some guidelines on working with externals in CLM:
+
+Also see:
+
+ https://wiki.ucar.edu/display/ccsm/Creating+a+CLM+Tag
+
+ https://wiki.ucar.edu/display/ccsm/Using+SVN+to+Work+with+CLM+Development+Branches
+
+Example taken from bulletin board forum for "Subversion Issues" in the
+thread for "Introduction to Subversion"...(070208)
+
+Working with externals:
+
+checkout the HEAD of clm's trunk into working copy directory
+> svn co $SVN/clm2/trunk clm_trunk_head_wc
+
+view the property set for clm's external definitions
+> svn propget svn:externals clm_trunk_head_wc
+
+view revision, URL and other useful information specific to external files
+> cd clm_trunk_head_wc/components/clm/src
+> svn info main
+
+create new clm branch for mods required of clm
+> svn copy $SVN/clm2/trunk_tags/<tag_name> $SVN/clm2/branches/<branch_name> -m "appropriate message"
+
+have external directories in working copy refer to new clm branch to make changes
+> svn switch $SVN/clm2/branches/<branch_name>/src/main main
+
+--make changes to clm files--
+
+when satisfied with changes and testing, commit to HEAD of clm branch
+> svn commit main -m "appropriate message"
+
+tag new version of clm branch - review naming conventions!
+> svn copy $SVN/clm2/branches/<branch_name> $SVN/clm2/branch_tags/<branch_name>_tags/<tag_name> -m "appropriate message"
+
+have external directories in working copy refer to new clm tag
+> svn switch $SVN/clm2/branch_tags/<branch_name>_tags/<tag_name>/src/main main
+
+modify clm's property for external definitions in working copy
+> vi clm_trunk_head_wc/SVN_EXTERNAL_DIRECTORIES
+
+--point definition to URL of new-tag-name--
+
+set the property - don't forget the 'dot' at the end!
+> svn propset svn:externals -F SVN_EXTERNAL_DIRECTORIES clm_trunk_head_wc
+
+--continue with other clm mods--
+
+commit changes from working copy directory to HEAD of clm trunk - NOTE: a commit from here will *NOT* recurse to external directories
+> cd clm_trunk_head_wc
+> svn commit -m "appropriate message"
+
+tag new version of clm trunk
+> svn copy $SVN/clm2/trunk $SVN/clm2/trunk_tags/<tag_name> -m "appropriate message"
diff --git a/README_cime b/README_cime
new file mode 100644
index 0000000000..98c25c94b7
--- /dev/null
+++ b/README_cime
@@ -0,0 +1,62 @@
+For the trunk:
+
+1.) Start at the top level directory of your sandbox
+
+ cd $clm_root
+
+2.) Update JUST the top level directory with the externals.
+
+ svn update --depth immediates
+
+3.) Move any local changes you have under clm
+ (This preserves any local changes you have in your sandbox, as well as changes on your branch)
+
+ svn mv models/lnd/clm/ components/clm
+
+4.) Move any local changes you have under scripts to save your cases
+ (if you don't have cases to save under scripts, you don't have
+ to do this step)
+
+ svn mkdir --parents cime/scripts
+ mv scripts cime/scripts/
+
+5.) Bring in updated externals and updates to clm source files
+
+ svn update
+
+6.) Mark changes to models directory as resolved
+
+ svn resolved models
+
+
+For branches:
+
+0.) Make sure ALL your changes are checked into your branch.
+
+1.) Checkout your branch WITHOUT externals
+
+ svn co --ignore-externals $SVN_MOD_URL/clm2/branches/<branch_name>
+
+2.) Move any changes you have under clm on your branch
+
+ svn mkdir components
+ svn mv models/lnd/clm/ components/clm
+
+3.) Merge the top level
+
+ svn merge --depth immediates $SVN_MOD_URL/clm2/trunk_tags/clm4_5_1_r104 $SVN_MOD_URL/clm2/trunk_tags/clm4_5_1_r105
+
+4.) Merge changes in the clm directory
+
+ svn merge $SVN_MOD_URL/clm2/trunk_tags/clm4_5_1_r104/models/lnd/clm $SVN_MOD_URL/clm2/trunk_tags/clm4_5_1_r105/components/clm
+
+5.) Bring in updated externals and updates to clm source files
+
+ svn update
+
+6.) Mark changes to models and components/clm directories as resolved
+
+ svn resolved models
+ svn resolved components
+ svn resolved components/clm
+
diff --git a/SVN_EXTERNAL_DIRECTORIES b/SVN_EXTERNAL_DIRECTORIES
new file mode 100644
index 0000000000..9933463548
--- /dev/null
+++ b/SVN_EXTERNAL_DIRECTORIES
@@ -0,0 +1,5 @@
+cime https://github.com/CESM-Development/cime/tags/cime2.0.7
+components/clm/tools/shared/gen_domain https://github.com/CESM-Development/cime/tags/cime2.0.7/tools/mapping/gen_domain_files
+components/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism2_1_02
+components/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_52
+
diff --git a/SVN_EXTERNAL_DIRECTORIES.orig b/SVN_EXTERNAL_DIRECTORIES.orig
new file mode 100644
index 0000000000..9933463548
--- /dev/null
+++ b/SVN_EXTERNAL_DIRECTORIES.orig
@@ -0,0 +1,5 @@
+cime https://github.com/CESM-Development/cime/tags/cime2.0.7
+components/clm/tools/shared/gen_domain https://github.com/CESM-Development/cime/tags/cime2.0.7/tools/mapping/gen_domain_files
+components/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism2_1_02
+components/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_52
+
diff --git a/UpDateChangeLog.pl b/UpDateChangeLog.pl
new file mode 100755
index 0000000000..27e4803122
--- /dev/null
+++ b/UpDateChangeLog.pl
@@ -0,0 +1,258 @@
+#!/usr/bin/env perl
+#=======================================================================
+#
+# This is a script to update the ChangeLog
+#
+# Usage:
+#
+# perl ChangeLog tag-name One-line summary
+#
+#
+#=======================================================================
+
+use strict;
+use Getopt::Long;
+use IO::File;
+#use warnings;
+#use diagnostics;
+
+use English;
+
+my $ProgName;
+($ProgName = $PROGRAM_NAME) =~ s!(.*)/!!; # name of program
+my $ProgDir = $1; # name of directory where program lives
+
+sub usage {
+ die <<EOF;
+
+OPTIONS
+ -compbrnch version Enter clm branch version to compare to (under branch_tags in repo).
+ [or -cb]
+ -comptrunk version Enter clm trunk version to compare to (under trunk_tags in repo).
+ [or -ct]
+ -help [or -h] Help on this script.
+ -update [or -u] Just update the date/time for the latest tag
+ In this case no other arguments should be given.
+ARGUMENTS
+ Tag name of tag to document
+ Short summary description of this tag
+EXAMPLES:
+ To just update the date/time for the latest tag
+
+ $ProgName -update
+
+ To document a new tag
+
+ $ProgName clm4_5_2_r097 "Description of this tag"
+
+ To document a new tag and compare expected fails to previous tag.
+
+ $ProgName clm4_5_2_r097 "Description of this tag" -ct clm4_5_2_r096
+EOF
+}
+
+my %opts = (
+ help => 0,
+ update => 0,
+ comptrunk => undef,
+ compbrnch => undef,
+ );
+GetOptions(
+ "h|help" => \$opts{'help'},
+ "u|update" => \$opts{'update'},
+ "ct|comptrunk=s" => \$opts{'comptrunk'},
+ "cb|compbrnch=s" => \$opts{'compbrnch'},
+ );
+if ( $opts{'help'} ) {
+ usage();
+}
+my $tag; my $sum;
+
+if ( ! $opts{'update'} ) {
+ if ( $#ARGV != 1 ) {
+ print "ERROR: wrong number of arguments: $ARGV\n";
+ usage();
+ }
+
+ $tag = $ARGV[0];
+ $sum = $ARGV[1];
+
+ if ( $tag !~ /clm[0-9]+_([0-9]+)_[0-9]+_r[0-9]+/ ) {
+ print "ERROR: bad tagname: $tag\n";
+ usage();
+ }
+} else {
+ if ( $#ARGV != -1 ) {
+ print "ERROR: wrong number of arguments when update option picked: $ARGV\n";
+ usage();
+ }
+}
+my $EDITOR = $ENV{EDITOR};
+if ( $EDITOR !~ /.+/ ) {
+ print "ERROR: editor NOT set -- set the env variable EDITOR to the text editor you would like to use\n";
+ usage();
+}
+
+
+my $template = ".ChangeLog_template";
+my $changelog = "ChangeLog";
+my $changesum = "ChangeSum";
+my $changelog_tmp = "ChangeLog.tmp";
+my $changesum_tmp = "ChangeSum.tmp";
+
+my $user = $ENV{USER};
+if ( $user !~ /.+/ ) {
+ die "ERROR: Could not get user name: $user";
+}
+my @list = getpwnam( $user );
+my $fullname = $list[6];
+my $date = `date`;
+chomp( $date );
+
+if ( $date !~ /.+/ ) {
+ die "ERROR: Could not get date: $date\n";
+}
+
+#
+# Deal with ChangeLog file
+#
+my $fh = IO::File->new($changelog_tmp, '>') or die "** $ProgName - can't open file: $changelog_tmp\n";
+
+#
+# If adding a new tag -- read in template and add information in
+#
+if ( ! $opts{'update'} ) {
+ open( TL, "<$template" ) || die "ERROR:: trouble opening file: $template";
+ while( $_ = <TL> ) {
+ if ( $_ =~ /Tag name:/ ) {
+ chomp( $_ );
+ print $fh "$_ $tag\n";
+ } elsif ( $_ =~ /Originator/ ) {
+ chomp( $_ );
+ print $fh "$_ $user ($fullname)\n";
+ } elsif ( $_ =~ /Date:/ ) {
+ chomp( $_ );
+ print $fh "$_ $date\n";
+ } elsif ( $_ =~ /One-line Summary:/ ) {
+ chomp( $_ );
+ print $fh "$_ $sum\n";
+ } elsif ( $_ =~ /CLM tag used for the baseline comparison tests if applicable:/ ) {
+ chomp( $_ );
+ if ( defined($opts{'comptrunk'}) ) {
+ print $fh "$_ $opts{'comptrunk'}\n";
+ &AddExpectedFailDiff( $fh, "trunk_tags/$opts{'comptrunk'}" );
+ } elsif ( defined($opts{'compbrnch'}) ) {
+ print $fh "$_ $opts{'compbrnch'}\n";
+ &AddExpectedFailDiff( $fh, "branch_tags/$opts{'compbrnch'}" );
+ } else {
+ print $fh "$_\n";
+ }
+ } else {
+ print $fh $_;
+ }
+ }
+ close( TL );
+}
+open( CL, "<$changelog" ) || die "ERROR:: trouble opening file: $changelog";
+my $update = $opts{'update'};
+my $oldTag = "";
+while( $_ = <CL> ) {
+ # If adding a new tag check that new tag name does NOT match any old tag
+ if ( $_ =~ /Tag name:[ ]*(clm.+)/ ) {
+ $oldTag = $1;
+ if ( (! $opts{'update'}) && ($tag eq $oldTag) ) {
+ close( CL );
+ close( $fh );
+ system( "/bin/rm -f $changelog_tmp" );
+ print "ERROR:: New tag $tag matches a old tag name\n";
+ usage();
+ }
+ # If updating the date -- find first occurance of data and change it
+ # Then turn the update option to off
+ } elsif ( ($update) && ($_ =~ /(Date:)/) ) {
+ print $fh "Date: $date\n";
+ print "Update $oldTag with new date: $date\n";
+ $update = undef;
+ $_ = <CL>;
+ }
+ print $fh $_;
+}
+# Close files and move to final name
+close( CL );
+$fh->close( );
+system( "/bin/mv $changelog_tmp $changelog" );
+#
+# Deal with ChangeSum file
+#
+
+open( FH, ">$changesum_tmp" ) || die "ERROR:: trouble opening file: $changesum_tmp";
+
+open( CS, "<$changesum" ) || die "ERROR:: trouble opening file: $changesum";
+
+my $update = $opts{'update'};
+
+$date = `date "+%m/%d/%Y"`;
+chomp( $date );
+
+while( $_ = <CS> ) {
+ # Find header line
+ if ( $_ =~ /=====================/ ) {
+ print FH $_;
+ my $format = "%16.16s %8.8s %10.10s %s\n";
+ if ( $update ) {
+ $_ = <CS>;
+ if ( /^(.{16}) (.{8}) (.{10}) (.+)$/ ) {
+ $tag = $1;
+ $user = $2;
+ $sum = $4;
+ } else {
+ die "ERROR: bad format for ChangeSum file\n";
+ }
+ }
+ printf FH $format, $tag, $user, $date, $sum;
+ $_ = <CS>;
+ }
+ print FH $_;
+}
+# Close files and move to final name
+close( CS );
+close( FH );
+system( "/bin/mv $changesum_tmp $changesum" );
+
+#
+# Edit the files
+#
+if ( ! $opts{'update'} ) {
+ system( "$EDITOR $changelog" );
+ system( "$EDITOR $changesum" );
+}
+system( "/bin/cp -fp $changelog components/clm/doc/." );
+system( "/bin/cp -fp $changesum components/clm/doc/." );
+system( "/bin/chmod 0444 components/clm/doc/$changelog" );
+system( "/bin/chmod 0444 components/clm/doc/$changesum" );
+
+sub AddExpectedFailDiff {
+#
+# Add information about the expected fail difference
+#
+ my $fh = shift;
+ my $version = shift;
+
+ my $SVN_MOD_URL = "https://svn-ccsm-models.cgd.ucar.edu/clm2/";
+ my $expectedFail = `find . -name 'expected*Fail*.xml' -print`;
+ if ( $expectedFail eq "" ) {
+ die "ERROR:: expectedFails file NOT found here\n";
+ }
+
+ `svn ls $SVN_MOD_URL/$version` || die "ERROR:: Bad version to compare to: $version\n";
+ `svn ls $SVN_MOD_URL/$version/$expectedFail` || die "ERROR:: expectedFails file NOT found in: $version\n";
+ print $fh "\nDifference in expected fails from testing:\n\n";
+ my $diff = `svn diff --old $SVN_MOD_URL/$version/$expectedFail --new $expectedFail`;
+ if ( $diff eq "" ) {
+ print $fh " No change in expected failures in testing\n";
+ } else {
+ print $fh $diff;
+ }
+}
diff --git a/components/cism/ChangeLog b/components/cism/ChangeLog
new file mode 100644
index 0000000000..47d1874d2a
--- /dev/null
+++ b/components/cism/ChangeLog
@@ -0,0 +1,5976 @@
+================================================================================
+This file describes what main-trunk tags were created and why
+================================================================================
+
+================================================================================
+Originator: sacks
+Date: Aug 4, 2015
+Model: cism
+Version: cism2_1_02
+One-line summary: Move prebeta goldbach tests to hobart
+
+Purpose of changes:
+
+ With hobart replacing goldbach, all components are moving prealpha & prebeta
+ tests from golbach to hobart.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M cimetest/testlist_cism.xml
+
+Summary of testing: NONE
+
+Externals used for testing: N/A
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: June 3, 2015
+Model: cism
+Version: cism2_1_01
+One-line summary: Update glimmer-cism external to add a 'use' statement
+
+Purpose of changes:
+
+ Two needed variables were missing from a 'use' statement. For some reason,
+ this did not cause problems on the previous yellowstone testing, but showed
+ up as a problem in standalone CISM testing.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+-glimmer-cism https://github.com/CESM-Development/cism/tags/move_glint_to_cpl_n01
++glimmer-cism https://github.com/CESM-Development/cism/tags/move_glint_to_cpl_n02
+
+Summary of testing:
+
+ Ran full yellowstone aux_glc test suite. All tests passed.
+
+Externals used for testing:
+https://svn-ccsm-models.cgd.ucar.edu/cesm1/exp/branch_tags/move_glint_into_cpl2_tags/move_glint_into_cpl2_n04_cesm1_4_beta04
+(I *think*), with mpi-serial patch as noted below
+
+cism tag used for baseline comparisons: cism2_1_00
+
+Any other externals that differed in baseline: see below
+
+================================================================================
+Originator: sacks
+Date: May 19, 2015
+Model: cism
+Version: cism2_1_00
+One-line summary: Move GLC <-> LND coupling to CESM coupler
+
+Purpose of changes:
+
+ Up until now, the GLC <-> LND coupling has been done by CISM's glint
+ library. This is problematic for a few reasons:
+ - glint can only handle regular land grids - not (for example) the CAM-SE
+ grid
+ - glint can only perform bilinear interpolation, not area-conservative
+ remapping, for mapping from LND -> GLC
+ - glint’s remapping is done in serial, on the master processor
+ - any alternative ice sheet model that wants to couple to CESM (e.g.,
+ MPAS-Land Ice) needs to reimplement glint
+ - coupling between the ocean and GLC would need to go through the LND grid,
+ leading to loss of accuracy (especially for high-resolution ocean grids)
+
+ To address these limitations, we have moved the GLC <-> LND coupling out of
+ CISM, into the CESM coupler. This required significant changes to the coupler
+ and scripts. In addition, for GLC/CISM, it required:
+
+ - Writing a replacement for glint in CISM (called glad, for "glimmer already
+ downscaled"), which works with inputs and outputs on the CISM grid rather
+ than on the LND grid.
+
+ - Changing GLC so that its grid is now the CISM grid, not the LND grid. Along
+ with this, removed the CISM_GRID xml variable (now, just use the GLC_GRID
+ variable, which has the value that CISM_GRID used to have).
+
+ - Changing various namelist options to accommodate these changes
+
+ I have also enabled the glc -> ocn coupling. Previously, this was disabled
+ because we didn't have the necessary mapping files. Now I have generated the
+ necessary mapping files, and they are available out-of-the-box in the
+ upcoming CIME tag.
+
+ The new code currently does not have any PDD support. I have pulled out some
+ never-quite-working code related to the PDD option, because now the PDD
+ option is definitively NOT supported.
+
+
+Changes answers relative to previous tag: YES
+
+ Since the coupling method has changed - including using area-conservative
+ remapping rather than bilinear remapping for the downscaling - answers change
+ significantly. However, I have done many eyeball comparisons to verify that
+ the new code is operating roughly the same as before.
+
+
+Dependencies: Depends on an upcoming CIME tag (cime2_0_00?)
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Point to glimmer-cism branch
+M SVN_EXTERNAL_DIRECTORIES
+
+*** glc_global_fields renamed to glc_fields for accuracy; this is significantly
+ different now
+D source_glc/glc_global_fields.F90
+A source_glc/glc_fields.F90
+
+*** Operation of glc overrides fundamentally reworked: It no longer works to
+ scale fractions. Instead, the fractions specify topographic height cutoffs
+ below / above which ice_covered is set to 0 / 1.
+M source_glc/glc_override_frac.F90
+
+*** Point to new input files, which contain lat/lon values that agree with the
+ values on Jeremy's new SCRIP grid files. These SCRIP grid files were needed
+ for creating mapping files for the coupler. Lat/lon values are now also
+ needed on the input files in order for CISM/GLC to tell the coupler about
+ its domain information. It seemed good (although maybe not completely
+ necessary) for these two sets of files to agree in terms of lat/lon
+ values. Also, removed some no-longer-needed namelist items.
+M bld/namelist_files/namelist_defaults_cism.xml
+
+*** Fix some documentation
+M tools/glc2scripConvert.ncl
+M tools/README.glc_overlap_tools
+
+*** Other changes; see "Purpose of changes", above, for details.
+D source_glc/glc_global_grid.F90
+A source_glc/glc_indexing_info.F90
+M source_glc/glc_io.F90
+M source_glc/glc_constants.F90
+M source_glc/history_tape_base.F90
+M source_glc/glc_FinalMod.F90
+M source_glc/glc_history.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M cimetest/testmods_dirs/cism/override_glc_frac/user_nl_cism
+M drivers/cpl/glc_comp_esmf.F90
+M drivers/cpl/glc_import_export.F90
+M drivers/cpl/glc_coupling_flags.F90
+M drivers/cpl/glc_cpl_indices.F90
+M drivers/cpl/glc_comp_mct.F90
+M bld/cismIO/cism.buildIO.template.csh
+M bld/README.build-namelist
+M bld/cism.template
+M bld/build-namelist
+M bld/namelist_files/namelist_definition_cism.xml
+M bld/trilinosOptions/README
+M bld/cism.buildnml
+
+
+Summary of testing:
+
+ Ran full yellowstone aux_glc test suite. All tests passed.
+
+Externals used for testing:
+https://svn-ccsm-models.cgd.ucar.edu/cesm1/exp/branch_tags/move_glint_into_cpl2_tags/move_glint_into_cpl2_n03_cesm1_4_beta04
+ with mpi-serial patch as noted below
+
+cism tag used for baseline comparisons: cism2_0_09
+
+Any other externals that differed in baseline: see externals notes under cism2_0_09
+
+================================================================================
+Originator: mvertens (brought to trunk by sacks)
+Date: Apr 29, 2015
+Model: cism
+Version: cism2_0_09
+One-line summary: Updates for latest version of cime
+
+Purpose of changes:
+
+ Updates to make (a) location of test directory and (b) variable names
+ consistent with the latest version of CIME.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Change variable names to be consistent with latest version of CIME
+M bld/README.build-namelist
+M bld/build-namelist
+M bld/cism.buildlib
+M bld/cism.buildnml
+M bld/cism.template
+
+*** Rename cesmtest to cimetest
+D cesmtest
+D cesmtest/testlist_cism.xml
+D cesmtest/testmods_dirs
+D cesmtest/testmods_dirs/cism
+D cesmtest/testmods_dirs/cism/apply_to_multiinstance
+D cesmtest/testmods_dirs/cism/apply_to_multiinstance/README
+D cesmtest/testmods_dirs/cism/apply_to_multiinstance/shell_commands
+D cesmtest/testmods_dirs/cism/oneway
+D cesmtest/testmods_dirs/cism/oneway/README
+D cesmtest/testmods_dirs/cism/oneway/xmlchange_cmnds
+D cesmtest/testmods_dirs/cism/override_glc_frac
+D cesmtest/testmods_dirs/cism/override_glc_frac/include_user_mods
+D cesmtest/testmods_dirs/cism/override_glc_frac/user_nl_cism
+D cesmtest/testmods_dirs/cism/test_coupling
+D cesmtest/testmods_dirs/cism/test_coupling/include_user_mods
+D cesmtest/testmods_dirs/cism/test_coupling/user_nl_cism
+D cesmtest/testmods_dirs/cism/trilinos
+D cesmtest/testmods_dirs/cism/trilinos/README
+D cesmtest/testmods_dirs/cism/trilinos/include_user_mods
+D cesmtest/testmods_dirs/cism/trilinos/shell_commands
+D cesmtest/testmods_dirs/cism/trilinos/user_nl_cism
+A + cimetest
+
+
+Summary of testing:
+
+ Ran full yellowstone aux_glc test suite. All tests passed.
+
+ Note, however, that two tests needed to be rerun to pass:
+
+ PEA_P1_M_Ly2.f09_g16_gl20.TGIS2.yellowstone_intel
+ - seemed to have a system problem the first time
+
+ SMS_D_Ly1.f09_g16_gl20.TGHISTIS2.yellowstone_gnu
+ - died with:
+ MCT::m_AttrVect::lsize_: attribute array length mismatch error, stat =892988089
+ - passed when I reran it
+ - likely a compiler bug
+
+Externals used for testing: cesm1_4_beta02, with mpi-serial patch as noted below
+
+cism tag used for baseline comparisons: cism2_0_08
+
+Any other externals that differed in baseline: see externals notes under cism2_0_08
+
+================================================================================
+Originator: mvertens, sacks
+Date: Feb 20, 2015
+Model: cism
+Version: cism2_0_08
+One-line summary: Updates for Mariana's new testing infrastructure
+
+Purpose of changes:
+
+ This tag updates CISM to fit in with Mariana's major overhaul of the test
+ system and build scripts. Specifically:
+
+ (1) Tests are now distributed with their components, rather than being
+ centralized in 'scripts'
+
+ (2) Build scripts have been converted to perl, and implicit dependencies on
+ the environment have been removed.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Build scripts converted to perl; implicit dependencies on environment removed
+D bld/cism.buildexe.csh
+D bld/cism.cpl7.template
+D bld/cism.buildnml.csh
+M bld/trilinosOptions/README
+A bld/cism.buildlib
+A bld/cism.buildnml
+M bld/README.build-namelist
+A bld/cism.template
+M bld/build-namelist
+M bld/README
+
+*** Tests now distributed into components, rather than being centralized in scripts
+A cesmtest/testmods_dirs/cism/override_glc_frac/user_nl_cism
+A cesmtest/testmods_dirs/cism/override_glc_frac/include_user_mods
+A cesmtest/testmods_dirs/cism/override_glc_frac
+A cesmtest/testmods_dirs/cism/test_coupling/user_nl_cism
+A cesmtest/testmods_dirs/cism/test_coupling/include_user_mods
+A cesmtest/testmods_dirs/cism/test_coupling
+A cesmtest/testmods_dirs/cism/oneway/xmlchange_cmnds
+A cesmtest/testmods_dirs/cism/oneway/README
+A cesmtest/testmods_dirs/cism/oneway
+A cesmtest/testmods_dirs/cism/trilinos/user_nl_cism
+A cesmtest/testmods_dirs/cism/trilinos/include_user_mods
+A cesmtest/testmods_dirs/cism/trilinos/shell_commands
+A cesmtest/testmods_dirs/cism/trilinos/README
+A cesmtest/testmods_dirs/cism/trilinos
+A cesmtest/testmods_dirs/cism/apply_to_multiinstance/shell_commands
+A cesmtest/testmods_dirs/cism/apply_to_multiinstance/README
+A cesmtest/testmods_dirs/cism/apply_to_multiinstance
+A cesmtest/testmods_dirs/cism
+A cesmtest/testmods_dirs
+A cesmtest/testlist_cism.xml
+A cesmtest
+
+
+Summary of testing:
+
+ Ran full yellowstone test suite from Mariana's experimental tag
+ (https://svn-ccsm-models.cgd.ucar.edu/cesm1/exp_tags/newtesting_cesm1_3_beta18c). All
+ tests PASSed.
+
+ Since the CME _D tests failed in the baseline, I ran SMS versions of these,
+ as noted below (cism2_0_07). These baseline comparisons PASSed. (The CME
+ tests themselves now pass, too.)
+
+Externals used for testing:
+https://svn-ccsm-models.cgd.ucar.edu/cesm1/exp_tags/newtesting_cesm1_3_beta18c -
+but since this points to branches rather than tags, it is hard to say exactly
+what was used. (I think it was at r68306 for most components.)
+
+ Also, mpi-serial changes, as noted below.
+
+cism tag used for baseline comparisons: cism2_0_07
+
+Any other externals that differed in baseline: See below for notes on externals
+used for cism2_0_07.
+
+================================================================================
+Originator: sacks
+Date: Feb 20, 2015
+Model: cism
+Version: cism2_0_07
+One-line summary: Separate cism history frequency from coupler history frequency
+
+Purpose of changes:
+
+ With Mariana's new testing framework, some exact restart tests were failing
+ because CISM was writing history files at the end of a few-day run, and these
+ didn't restart properly (specifically, IG/FG/BG tests without
+ test_coupling). This provided motivation for something I have been wanting to
+ do for a long time: separating the cism history frequency from the coupler
+ history frequency.
+
+ With this tag, the default cism history frequency is annual. You can still
+ tie cism history to cpl history if you want, by setting hist_option =
+ 'coupler'. However, this is not recommended for production runs, because it
+ does not write frequency metadata to the history file.
+
+ Along with this change, I have added a global metadata field to the history
+ files saying the frequency of history writes. This is in accordance with a
+ new requirement from the CESM post-processing/workflow group. As noted above,
+ this does NOT work for hist_option = 'coupler'.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+A source_glc/glc_history.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+A source_glc/history_tape_standard.F90
+M source_glc/glc_io.F90
+A source_glc/history_tape_coupler.F90
+A source_glc/history_tape_base.F90
+M drivers/cpl/glc_comp_mct.F90
+M drivers/cpl/glc_comp_esmf.F90
+
+Summary of testing:
+
+ Ran full yellowstone test suite, using the new aux_glc test suite that I have
+ developed... this test suite is not on the scripts trunk (it is on Mariana's
+ testing branch), so I'm listing it below:
+
+ CME_D.T31_g37.IGCLM45.yellowstone_intel.cism-test_coupling # Note that this is more about testing CLM (specifically the esmf code in CLM) than it is about testing CISM
+ ERS_Ly5.T31_g37.IGCLM45.yellowstone_intel.clm-glcMEC_long # Need IG ERS test to catch problems with fields sent before the end of the first year after restart. Also use glcMEC_long testmods to get shorter snow_persistence_max - the main motivation here is to have a long ERS test that checks restart of the snow_persistence stuff (which more belongs in the aux_clm test list, but was added here to avoid needing to add a long ERS test to that test list)... this is mainly needed because we do not have an IG test that uses spun-up initial conditions, so we currently need a long test to completely test the restartability of the snow_persistence stuff. Also note that this is the only multi-year non-TG test in the test list, so this is the one test that a production-like configuration can run for a few years. Consider moving this to the aux_clm45 test list once this test can be made shorter, either through use of initial conditions and/or moving to f10 resolution.
+ ERS_D_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac # Make sure glc override options work correctly, and restart properly
+ SMS_D.T31_g37_gl20.IGCLM45IS2.yellowstone_gnu.cism-test_coupling
+ SMS_D.f09_g16.TG.yellowstone_intel
+ ERS_Ly20_N2_P2.f09_g16_gl10.TG.yellowstone_pgi
+ SMS_D.f09_g16_gl10.TG.yellowstone_gnu
+ SMS_D.f09_g16_gl10.TG.yellowstone_pgi
+ CME_Ly5_N2_P2_D.f09_g16.TG1850.yellowstone_intel
+ ERS_Ly20.f09_g16.TG1850.yellowstone_gnu.cism-oneway
+ ERS_Ly11.f09_g16_gl20.TG1850IS2.yellowstone_gnu
+ SMS_D_Ly1.f09_g16_gl20.TG1850IS2.yellowstone_pgi
+ ERS_Ly20_E.f09_g16.TGHIST.yellowstone_intel
+ PEA_P1_M.f09_g16.TGHIST.yellowstone_pgi
+ ERI_Ly15.f09_g16_gl20.TGHISTIS2.yellowstone_pgi
+ SMS_D_Ly1.f09_g16_gl20.TGHISTIS2.yellowstone_gnu
+ PEA_P1_M_Ly2.f09_g16_gl20.TGIS2.yellowstone_intel # needs to be at least 2 years for there to be enough cpl fields for the cpl log comparison to work
+ SMS_D_Ly1.f09_g16_gl20.TGIS2.yellowstone_intel
+ SMS_D_Ly1.f09_g16_gl20.TGIS2.yellowstone_intel.cism-trilinos
+ SMS_Ly1.f09_g16_gl4.TGIS2.yellowstone_intel # include one short test of the typical production resolution for CISM2
+ ERI_Ly44.f09_g16.TGRCP85.yellowstone_intel
+ CME_Ly3.f09_g16_gl20.TGRCP85IS2.yellowstone_intel
+ NCK_Ly3.f09_g16_gl20.TGRCP85IS2.yellowstone_pgi
+
+ These all passed, except for these, which are currently expected to fail for
+ issues unrelated to CISM:
+
+ PEND CME_D.T31_g37.IGCLM45.yellowstone_intel.cism-test_coupling.GC.150219-095007
+ PEND CME_Ly5_N2_P2_D.f09_g16.TG1850.yellowstone_intel.GC.150219-095007
+
+ Also, this comparison failed due to having no history file in the test
+ case. This is expected and okay - note that this test is about testing the
+ fields sent to the coupler, so it's fine for this to not have a CISM history
+ file (and, as a side-note, this was the main test that provided motivation
+ for this tag, because in Mariana's branch, the ERS comparison of the CISM
+ history file fails for this test)
+
+ FAIL ERS_D_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.cism.h.compare_hist.cism2_0_05_alpha17d
+
+ Since the CME_D tests currently fail (because of issues unrelated to CISM), I
+ ran SMS versions of these tests:
+
+ SMS_D.T31_g37.IGCLM45.yellowstone_intel.cism-test_coupling
+ SMS_Ly5_N2_P2_D.f09_g16.TG1850.yellowstone_intel
+
+
+
+Externals used for testing: cesm1_3_alpha17d, with these mods:
+
+ Index: mpi-serial/mpi.h
+ ===================================================================
+ --- mpi-serial/mpi.h (revision 1224)
+ +++ mpi-serial/mpi.h (working copy)
+ @@ -152,6 +152,8 @@
+
+ } MPI_Status;
+
+ +#define MPI_STATUS_IGNORE (MPI_Status *)1
+ +#define MPI_STATUSES_IGNORE (MPI_Status *)1
+
+ /*
+ * Collective operations
+ Index: mpi-serial/mpif.master.h
+ ===================================================================
+ --- mpi-serial/mpif.master.h (revision 1224)
+ +++ mpi-serial/mpif.master.h (working copy)
+ @@ -132,6 +132,8 @@
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3)
+ + INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
+ + INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+
+
+
+ Index: mpi-serial/mpif.real4double8.h
+ ===================================================================
+ --- mpi-serial/mpif.real4double8.h (revision 1224)
+ +++ mpi-serial/mpif.real4double8.h (working copy)
+ @@ -132,6 +132,8 @@
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3)
+ + INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
+ + INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+
+
+
+ Index: mpi-serial/mpif.real8double16.h
+ ===================================================================
+ --- mpi-serial/mpif.real8double16.h (revision 1224)
+ +++ mpi-serial/mpif.real8double16.h (working copy)
+ @@ -132,6 +132,8 @@
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3)
+ + INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
+ + INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+
+
+
+ Index: mpi-serial/mpif.real8double8.h
+ ===================================================================
+ --- mpi-serial/mpif.real8double8.h (revision 1224)
+ +++ mpi-serial/mpif.real8double8.h (working copy)
+ @@ -132,6 +132,8 @@
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3)
+ + INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
+ + INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+
+
+ Index: ccsm_utils/Testlistxml/testmods_dirs/cism/test_coupling/user_nl_cism
+ ===================================================================
+ --- ccsm_utils/Testlistxml/testmods_dirs/cism/test_coupling/user_nl_cism (revision 68086)
+ +++ ccsm_utils/Testlistxml/testmods_dirs/cism/test_coupling/user_nl_cism (working copy)
+ @@ -1,3 +1,6 @@
+ ! This option changes the ice sheet dynamics time step to 1 day rather than 1 year
+ ! Thus, the ice sheet dynamics can be exercised in a few-day run
+ test_coupling = .true.
+ +
+ +! This option gives us a history file at the end of the run, even for a few-day run
+ +history_option = 'coupler'
+
+
+cism tag used for baseline comparisons: cism2_0_05
+
+Any other externals that differed in baseline: same, but without the mod in
+test_coupling/user_nl_cism
+
+================================================================================
+Originator: sacks
+Date: Feb 13, 2015
+Model: cism
+Version: cism2_0_06
+One-line summary: Update glimmer-cism external to fix cray compilation problem
+
+Purpose of changes:
+
+ Fix compilation problem with a string continuation line (bug 2145)
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/): 2145
+
+List all modified files, and describe the changes:
+
+*** cism2_141202 -> cism2_150213
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+ Just tested the build for these three tests:
+
+ SMS_D.f09_g16_gl4.TGIS2.yellowstone_intel
+ SMS_D.f09_g16_gl4.TGIS2.yellowstone_pgi
+ SMS_D.f09_g16_gl4.TGIS2.yellowstone_gnu
+
+Externals used for testing: cesm1_3_alpha17d
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: Dec 02, 2014
+Model: cism
+Version: cism2_0_05
+One-line summary: update glimmer-cism external
+
+Purpose of changes:
+
+ Update to latest development version. Among other things, this includes a bug
+ fix for basal melt for the higher-order code.
+
+Changes answers relative to previous tag: YES
+
+ Changes answers for CISM2 compsets
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update from cism2_141119a -> cism2_141202
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+ Ran full aux_glc test suite. All tests PASSed.
+
+Externals used for testing: cesm1_3_beta12 with these diffs:
+
+ Index: SVN_EXTERNAL_DIRECTORIES
+ ===================================================================
+ --- SVN_EXTERNAL_DIRECTORIES (revision 65603)
+ +++ SVN_EXTERNAL_DIRECTORIES (working copy)
+ @@ -1,8 +1,8 @@
+ -scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140813
+ -scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/bluewatersfix_Machines_140811_tags/bluewatersfix02_Machines_140811
+ +scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/rollback_timing_updates_tags/rollback_timing_updates_n08_scripts4_141023
+ +scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/piscees_testing_tags/piscees_testing_n02_Machines_140923
+ tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_140625
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_140702b
+ -models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_15
+ +models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_17
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140723
+ scripts/validation_testing https://svn-ccsm-models.cgd.ucar.edu/validation_testing/trunk_tags/validation_20140708/run_CESM/
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_3_45/models/atm/cam/
+ @@ -10,14 +10,14 @@
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/satm
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xatm
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/dead_share
+ -models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140602
+ +models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sglc
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xglc
+ -models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140711
+ +models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140918b
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_131201
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sice
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xice
+ -models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r079//models/lnd/clm
+ +models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r084//models/lnd/clm
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_131201
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/slnd
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xlnd
+
+
+cism tag used for baseline comparisons: cism2_0_04
+
+Any other externals that differed in baseline: none
+
+================================================================================
+Originator: sacks
+Date: Nov 19, 2014
+Model: cism
+Version: cism2_0_04
+One-line summary: update glimmer-cism external
+
+Purpose of changes:
+
+ Point to new external location, in the CESM-Develop github area.
+
+ Update that repo to pull in some minor bug fixes from the piscees repo.
+
+ Also add some error-checking to ensure that beta is present on the input file
+ if using which_ho_babc = 5.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+-glimmer-cism https://github.com/CISM/cism/tags/v2.0
++glimmer-cism https://github.com/CESM-Development/cism/tags/cism2_141119a
+
+Summary of testing:
+
+ Ran full aux_glc test suite (which now just runs on yellowstone). All tests PASSed.
+
+Externals used for testing: cesm1_3_beta12 with these diffs:
+
+ Index: SVN_EXTERNAL_DIRECTORIES
+ ===================================================================
+ --- SVN_EXTERNAL_DIRECTORIES (revision 65521)
+ +++ SVN_EXTERNAL_DIRECTORIES (working copy)
+ @@ -1,8 +1,8 @@
+ -scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140813
+ -scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/bluewatersfix_Machines_140811_tags/bluewatersfix02_Machines_140811
+ +scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/rollback_timing_updates_tags/rollback_timing_updates_n08_scripts4_141023
+ +scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/piscees_testing_tags/piscees_testing_n02_Machines_140923
+ tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_140625
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_140702b
+ -models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_15
+ +models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_17
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140723
+ scripts/validation_testing https://svn-ccsm-models.cgd.ucar.edu/validation_testing/trunk_tags/validation_20140708/run_CESM/
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_3_45/models/atm/cam/
+ @@ -10,14 +10,14 @@
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/satm
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xatm
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/dead_share
+ -models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140602
+ +models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sglc
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xglc
+ -models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140711
+ +models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140918b
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_131201
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sice
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xice
+ -models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r079//models/lnd/clm
+ +models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r084//models/lnd/clm
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_131201
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/slnd
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xlnd
+
+Also, diffs for mpi-serial, as noted below
+
+
+cism tag used for baseline comparisons: cism2_0_03
+
+Any other externals that differed in baseline: none
+
+================================================================================
+Originator: sacks
+Date: Nov 18, 2014
+Model: cism
+Version: cism2_0_03
+One-line summary: tweak namelist options for cism2
+
+Purpose of changes:
+
+- remove 10km support for CISM2: we'll just do testing using the 20km grid
+
+- add which_ho_approx namelist option
+
+- for gland20 with cism2, change default dt to 0.5, and default which_ho_babc to 4
+
+- decrease dt for gland4 to achieve stability in some of my tests (note that we
+ need to use a non-round value because dt needs to translate into an integer
+ number of hours)
+
+
+Changes answers relative to previous tag: YES
+
+ Answers change for cism2 runs due to changes in default namelist options
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+
+Summary of testing:
+
+ Ran two versions of the aux_glc yellowstone tests. (Note that I am removing the
+ aux_glc titan tests). The final set used externals as listed below. The earlier
+ set used rollback_timing_updates_n05_scripts4_141023 (up-to-date with
+ rework_glc_compsets_n04_scripts4_141112a). All tests passed in the final set of
+ tests. In the earlier set, ERS_Ly3.f09_g16_gl4.TGIS2.yellowstone_intel failed
+ simply because there were no lines compared in the cpl log file (cpl hist files
+ were bfb).
+
+ There were lots of BFAILs, because I have totally reworked the test
+ list. However, there were still quite a few compare_hist PASSes.
+
+ These failed compare_hist, due to changes in default length of TG tests:
+
+ SMS_D.f09_g16_gl10.TG.yellowstone_pgi
+ SMS_D.f09_g16.TG.yellowstone_intel
+
+Externals used for testing: cesm1_3_beta12 with these diffs:
+
+ Index: SVN_EXTERNAL_DIRECTORIES
+ ===================================================================
+ --- SVN_EXTERNAL_DIRECTORIES (revision 65521)
+ +++ SVN_EXTERNAL_DIRECTORIES (working copy)
+ @@ -1,8 +1,8 @@
+ -scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140813
+ -scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/bluewatersfix_Machines_140811_tags/bluewatersfix02_Machines_140811
+ +scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/rollback_timing_updates_tags/rollback_timing_updates_n08_scripts4_141023
+ +scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/piscees_testing_tags/piscees_testing_n02_Machines_140923
+ tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_140625
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_140702b
+ -models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_15
+ +models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_17
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140723
+ scripts/validation_testing https://svn-ccsm-models.cgd.ucar.edu/validation_testing/trunk_tags/validation_20140708/run_CESM/
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_3_45/models/atm/cam/
+ @@ -10,14 +10,14 @@
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/satm
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xatm
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/dead_share
+ -models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140602
+ +models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sglc
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xglc
+ -models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140711
+ +models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140918b
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_131201
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sice
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xice
+ -models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r079//models/lnd/clm
+ +models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r084//models/lnd/clm
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_131201
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/slnd
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xlnd
+
+Also, diffs for mpi-serial, as noted below
+
+
+cism tag used for baseline comparisons: cism2_0_00
+
+Any other externals that differed in baseline: see below for externals used for cism2_0_00
+
+================================================================================
+Originator: sacks
+Date: Nov 5, 2014
+Model: cism
+Version: cism2_0_02
+One-line summary: modify tools for creating glcmask files
+
+Purpose of changes:
+
+  The toolchain for creating glcmask files was outdated and didn't work exactly
+  how I wanted it to. I have made the following modifications:
+
+ (1) Create mapping file using the shared CESM tools, rather than the outdated
+ tool that used to be contained here.
+
+ (2) It was getting awkward to support all combinations of CLM grid x CISM
+ grid. Thus, I have rewritten the documentation to just do this glcmask
+ generation for a single CISM grid. From a quick look at the different
+ existing glcmask files, as well as the different CISM grids that we
+ currently support, I don't think the choice of CISM grid should make a
+ significant difference in this glcmask file. I have (somewhat
+ arbitrarily) chosen the (old) 5km CISM grid for this purpose.
+
+ (3) Remove dependence on CLM's landfrac. This dependence seems unnecessary,
+ and fragile in the face of potentially changing ocean grids. With this
+ change, the overlap file contains a 1 value in any CLM grid cell that
+ overlaps a CISM grid cell, regardless of whether this grid cell has any
+ land according to landfrac.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+D tools/scrip_make_wgts_CCSM_to_GLC_bilin.csh
+M tools/README.glc_tools
+M tools/scrip2CLMoverlap.ncl
+M tools/glc2scripConvert.ncl
+M tools/README.glc_overlap_tools
+
+
+Summary of testing:
+
+ Did NOT run any aux_glc tests, because I have only changed these offline
+ tools.
+
+ I have tested the new tool-chain by creating glcmask files for the 3
+ supported CLM resolutions. These new glcmask files have been committed to the
+ inputdata repository.
+
+Externals used for testing: N/A
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: Oct 31, 2014
+Model: cism
+Version: cism2_0_01
+One-line summary: Update ChangeLog entry for last tag
+
+Purpose of changes:
+
+Add stuff to ChangeLog that was accidentally excluded from the last tag
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M ChangeLog
+
+Summary of testing: none
+
+Externals used for testing: n/a
+
+cism tag used for baseline comparisons: n/a
+
+Any other externals that differed in baseline: n/a
+
+================================================================================
+Originator: sacks
+Date: Oct 31, 2014
+Model: cism
+Version: cism2_0_00
+One-line summary: Point to cism2 release code base
+
+Purpose of changes:
+
+Update glimmer-cism external to point to recently-released cism2 code base. This
+pulls in developments that have been made over the last year in the private
+PISCEES repo.
+
+In addition, a number of misc. changes to namelist defaults, build options,
+etc. to work smoothly with new code base.
+
+Changes answers relative to previous tag: No answer changes for
+SIA. Higher-order code (using glissade dycore) finally works!
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Point to cism2 release code base
+M SVN_EXTERNAL_DIRECTORIES
+
+*** Change default config options for cism2 runs, including using glissade
+ dycore and allowing runs at 4km resolution (trilinosOptions for 4km simply
+ copies the 5km file). Also add some error checking for the match between
+ cism_phys (cism1/cism2) and resolution.
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+M bld/build-namelist
+A bld/trilinosOptions/trilinosOptions_gland4.xml
+
+*** Some changes to accommodate changes made in the glimmer-cism code or build
+ system. Also, switch to always using parallel_mpi (rather than
+ parallel_slap), because that's easier than trying to come up with a rule for
+ when we need one or the other.
+M bld/cism.buildexe.csh
+M bld/cism.cpl7.template
+M source_glc/glc_io.F90
+
+
+Summary of testing:
+
+ Ran full aux_glc test suite, on yellowstone and titan. All tests PASS!
+
+ In addition, ran these tests from the prebeta test list:
+
+ PASS SMS.T31_g37_gl10.BGCNIS2.yellowstone_intel
+ PASS SMS.T31_g37_gl10.BGCNIS2.yellowstone_intel.memleak
+ PASS SMS_Ly2.f09_g16_gl10.TGIS2.yellowstone_intel
+ PASS SMS_Ly2.f09_g16_gl10.TGIS2.yellowstone_intel.memleak
+
+
+Externals used for testing: cesm1_3_beta12 with the following diffs:
+
+ Index: SVN_EXTERNAL_DIRECTORIES
+ ===================================================================
+ --- SVN_EXTERNAL_DIRECTORIES (revision 64812)
+ +++ SVN_EXTERNAL_DIRECTORIES (working copy)
+ @@ -1,8 +1,8 @@
+ -scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140813
+ -scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/bluewatersfix_Machines_140811_tags/bluewatersfix02_Machines_140811
+ +scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/rollback_timing_updates_tags/rollback_timing_updates_n02_scripts4_141023
+ +scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/piscees_testing_tags/piscees_testing_n01_Machines_140923
+ tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_140625
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_140702b
+ -models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_15
+ +models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_17
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140723
+ scripts/validation_testing https://svn-ccsm-models.cgd.ucar.edu/validation_testing/trunk_tags/validation_20140708/run_CESM/
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_3_45/models/atm/cam/
+ @@ -10,14 +10,14 @@
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/satm
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xatm
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/dead_share
+ -models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140602
+ +models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sglc
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xglc
+ -models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140711
+ +models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140918b
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_131201
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sice
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xice
+ -models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r079//models/lnd/clm
+ +models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r084//models/lnd/clm
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_131201
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/slnd
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xlnd
+
+ In addition, the following diffs in my yellowstone sandbox, which are needed
+ for the PEA tests:
+
+Index: models/utils/mct/mpi-serial/mpi.h
+===================================================================
+--- models/utils/mct/mpi-serial/mpi.h (revision 1122)
++++ models/utils/mct/mpi-serial/mpi.h (working copy)
+@@ -152,6 +152,8 @@
+
+ } MPI_Status;
+
++#define MPI_STATUS_IGNORE (MPI_Status *)1
++#define MPI_STATUSES_IGNORE (MPI_Status *)1
+
+ /*
+ * Collective operations
+Index: models/utils/mct/mpi-serial/mpif.master.h
+===================================================================
+--- models/utils/mct/mpi-serial/mpif.master.h (revision 1122)
++++ models/utils/mct/mpi-serial/mpif.master.h (working copy)
+@@ -132,6 +132,8 @@
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3)
++ INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
++ INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+
+
+
+Index: models/utils/mct/mpi-serial/mpif.real4double8.h
+===================================================================
+--- models/utils/mct/mpi-serial/mpif.real4double8.h (revision 1122)
++++ models/utils/mct/mpi-serial/mpif.real4double8.h (working copy)
+@@ -132,6 +132,8 @@
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3)
++ INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
++ INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+
+
+
+Index: models/utils/mct/mpi-serial/mpif.real8double16.h
+===================================================================
+--- models/utils/mct/mpi-serial/mpif.real8double16.h (revision 1122)
++++ models/utils/mct/mpi-serial/mpif.real8double16.h (working copy)
+@@ -132,6 +132,8 @@
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3)
++ INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
++ INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+
+
+
+Index: models/utils/mct/mpi-serial/mpif.real8double8.h
+===================================================================
+--- models/utils/mct/mpi-serial/mpif.real8double8.h (revision 1122)
++++ models/utils/mct/mpi-serial/mpif.real8double8.h (working copy)
+@@ -132,6 +132,8 @@
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3)
++ INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
++ INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+
+
+
+cism tag used for baseline comparisons: cism1_141006 and
+piscees_n13_cism1_141006
+
+ yellowstone testing:
+
+ - CISM1 tests bfb with cism1_141006, according to both cism & cpl hist
+ files; CISM2 tests BFAIL with this comparison because the tests were
+ failing on the trunk until now
+
+ - All tests (CISM1 & CISM2) bfb with piscees_n13_cism1_141006, according to
+ both cism & cpl hist files
+
+ titan testing:
+
+ - just compared with piscees_n13_cism1_141006; all tests bfb
+
+Any other externals that differed in baseline: externals were similar, but
+possibly not identical in the baseline
+
+================================================================================
+Originator: sacks
+Date: Oct 6, 2014
+Model: cism
+Version: cism1_141006
+One-line summary: update glimmer-cism to relax tolerances in glimmer_sparse.F90
+
+Purpose of changes:
+
+Merge r1571 from piscees repo for
+libglimmer-solve/glimmer_sparse.F90. This relaxes the tolerances from
+1e-11 to 1e-8.
+
+My purpose in merging this change into the cesm repo is so that SIA
+answers can remain bfb between the cesm repo and the piscees repo.
+
+Changes answers relative to previous tag: YES
+
+ Changes answers for all runs that are long enough to trigger the main CISM code
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update to r64084
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+ All IS2 tests fail (as usual)
+
+ Baseline comparisons fail for many tests, as expected
+
+Externals used for testing: cesm1_3_beta12, with these diffs:
+
+ Index: SVN_EXTERNAL_DIRECTORIES
+ ===================================================================
+ --- SVN_EXTERNAL_DIRECTORIES (revision 64083)
+ +++ SVN_EXTERNAL_DIRECTORIES (working copy)
+ @@ -1,8 +1,8 @@
+ -scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140813
+ +scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140916c
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/bluewatersfix_Machines_140811_tags/bluewatersfix02_Machines_140811
+ tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_140625
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_140702b
+ -models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_15
+ +models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_17
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140723
+ scripts/validation_testing https://svn-ccsm-models.cgd.ucar.edu/validation_testing/trunk_tags/validation_20140708/run_CESM/
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_3_45/models/atm/cam/
+ @@ -10,7 +10,7 @@
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/satm
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xatm
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/dead_share
+ -models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140602
+ +models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sglc
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xglc
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140711
+ @@ -17,7 +17,7 @@
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_131201
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sice
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xice
+ -models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r079//models/lnd/clm
+ +models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r084//models/lnd/clm
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_131201
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/slnd
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xlnd
+
+
+cism tag used for baseline comparisons: cism1_140914
+
+Any other externals that differed in baseline: See ChangeLog entry for
+cism1_140914 - in particular, externals that differed from cesm1_3_beta12 used
+branch versions for cism1_140914
+
+================================================================================
+Originator: sacks
+Date: Sept 16, 2014
+Model: cism
+Version: cism1_140916
+One-line summary: Fix ChangeLog entry for cism1_140914
+
+Purpose of changes:
+
+ I realized that I incorrectly described the reason for answer changes in the
+ IG CLM40 test. I'm fixing that here.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M ChangeLog
+
+
+Summary of testing: NONE
+
+Externals used for testing: N/A
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: Sept 14, 2014
+Model: cism
+Version: cism1_140914
+One-line summary: Add zero_gcm_fluxes option; send icemask_coupled_fluxes field to coupler
+
+Purpose of changes:
+
+In talking with Dave Lawrence, Bill Lipscomb and Jeremy Fyke, I realized that we
+need to introduce more logic to distinguish between active icesheets and
+diagnostic-only icesheets.
+
+One aspect of this is the ability to zero out the fluxes CISM sends to the
+coupler (currently runoff fluxes; later also the heat flux). That way, by
+zeroing out these fluxes and turning off the connection to CLM, we can continue
+to run with an evolving ice sheet in diagnostic-only mode (i.e., 1-way
+coupling). This is achieved via the new zero_gcm_fluxes option. The default for
+zero_gcm_fluxes is based on the GLC_TWO_WAY_COUPLING xml variable. The current
+logic in scripts sets GLC_TWO_WAY_COUPLING to true for CLM45 (or later) compsets
+with CISM, and for TG compsets; otherwise (for CLM40, or for compsets without
+CISM) this is set to false.
+
+In addition, I realized the need to distinguish between areas of active
+icesheets and areas of either no icesheet or diagnostic-only icesheets in
+applying CLM's glc_dyn_runoff_routing switch. In particular, we should only use
+the new runoff routing scheme over areas of icesheet that are generating calving
+fluxes. From the perspective of CISM, this is achieved by computing and sending
+a new icemask_coupled_fluxes field to the coupler (which is then passed to
+CLM). icemask_coupled_fluxes is similar to the existing icemask field, but is
+zero over icesheet instances for which zero_gcm_fluxes is true - which will be
+the case for icesheets that are not generating calving fluxes (e.g.,
+diagnostic-only icesheets).
+
+Changes answers relative to previous tag: YES
+
+ Changes icemask at the roundoff-level, for some resolutions and compilers
+
+ Also, in combination with the new scripts, zeroes out runoff fluxes sent to
+ coupler (g2x_Fogg_rofl and g2x_Fogg_rofi).
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update to r63495: glint changes needed to support the above changes
+M SVN_EXTERNAL_DIRECTORIES
+
+*** add zero_gcm_fluxes namelist option
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+*** send icemask_coupled_fluxes field to the coupler
+M drivers/cpl/glc_cpl_indices.F90
+M drivers/cpl/glc_import_export.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M source_glc/glc_global_fields.F90
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+ All IS2 tests fail (as usual)
+
+ compare_hist failures:
+
+ FAIL CME_Ly5.T31_g37.IG.yellowstone_intel.clm-reduceOutput.compare_hist.cism1_140602_beta12
+ FAIL ERS_Ly5.T31_g37.IGCLM45.yellowstone_intel.clm-glcMEC_long.compare_hist.cism1_140602_beta12
+ FAIL SMS_D.T31_g37.BG1850CN.yellowstone_intel.compare_hist.cism1_140602_beta12
+
+ These show roundoff-level changes in icemask. In addition, the CME test has
+ large diffs in g2x_Fogg_rofl and g2x_Fogg_rofi: These fields are identically
+ 0 in the new run. This makes sense, since zero_gcm_fluxes is set to true for
+ this CLM40 run (which arises in combination with the new scripts version,
+ which sets GLC_TWO_WAY_COUPLING to FALSE for this run).
+
+
+Externals used for testing: cesm1_3_beta12, with these diffs:
+
+ Index: SVN_EXTERNAL_DIRECTORIES
+ ===================================================================
+ --- SVN_EXTERNAL_DIRECTORIES (revision 63201)
+ +++ SVN_EXTERNAL_DIRECTORIES (working copy)
+ @@ -1,8 +1,8 @@
+ -scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140813
+ +scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/glc_runoff_routing_tags/glc_runoff_routing_n01_scripts4_140814a
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/bluewatersfix_Machines_140811_tags/bluewatersfix02_Machines_140811
+ tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_140625
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_140702b
+ -models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_15
+ +models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/branch_tags/glc_runoff_routing_tags/glc_runoff_routing_n01_drvseq5_0_15
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_140723
+ scripts/validation_testing https://svn-ccsm-models.cgd.ucar.edu/validation_testing/trunk_tags/validation_20140708/run_CESM/
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_3_45/models/atm/cam/
+ @@ -10,7 +10,7 @@
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/satm
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xatm
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/dead_share
+ -models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_140602
+ +models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/branches/glc_runoff_routing
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sglc
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xglc
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20140711
+ @@ -17,7 +17,7 @@
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_131201
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sice
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xice
+ -models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_1_r079//models/lnd/clm
+ +models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/branch_tags/glc_runoff_routing_tags/glc_runoff_routing_n02_clm4_5_1_r081
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_131201
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/slnd
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_00/xlnd
+
+However, for the PEA test, I merged the changes from scripts4_140903b into the sandbox.
+
+
+cism tag used for baseline comparisons: cism1_140602
+
+Any other externals that differed in baseline: Baseline used cesm1_3_beta12 with
+updated scripts (scripts4_140905???)
+
+================================================================================
+Originator: sacks
+Date: June 2, 2014
+Model: cism
+Version: cism1_140602
+One-line summary: Update glimmer-cism external with some fixes for CISM2
+
+Purpose of changes:
+
+ Update glimmer-cism external with the following fixes:
+
+ (1) update CMakeLists.txt to fix the yellowstone build when building with trilinos
+
+ (2) fix downscaling of fields in elevation class 0 when running with multiple processors
+
+ (3) set halo values in glint downscaling using parallel_halo calls
+
+Changes answers relative to previous tag: NO (at least not for CISM1 [SIA] runs)
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update to r60744
+M SVN_EXTERNAL_DIRECTORIES
+
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.140530-154133
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.140530-154116
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.clm-reduceOutput.GC.140530-154116
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.140530-154116
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.140530-154116
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.clm-reduceOutput.GC.140530-154116
+
+ Note: The IS2 runs now get further than before, because of problems that have
+ been fixed in this tag. e.g., SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel
+ gets up to a conservation error before stopping (in contrast to dying due to
+ a floating point problem or some other problem previously).
+
+ However, the IGIS2 test fails with:
+
+ 0:NOXSolve called
+ 0:
+ 0:************************************************************************
+ 0:-- Nonlinear Solver Step 0 --
+ 0:||F|| = inf step = 0.000e+00 dx = 0.000e+00
+ 0:************************************************************************
+ 0:
+ 35:
+ 35:p=35: *** Caught standard std::exception of type 'Belos::StatusTestError' :
+ 35:
+ 35: /glade/u/home/rory/trilinos-11.0.3-Source/packages/belos/src/BelosStatusTestGenResNorm.hpp:574:
+ 35:
+ 35: Throw number = 1
+ 35:
+ 35: Throw test that evaluated to true: true
+ 35:
+ 35: StatusTestGenResNorm::checkStatus(): NaN has been detected.
+ 35:INFO: 0031-306 pm_atexit: pm_exit_value is 1.
+
+
+Externals used for testing: cesm1_3_beta07, with many diffs - as in cism1_140501
+
+cism tag used for baseline comparisons: cism1_140501
+
+Any other externals that differed in baseline: none
+
+================================================================================
+Originator: jfyke, sacks
+Date: May 1, 2014
+Model: cism
+Version: cism1_140501
+One-line summary: Add an elevation class 0, for bare land SMB
+
+Purpose of changes:
+
+ Changes from Jeremy Fyke to add an elevation class 0, for bare land SMB. Also
+ adds ice_sheet_grid_mask. Also some minor, unrelated changes as noted below.
+
+ This involves extensive changes to glint, as well as some changes to the glc
+ code.
+
+ In addition, this tag pulls in an unrelated change to the glimmer-cism
+ external: a fix from Stephen Cornford for glint_mbal_io.
+
+Changes answers relative to previous tag: YES
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M source_glc/glc_override_frac.F90
+M source_glc/glc_global_fields.F90
+M drivers/cpl/glc_cpl_indices.F90
+
+*** Also, clean up interfaces for glc_import & glc_export ('use' things directly
+ rather than having them passed through the interface)
+M drivers/cpl/glc_comp_mct.F90
+M drivers/cpl/glc_comp_esmf.F90
+M drivers/cpl/glc_import_export.F90
+
+*** Use some constants from shr_const_mod
+M source_glc/glc_constants.F90
+
+*** Add an option to decrease the mass balance time step, for testing purposes
+ (currently not exercised in any tests)
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures (note: the GEN cases probably would have given CFAIL if I had been
+ patient):
+
+CFAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.140430-125442
+CFAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.clm-reduceOutput.GC.140430-125442
+FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.140430-125442
+GEN SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.140430-125442
+GEN SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.clm-reduceOutput.GC.140430-125442
+CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.140430-125445
+
+ Note: I am now getting a CFAIL instead of a FAIL for the IS2 cases... seems
+ to be something having to do with the yellowstone upgrade... I'm guessing
+ it's a problem with the C++/Fortran link. I get this message:
+
+ cannot find -lmkl_rt
+
+Externals used for testing: cesm1_3_beta07, but with quite a few changes:
+
+ Index: SVN_EXTERNAL_DIRECTORIES
+ ===================================================================
+ --- SVN_EXTERNAL_DIRECTORIES (revision 59711)
+ +++ SVN_EXTERNAL_DIRECTORIES (working copy)
+ @@ -1,23 +1,23 @@
+ -scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_140113
+ -scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_140124
+ +scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/add_dynlu_tests_tags/add_dynlu_tests_n02_scripts4_140305
+ +scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/arfs_tags/arfs_01_mach140218
+ tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_131224
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_131217a
+ -models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_0_08
+ -models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_131231
+ +models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/branch_tags/bare_land_smb_tags/bare_land_smb_n02_drvseq5_0_08
+ +models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/branch_tags/shr_assert_macro_tags/shr_assert_macro_n04_share3_140115
+ scripts/validation_testing https://svn-ccsm-models.cgd.ucar.edu/validation_testing/trunk_tags/validation_20131108/run_CESM/
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_3_23/models/atm/cam
+ -models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_131201
+ +models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_140114
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/satm
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_09/xatm
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_09/dead_share
+ -models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_131213
+ +models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/branch_tags/bare_land_smb_tags/bare_land_smb_n06_cism1_140416
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sglc
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_09/xglc
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20131002
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_131201
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/sice
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_09/xice
+ -models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_5_57/models/lnd/clm
+ +models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/branch_tags/bare_land_smb_tags/bare_land_smb_n14_clm4_5_70/models/lnd/clm
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_131201
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/slnd
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_09/xlnd
+ @@ -27,16 +27,16 @@
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/socn
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_09/xocn
+ models/rof/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof8_131201
+ -models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_36
+ +models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_37
+ models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/srof
+ models/rof/xrof https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_09/xrof
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_130213
+ models/utils/mct https://github.com/quantheory/MCT/tags/compiler_fixes_n04_MCT_2.8.3
+ -models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_8_7/pio
+ +models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_8_11/pio
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_131108
+ models/wav/swav https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_04/swav
+ models/wav/ww3 https://svn-ccsm-models.cgd.ucar.edu/ww3/trunk_tags/ww3_130905
+ models/wav/xwav https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_09/xwav
+ -scripts/ccsm_utils/CMake https://github.com/quantheory/CMake_Fortran_utils/tags/CMake_Fortran_utils_140116
+ +scripts/ccsm_utils/CMake https://github.com/quantheory/CMake_Fortran_utils/tags/CMake_Fortran_utils_140403
+ scripts/doc https://svn-ccsm-models.cgd.ucar.edu/doc/trunk_tags/doc_131021
+ tools/unit_testing https://svn-ccsm-models.cgd.ucar.edu/unit_testing/trunk_tags/unit_testing_0_04
+
+
+cism tag used for baseline comparisons: cism1_140303
+
+Any other externals that differed in baseline: See notes under cism1_140303 below
+
+================================================================================
+Originator: muszala
+Date: April 16, 2014
+Model: cism
+Version: cism1_140416
+One-line summary: bring in SHR_ASSERT macros
+
+Purpose of changes: Removed 'use shr_assert_mod' statements and replace
+'call shr_assert' with SHR_ASSERT or SHR_ASSERT_ALL where appropriate.
+Add #include "shr_assert.h" to modules where macro is used.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/): N/A
+
+List all modified files, and describe the changes:
+
+M source_glc/glc_override_frac.F90
+M drivers/cpl/glc_comp_mct.F90
+M drivers/cpl/glc_comp_esmf.F90
+M drivers/cpl/glc_import_export.F90
+
+Summary of testing:
+
+Tested along with clm4_5_70. This includes yellowstone+{intel,pgi} and
+goldbach+{nag,pgi,intel} and component comp. gen with ys+intel.
+
+Externals used for testing: alpha09b
+
+Cism tag used for baseline comparisons: cism1_140303 in clm4_5_69
+
+Any other externals that differed in baseline: Many, including: Pio, Mct, timing,
+scripts, machines. Please compare clm4_5_69 and clm4_5_70 for all details.
+
+================================================================================
+Originator: sacks
+Date: April 3, 2014
+Model: cism
+Version: cism1_140403
+One-line summary: fix allocate statement so it works with gfortran
+
+Purpose of changes:
+
+ I accidentally used a F2008 feature - trying to pick up array dimensions with
+ allocate(..., source=...); I have fixed this.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M drivers/cpl/glc_import_export.F90
+
+Summary of testing:
+
+ Just ran a subset of tests:
+
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140403-132534
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140403-132534.memleak
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140403-132534.generate.cism1_140403
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140403-132534.compare_hist.cism1_140305
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140403-132534.memcomp.cism1_140305
+FAIL SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140403-132534.tputcomp.cism1_140305
+COMMENT tput_decr = 8.0199999 tput_decr = 3.7669999
+
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140403-132608
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140403-132608.memleak
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140403-132608.generate.cism1_140403
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140403-132608.compare_hist.cism1_140305
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140403-132608.memcomp.cism1_140305
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140403-132608.tputcomp.cism1_140305
+COMMENT tput_decr = 2.2609999
+
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.GC.140403-140743
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.GC.140403-140743.memleak
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.GC.140403-140743.generate.cism1_140403
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.GC.140403-140743.compare_hist.cism1_140305
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.GC.140403-140743.memcomp.cism1_140305
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.GC.140403-140743.tputcomp.cism1_140305
+
+And from component_gen_comp:
+
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.compare_hist.cism1_140305.cism.h
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.generate.cism.h
+
+PASS SMS_D.f09_g16.TG.yellowstone_intel.compare_hist.cism1_140305.cism.h
+PASS SMS_D.f09_g16.TG.yellowstone_intel.generate.cism.h
+
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.compare_hist.cism1_140305.cism.h
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.generate.cism.h
+
+
+Externals used for testing: cesm1_3_beta07, but scripts4_140305 and Machines_140227
+
+cism tag used for baseline comparisons: cism1_140305
+
+Any other externals that differed in baseline: none
+
+================================================================================
+Originator: sacks
+Date: March 5, 2014
+Model: cism
+Version: cism1_140305
+One-line summary: fix capping of increase_frac override
+
+Purpose of changes:
+
+  The capping of increase_frac was accidentally capping each elevation class at
+ 100%, when it should be capping the sum of elevation classes at 100%. That is
+ now fixed.
+
+Changes answers relative to previous tag: NO (only changes test cases that use increase_frac)
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M source_glc/glc_override_frac.F90
+
+Summary of testing:
+
+ Just ran a subset of tests:
+
+PASS ERS_Ld9.f19_g16.IGCLM45.yellowstone_pgi.cism-override_glc_frac.GC.140305-092736
+
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140305-093532
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140305-093532.memleak
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140305-093532.generate.cism1_140305
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140305-093532.compare_hist.cism1_140303
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140305-093532.memcomp.cism1_140303
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140305-093532.tputcomp.cism1_140303
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi.GC.140305-093532.nlcomp
+
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140305-093417
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140305-093417.memleak
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140305-093417.generate.cism1_140305
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140305-093417.compare_hist.cism1_140303
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140305-093417.memcomp.cism1_140303
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140305-093417.tputcomp.cism1_140303
+PASS SMS_D.f09_g16.TG.yellowstone_intel.GC.140305-093417.nlcomp
+
+Externals used for testing: cesm1_3_beta07, but scripts4_140305 and Machines_140227
+
+cism tag used for baseline comparisons: cism1_140303
+
+Any other externals that differed in baseline: baseline used scripts4_140221a
+
+================================================================================
+Originator: sacks
+Date: March 3, 2014
+Model: cism
+Version: cism1_140303
+One-line summary: add options to override glc frac sent to cpl, for testing
+
+Purpose of changes:
+
+ Put in place a number of namelist settings that can be used to modify the glc
+ frac sent from GLC -> CPL. The point of this is to push CLM's dynamic
+ landunits in various ways (e.g., going from 0% glacier to non-zero glacier,
+ from 100% glacier down to 0% glacier, etc.).
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+M source_glc/glc_InitMod.F90
+A source_glc/glc_override_frac.F90
+M source_glc/glc_global_fields.F90
+M drivers/cpl/glc_import_export.F90
+
+*** Unrelated change: fix documentation
+M bld/README.build-namelist
+
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.140228-162551
+FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.clm-reduceOutput.GC.140228-162551
+FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.140228-162551
+FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.140228-162551
+FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.clm-reduceOutput.GC.140228-162551
+CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.140228-162555
+
+
+Externals used for testing: cesm1_3_beta07, but scripts4_140221a and Machines_140227
+
+cism tag used for baseline comparisons: cism1_140203
+
+Any other externals that differed in baseline: baseline used cesm1_3_beta07, unmodified
+
+================================================================================
+Originator: sacks
+Date: Feb 3, 2014
+Model: cism
+Version: cism1_140203
+One-line summary: fix exact restart problem with cism -> cpl fields
+
+Purpose of changes:
+
+ Update glimmer-cism external to fix an exact restart problem that appears
+ with a few of the cism -> cpl fields - specifically, the rofi, rofl and hflx
+ fields. This problem showed up with runs with daily (rather than annual)
+ coupling - i.e., all compsets other than TG. For example, this test was
+ failing:
+
+ ERS_Ly5.T31_g37.IGCLM45.yellowstone_intel
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update from r55984 -> r56989
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.140129-134856
+BFAIL ERS_Ly5.T31_g37.IGCLM45.yellowstone_intel.GC.140129-134851.compare_hist.cism1_131213_beta07
+FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.140129-134851
+FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.clm-reduceOutput.GC.140129-134851
+FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.140129-134851
+FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.140129-134851
+FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.clm-reduceOutput.GC.140129-134851
+
+
+Externals used for testing: cesm1_3_beta07
+
+cism tag used for baseline comparisons: cism1_131213
+
+Any other externals that differed in baseline: none
+
+================================================================================
+Originator: sacks
+Date: Dec 13, 2013
+Model: cism
+Version: cism1_131213
+One-line summary: update glimmer-cism external
+
+Purpose of changes:
+
+ Update glimmer-cism external to r55984. The main change here is to remove
+ prevtemp from the computation of the diagnostic quantity dTtop; this changes
+ answers for hflx, but nothing else.
+
+Changes answers relative to previous tag: YES
+
+ Just changes answers for the hflx term sent to the coupler. Diffs are
+ generally about 1e-6 (relative error), though some RMS errors are as large as
+ 1e-4 (relative error).
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.143014
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.143009
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.clm-reduceOutput.GC.143009
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.143009
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.143009
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.clm-reduceOutput.GC.143009
+
+Externals used for testing: cesm1_3_beta05
+
+cism tag used for baseline comparisons: cism1_131212a
+
+Any other externals that differed in baseline: none
+
+================================================================================
+Originator: sacks
+Date: Dec 12, 2013
+Model: cism
+Version: cism1_131212a
+One-line summary: update glimmer-cism external
+
+Purpose of changes:
+
+ Update glimmer-cism external from r55935 to r55978. This changes the number
+ of seconds in a year to exactly 365 days, in agreement with the rest of CESM.
+
+Changes answers relative to previous tag: YES
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.142829
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.142824
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.clm-reduceOutput.GC.142824
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.142824
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.142824
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.clm-reduceOutput.GC.142824
+
+
+Externals used for testing: cesm1_3_beta05
+
+cism tag used for baseline comparisons: cism1_131212
+
+Any other externals that differed in baseline: none
+
+================================================================================
+Originator: sacks
+Date: Dec 12, 2013
+Model: cism
+Version: cism1_131212
+One-line summary: update glimmer-cism external, fixing fields sent from glc to cpl
+
+Purpose of changes:
+
+ Update glimmer-cism external from r54059 to r55935. The main changes here
+ involve fixing the fields sent from glc to the CESM coupler - specifically,
+ rofi, rofl and hflx. Also, a few other minor changes.
+
+Changes answers relative to previous tag: YES
+
+ Changes answers for all fields sent from GLC -> CPL: SIGNIFICANT differences
+ for hflx, rofi and rofl; and roundoff-level changes for frac and topo.
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.133413
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.133404
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.clm-reduceOutput.GC.133404
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.133404
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.133404
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.clm-reduceOutput.GC.133404
+
+
+Externals used for testing: cesm1_3_beta05
+
+cism tag used for baseline comparisons: cism1_131203
+
+Any other externals that differed in baseline: None
+
+================================================================================
+Originator: sacks
+Date: Dec 3, 2013
+Model: cism
+Version: cism1_131203
+One-line summary: call glc_export in mct initialization
+
+Purpose of changes:
+
+ With the changes in cism1_131008, the esmf side had been changed to call
+ glc_export in initialization, but the mct side had not been changed
+ similarly. This tag makes a simple change to the mct side so that CME tests
+ now pass. (This problem was missed in the testing for cism1_131008 because it
+ only shows up in IG/FG/BG compsets, which were excluded from the testing of
+ that tag.)
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M drivers/cpl/glc_comp_mct.F90
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following expected
+ failures:
+
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.103449
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.103445
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.clm-reduceOutput.GC.103445
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.103445
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.103445
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.clm-reduceOutput.GC.103445
+
+ Note that this test failed in the baseline:
+
+ CME_Ly5.T31_g37.IG.yellowstone_intel.clm-reduceOutput
+
+ However, I manually compared the cpl hist files, and they are identical in
+ the new tag compared with the baseline.
+
+Externals used for testing: cesm1_3_beta05
+
+cism tag used for baseline comparisons: cism1_131008
+
+Any other externals that differed in baseline: None (baselines also generated
+from cesm1_3_beta05)
+
+================================================================================
+Originator: sacks, mvertens
+Date: Oct 8, 2013
+Model: cism
+Version: cism1_131008
+One-line summary: Mariana's changes to cpl infrastructure, update external
+
+Purpose of changes:
+
+(1) Bring in Mariana's refactoring of the cpl infrastructure
+
+(2) Point glimmer-cism external to new repository location
+
+(3) Small update to glimmer-cism external: Bring in Matt Hoffman's
+change to the logic for allocating isostasy variables
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** (1) above
+M bld/cism.buildexe.csh
+D drivers/cpl_share
+D drivers/cpl_mct
+D drivers/cpl_esmf
+A drivers/cpl/glc_comp_mct.F90
+A drivers/cpl/glc_comp_esmf.F90
+A drivers/cpl/glc_import_export.F90
+A drivers/cpl/glc_coupling_flags.F90
+A drivers/cpl/glc_cpl_indices.F90
+A drivers/cpl
+
+*** (2) and (3) above
+M SVN_EXTERNAL_DIRECTORIES
+
+
+Summary of testing:
+
+ Ran just a subset of the yellowstone intel & pgi test lists: Just the TG
+ CISM1 tests. It was not ideal to exclude the IG / FG / BG tests, but I made
+ this compromise because yellowstone is down.
+
+ All PASS except the following expected failure:
+
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.215132
+
+ Update 12-2-13: Ran IG / FG / BG CISM1 tests; all pass EXCEPT:
+
+ *** Known failure; working on resolving this
+ FAIL CME_Ly5.T31_g37.IG.yellowstone_intel.GC.100040
+
+ *** Diffs only in x2l_Flrr_volr
+ FAIL ERI.f19_g16.IGRCP85CN.yellowstone_pgi.GC.100111.compare_hist.cism1_130924
+
+ (component_gen_comp: all PASS, including the two above test failures)
+
+Externals used for testing: cesm1_3_alpha04c
+
+cism tag used for baseline comparisons: cism1_130924
+
+Any other externals that differed in baseline: Baseline used externals at cesm1_3_beta03
+
+================================================================================
+Originator: sacks
+Date: Sept 24, 2013
+Model: cism
+Version: cism1_130924
+One-line summary: Update glimmer-cism external to r1966
+
+Purpose of changes: Bring in latest glimmer-cism external - update from r1950 to
+r1966
+
+Changes answers relative to previous tag: No diffs according to test suite,
+although log messages suggest there may have been some small answer changes.
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following
+ expected failures:
+
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.162146
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.162141
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.GC.162141
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.162141
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.162141
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.GC.162141
+
+
+Externals used for testing: cesm1_3_beta03
+
+cism tag used for baseline comparisons: cism1_130905
+
+Any other externals that differed in baseline: None
+
+================================================================================
+Originator: sacks, tcraig
+Date: Sept 5, 2013
+Model: cism
+Version: cism1_130905
+One-line summary: rework coupling fields, update glimmer-cism external
+
+Purpose of changes:
+
+(1) Rework the runoff coupling fields (rofi & rofl) to fit in with Tony Craig's
+ coupler changes. These fields are now routed to the ocean and/or sea ice,
+ instead of to the land. Also, these are no longer separated by elevation
+ class.
+
+(2) Update glimmer-cism external from r1936 -> r1950. This brings in a number of
+ changes. The most important are some changes from Bill Lipscomb that fix
+ some of the g2x coupling fields, and remove the elevation class dimension
+ from the g2x rofi & rofl fields.
+
+
+Changes answers relative to previous tag: YES
+ Changes all of the g2x coupling fields.
+ However, CISM history files are bfb
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Changes to coupling fields as described above
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M source_glc/glc_global_fields.F90
+M drivers/cpl_share/glc_cpl_indices.F90
+M drivers/cpl_mct/glc_comp_mct.F90
+M drivers/cpl_esmf/glc_comp_esmf.F90
+
+*** Routines to determine whether to send calving flux to ocean or sea ice
+A source_glc/glc_route_ice_runoff.F90
+A drivers/cpl_share/glc_coupling_flags.F90
+
+*** Add a namelist option to choose where calving flux should go
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+*** Update from r1936 -> r1950
+M SVN_EXTERNAL_DIRECTORIES
+
+
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. Testing was done in the context of Tony
+ Craig's changes, which involved the coupler and many other components. The
+ testing documented here was done on cplupa_n03_cism1_130624.
+
+ Baseline comparisons fail for g2x fields and the derived x2l fields. CISM
+ history files are bfb according to component_gen_comp.
+
+ Test failures are:
+
+ *** Expected failures
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel
+
+ *** Failures in IG/FG tests that I think are expected (note: these all failed
+ in the baseline, too; also note that I have TG tests that cover the same
+     kind of functionality - e.g., a CME TG test - so this likely points to
+ problems outside of GLC; also, Tony says that the IG failures are
+ expected)
+ FAIL ERI.f19_g16.IGRCP85CN.yellowstone_pgi
+ FAIL SMS_D.f09_g16.IG20TR.yellowstone_pgi
+ FAIL CME_Ly5.T31_g37.IG.yellowstone_intel
+ FAIL SMS.f19_f19.FG20TRCN.yellowstone_intel
+
+
+Externals used for testing:
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/branch_tags/cesmcosp_cam5_3_xx_tags/cesmcosp_n01_cam5_3_04/models/atm/cam
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_130424
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/branch_tags/cplupa_tags/cplupa_n02_stubs1_4_02/satm
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm130325
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_06/xatm
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/branch_tags/cplupa_tags/cplupa_n01_share3_130528
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/branch_tags/cplupa_tags/cplupa_n02_dead7_7_06/dead_share
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/branch_tags/cplupa_tags/cplupa_n02_drvseq4_2_33
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/branches/cplupa
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/branch_tags/cplupa_tags/cplupa_n02_stubs1_4_02/sglc
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/branch_tags/cplupa_tags/cplupa_n02_dead7_7_06/xglc
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/branch_tags/cplupa_tags/cplupa_n01_cice4_0_20130524
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/branch_tags/cplupa_tags/cplupa_n01_dice8_130313
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/branch_tags/cplupa_tags/cplupa_n02_stubs1_4_02/sice
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/branch_tags/cplupa_tags/cplupa_n02_dead7_7_06/xice
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/branch_tags/cplupa_tags/cplupa_n02_clm4_5_11/models/lnd/clm
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/branch_tags/cplupa_tags/cplupa_n01_dlnd8_130213
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/branch_tags/cplupa_tags/cplupa_n02_stubs1_4_02/slnd
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/branch_tags/cplupa_tags/cplupa_n02_dead7_7_06/xlnd
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_130313
+ models/ocn/pop2 https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20130517
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/branch_tags/cplupa_tags/cplupa_n02_stubs1_4_02/socn
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/branch_tags/cplupa_tags/cplupa_n02_dead7_7_06/xocn
+ models/ocn/aquap https://svn-ccsm-models.cgd.ucar.edu/aquap/trunk_tags/aquap_130503
+ models/rof/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof8_130503
+ models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/branch_tags/cplupa_tags/cplupa_n01_rtm1_0_28
+ models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/branch_tags/cplupa_tags/cplupa_n02_stubs1_4_02/srof
+ models/rof/xrof https://svn-ccsm-models.cgd.ucar.edu/dead7/branch_tags/cplupa_tags/cplupa_n02_dead7_7_06/xrof
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_130213
+ models/utils/mct https://github.com/quantheory/MCT/tags/compiler_fixes_n03_MCT_2.8.3
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_7_2/pio
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_130506
+ models/wav/swav https://svn-ccsm-models.cgd.ucar.edu/stubs/branch_tags/cplupa_tags/cplupa_n02_stubs1_4_02/swav
+ models/wav/ww3 https://svn-ccsm-models.cgd.ucar.edu/ww3/trunk_tags/ww3_130314
+ models/wav/xwav https://svn-ccsm-models.cgd.ucar.edu/dead7/branch_tags/cplupa_tags/cplupa_n02_dead7_7_06/xwav
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/cplupa_tags/cplupa_n02_scripts4_130627
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_130625
+ tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_130529
+ tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_130509
+
+
+cism tag used for baseline comparisons: cplupa_n02_cism1_130624
+ Note: I removed the rofi & rofl coupling stuff from cism so that it would
+ work in the context of the above externals
+
+Any other externals that differed in baseline: Same as above
+
+================================================================================
+Originator: sacks
+Date: June 24, 2013
+Model: cism
+Version: cism1_130624
+One-line summary: update glimmer-cism external
+
+Purpose of changes:
+
+Update glimmer-cism external, from r1876 to r1936.
+
+Also update documentation of config options.
+
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+M bld/namelist_files/namelist_definition_cism.xml
+
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. All PASS except the following
+ expected failures:
+
+ *** No ESMF for PGI
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi
+
+ *** CISM2 failures
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.163141
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.GC.163141
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.163141
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.163141
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.GC.163141
+
+
+ Note that nlcomp failed for 10km & 20km, as expected (with
+ comparison to cism1_130514)
+
+
+Externals used for testing: cesm1_2_beta08, with scripts @ scripts4_130513b
+
+cism tag used for baseline comparisons: cism1_130514
+
+Any other externals that differed in baseline: None.
+
+================================================================================
+Originator: sacks
+Date: May 24, 2013
+Model: cism
+Version: cism1_130524
+One-line summary: change idiag & jdiag for 10km & 20km; allow other
+ sigma values; fix some namelist documentation
+
+
+Purpose of changes: (see summary)
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+Summary of testing:
+
+ Ran the following with comparison to baseline (but NOT component_gen_comp):
+ SMS_D.f09_g16_gl10.TG.yellowstone_pgi
+ ERS_Ly20.f09_g16.TG1850.yellowstone_intel
+ SMS_D.f09_g16.TG.yellowstone_intel
+
+ Ran the following with NO comparison to baseline:
+ SMS_D.f09_g16.IG20TR.yellowstone_intel
+
+ Also, ran the following with CISM_GRID=gland20, also with
+ comparison to baseline (including component_gen_comp):
+ SMS.f09_g16.TG.yellowstone_pgi
+
+ Note that nlcomp failed for 10km & 20km, as expected
+
+Externals used for testing: cesm1_2_beta08, with scripts @ scripts4_130513b
+
+cism tag used for baseline comparisons: cism1_130514
+
+Any other externals that differed in baseline: None
+
+================================================================================
+Originator: sacks
+Date: May 14, 2013
+Model: cism
+Version: cism1_130514
+One-line summary: Update glimmer-cism: use double-precision for all variables
+
+Purpose of changes:
+
+Bring in changes from Bill L to convert all variables to double precision
+
+
+Changes answers relative to previous tag: YES -- answers change by
+single-precision roundoff (as expected), along with some slightly unexpected,
+large changes around the continental margin (at least in acab & temp)
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update from r1863 -> r1876
+M SVN_EXTERNAL_DIRECTORIES
+
+*** Change some variables to double precision
+M source_glc/glc_InitMod.F90
+
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. For this testing, I have switched over to
+ using the xml-based test lists, since they have been fixed to include all
+ desired tests in the latest scripts tag.
+
+ Baseline comparisons fail, as expected. Other than that, failures are:
+
+ *** No ESMF for PGI
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.094304
+
+ *** CISM2 failures
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.094301
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.GC.094301
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.094301
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.094301
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.GC.094301
+
+
+Externals used for testing: cesm1_2_beta08, with scripts @ scripts4_130513b
+
+cism tag used for baseline comparisons: cism1_130502
+
+Any other externals that differed in baseline: Unsure... I may have used
+cesm1_2_beta06 for the baseline by accident, but it doesn't really matter
+because there are known baseline differences in this new tag.
+
+================================================================================
+Originator: sacks
+Date: May 2, 2013
+Model: cism
+Version: cism1_130502
+One-line summary: Update glimmer-cism: fix restart bugs, clean-up from Bill L
+
+Purpose of changes:
+
+Update glimmer-cism from r1849 -> r1863, mainly for two purposes:
+
+- Fixes for restart bugs in non-default configurations (from Matt Hoffman)
+
+- Clean-up (mostly of comments) from Bill Lipscomb
+
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. I used the plain text test lists as they
+ existed before the conversion of test lists to xml, because there were some
+ missing tests in the xml
+
+ All PASS except:
+
+ *** No ESMF for PGI
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.150138
+
+ *** CISM2 failures
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.150142
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.GC.150142
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.150142
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.150142
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.GC.150142
+
+Externals used for testing: cesm1_2_beta06, but PIO @ pio1_6_8
+
+cism tag used for baseline comparisons: cism1_130430a
+
+Any other externals that differed in baseline: No differences
+
+================================================================================
+Originator: sacks
+Date: April 30, 2013
+Model: cism
+Version: cism1_130430a
+One-line summary: Update glimmer-cism external, including getting restarts to
+ work for evolution=2; also add some config options
+
+Purpose of changes:
+
+- Update external (r1846 -> r1849):
+ - some cleanup from Bill Lipscomb
+ - get restarts to work with evolution=2
+
+- Add some config options to namelist_definition file
+
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/namelist_files/namelist_definition_cism.xml
+M SVN_EXTERNAL_DIRECTORIES
+
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. I used the plain text test lists as they
+ existed before the conversion of test lists to xml, because there were some
+ missing tests in the xml
+
+ All PASS except:
+
+ *** No ESMF for PGI
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.084959
+
+ *** CISM2 failures
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.085004
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.GC.085004
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.085004
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.085004
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.GC.085004
+
+
+ Also ran all CISM1 tests from a sandbox that set evolution=2,
+ dt=0.0083333333333333333333333333333333333 by default (for all
+ resolutions). These all pass except:
+
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.102727
+
+ Note, in particular, that all restart tests are now passing for
+ evolution=2. However, note that a small time step is needed for these
+ evolution=2 cases to pass (even using dt=0.025 led to failure of some cases)
+
+Externals used for testing: cesm1_2_beta06, but PIO @ pio1_6_8
+
+cism tag used for baseline comparisons: cism1_130429
+
+Any other externals that differed in baseline: No differences
+
+================================================================================
+Originator: sacks
+Date: April 30, 2013
+Model: cism
+Version: cism1_130430
+One-line summary: Use new glint interfaces
+
+Purpose of changes:
+
+Use new glint interfaces from Bill L.
+
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update glimmer-cism external from r1837 to r1846, to use new glint
+ interfaces (this also includes some other changes that should have no effect
+ on the code run within CESM)
+M SVN_EXTERNAL_DIRECTORIES
+
+*** Use new glint interfaces
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+
+*** Comment out use of deleted itest, jtest (now these are iglint_global and
+ jglint_global, but those are hard-coded values that don't make sense for
+ most runs, so for now it's best not to use them)
+M source_glc/glc_global_grid.F90
+
+*** Unrelated clean-up of documentation
+M bld/namelist_files/namelist_definition_cism.xml
+
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. I used the plain text test lists as they
+ existed before the conversion of test lists to xml, because there were some
+ missing tests in the xml
+
+ All PASS except:
+
+ *** No ESMF for PGI
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.202525
+
+ *** CISM2 failures
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.202556
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.GC.202556
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.202556
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.202556
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.GC.202556
+
+
+Externals used for testing: cesm1_2_beta06, but PIO @ pio1_6_8
+
+cism tag used for baseline comparisons: cism1_130429
+
+Any other externals that differed in baseline: No differences
+
+================================================================================
+Originator: sacks
+Date: April 29, 2013
+Model: cism
+Version: cism1_130429
+One-line summary: Change some parameter values
+
+Purpose of changes:
+
+The previous tag kept parameter values at their cism1 defaults, for testing
+purposes. In this tag, we change parameter values to those that we want moving
+forward. In particular, this changes:
+
+- temp_init: 2 instead of 1
+- basal_mass_balance: 1 instead of 0
+- sigma: 0 instead of 2 for cism_phys=cism1
+
+Changes answers relative to previous tag: YES
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/namelist_files/namelist_defaults_cism.xml
+
+*** fix some comments
+M bld/build-namelist
+
+Summary of testing:
+
+ Yellowstone intel & pgi test lists. I used the plain text test lists as they
+ existed before the conversion of test lists to xml, because there were some
+ missing tests in the xml
+
+ All PASS except:
+
+ *** Missing ESMF library
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.G.061715
+
+ *** CISM2 failures
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.G.061723
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.G.061723
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.G.061723
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.G.061723
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.G.061723
+
+
+Externals used for testing: cesm1_2_beta06, but PIO @ pio1_6_8
+
+cism tag used for baseline comparisons: N/A -- no baseline comparisons performed
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: April 28, 2013
+Model: cism
+Version: cism1_130428
+One-line summary: Cleanup of glimmer-cism external code, with focus on cleaning
+ up config options
+
+Purpose of changes:
+
+- Bring in latest glimmer-cism external, which cleans up config options, as well
+ as a number of other changes from Bill L
+
+- Get cismIO generation working
+
+- Run some tests to make sure that new code gives the same answers as the
+ cesm1.1.1 release
+
+
+Changes answers relative to previous tag: YES
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M SVN_EXTERNAL_DIRECTORIES
+
+*** Get cismIO generation working. This includes getting files from glimmer-cism
+ external rather than maintaining copies of them
+D bld/cismIO/glide_lithot_vars.def
+D bld/cismIO/glint_mbal_vars.def
+D bld/cismIO/glint_vars.def
+D bld/cismIO/ncdf_template.F90.in
+D bld/cismIO/glide_vars.def
+D bld/cismIO/generate_ncvars.py
+M bld/cismIO/cism.buildIO.template.csh
+M bld/cism.cpl7.template
+
+*** Handle Bill L's changes to config options
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+*** Add blank line
+M bld/user_nl_cism
+
+
+Summary of testing:
+
+ (1) Yellowstone intel & pgi test lists. I used the plain text test lists as
+ they existed before the conversion of test lists to xml, because there
+ were some missing tests in the xml
+
+ All PASS except:
+
+ *** Missing ESMF library
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.194914
+
+ *** CISM2 failures
+ FAIL ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.GC.194923
+ FAIL NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel.GC.194923
+ FAIL PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.GC.194923
+ FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel.GC.194923
+ FAIL SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel.GC.194923
+
+
+ (2) Comparisons of latest code base with cesm1.1.1 release code. Expect CISM1
+ compsets to be the same within roundoff, once some code mods were made to
+ the new and old code to correct for known differences (put in place by
+ Bill L). For each of these, I ran one-year TG compsets from the new code
+ base and from cesm1.1.1; I compared log files (first few time steps, and
+ last time step of the year-long run), looked at cprnc results, and did a
+ visual comparison of history files for any fields with large differences
+ according to cprnc.
+
+ (a) baseline, out-of-the-box settings (done by Bill L, reproduced by me)
+
+ (b) basal sliding on (done by Bill L)
+
+ (c) 10 km
+
+ (d) 20 km
+
+ (e) evolution=2
+
+ (f) basal_water=1
+
+ (g) flow_law: 0 in new, 2 in old
+
+ (h) sigma=0 (in old code, implemented this by commenting out the call to
+ glide_read_sigma)
+
+ (i) use Jeremy Fyke's initial conditions, which have been copied to
+ bg40.1850.track1.1deg.006b.cism.r.0863-01-01-00000.nc; along with
+ hotstart=1
+
+ (j) slip_coeff: 5 in old, 3 in new; also, use initial conditions as in
+ (i)
+
+ All of these appeared to differ only by single-precision roundoff, EXCEPT
+ (i) was questionable and (j) had big differences (in cesm1.1.1, (j) was
+ unstable in the old code). The differences in (i) suggest that old cism1
+ restart files MAY not be compatible with the new code base. The
+ differences in (j) might be connected to the use of initial conditions in
+ this run, rather than a problem with slip_coeff per se. For example, it
+ seems that the old code, with Bill L's mods to make the old & new more
+ similar, didn't read bheatflx from the restart file.
+
+
+Externals used for testing: cesm1_2_beta06, but PIO @ pio1_6_8
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: April 25, 2013
+Model: cism
+Version: cism1_130425
+One-line summary: Allow longer case names, and error-check too-long case names
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M source_glc/glc_io.F90
+M source_glc/glc_time_management.F90
+
+Summary of testing:
+
+ Ran standard cism1 tests on yellowstone, from glc aux test lists (for intel &
+ pgi); took these test lists from an older scripts tag from before the
+ conversion to xml, since the xml list has some problems.
+
+ All tests PASSed, except the following expected failures:
+
+ *** No esmf library for pgi
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.GC.145755
+
+ *** PIO broken for mpi-serial
+ RUN PEA_P1_M.f09_g16.TG20TR.yellowstone_intel.GC.145802
+
+ All baseline comparisons, including component_gen_comp, PASSed or BFAILed in
+ an expected way.
+
+
+ Also ran an ERI_Ly44 test with a long testid that resulted in a > 160
+ character case name (old limit was 80, new is 256 characters).
+
+Externals used for testing: cesm1_2_beta06
+
+cism tag used for baseline comparisons: cism1_130405
+
+Any other externals that differed in baseline: None
+
+================================================================================
+Originator: sacks
+Date: April 5, 2013
+Model: cism
+Version: cism1_130405
+One-line summary: Change namelist defaults for cism1
+
+Purpose of changes:
+
+I was getting instabilities in cism1. Resolving these required either
+decreasing the time step or changing evolution back to 0 (rather than the
+new default of 2). Bill L recommended changing evolution back to 0.
+
+So this tag changes these defaults for cism1:
+- evolution=0 rather than 2
+- dt=0.1 rather than 0.025 for gl10
+
+These changes bring the cism1 namelist defaults back to what they were in
+cism1_121114
+
+
+Changes answers relative to previous tag: YES
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/namelist_files/namelist_defaults_cism.xml
+
+Summary of testing:
+
+ NOTE: The testing was accidentally run on a version of code that
+ did NOT include the changes from cism1_130403.
+
+ JUST RAN THE CISM1 TESTS FROM THESE LISTS (NO BASELINE COMPARISONS):
+ - yellowstone_intel.glc.auxtest
+ - yellowstone_pgi.glc.auxtest
+
+ All PASS except the following expected failures:
+ CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.G.205611
+ FAIL CME_Ly5.T31_g37.IG.yellowstone_intel.G.205554
+
+ NOTE THAT THE ERS AND ERI TESTS ARE NOW PASSING!
+
+ CONFIRMING THAT RESTART TESTS ARE NOW WORKING:
+ Ran the following additional tests:
+
+PASS ERS_Ly100.f09_g16.TG.yellowstone_intel.062613
+PASS ERS_Ly100.f09_g16.TG.yellowstone_intel.062613.memleak
+PASS ERI_Ly220.f09_g16.TG20TR.yellowstone_intel.062613
+PASS ERI_Ly220.f09_g16.TG20TR.yellowstone_intel.062613.memleak
+PASS ERS_Ly200.f09_g16_gl10.TG1850.yellowstone_intel.062613
+PASS ERS_Ly200.f09_g16_gl10.TG1850.yellowstone_intel.062613.memleak
+PASS ERI_Ly440.f09_g16_gl10.TGRCP85.yellowstone_intel.062613
+PASS ERI_Ly440.f09_g16_gl10.TGRCP85.yellowstone_intel.062613.memleak
+PASS ERS_Ly20.f09_g16_gl10.TG1850.yellowstone_intel.062613
+PASS ERS_Ly20.f09_g16_gl10.TG1850.yellowstone_intel.062613.memleak
+PASS ERI_Ly44.f09_g16_gl10.TGRCP85.yellowstone_intel.062613
+PASS ERI_Ly44.f09_g16_gl10.TGRCP85.yellowstone_intel.062613.memleak
+
+ SO INDEED, IT SEEMS THAT RESTARTS ARE EXACT NOW, WITH EVOLUTION=0
+
+
+Externals used for testing: cesm1_2_beta05, except scripts4_130405 and
+CLM at branches/newcompsets/models/lnd/clm, r45570
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: April 3, 2013
+Model: cism
+Version: cism1_130403
+One-line summary: Add CISM_OBSERVED_IC option for hybrid runs
+
+Purpose of changes:
+
+Allow cism to ignore the refcase's restart file in a hybrid run by
+setting the CISM_OBSERVED_IC env_run.xml variable to TRUE.
+
+As part of this change, the default value of hotstart is now set in
+build-namelist rather than being set via namelist_defaults_cism.xml.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+
+Summary of testing:
+
+- Test runs where I leave CISM_OBSERVED_IC at its default value (FALSE); compare namelist/config file and cism history file with baseline runs
+ - startup
+ - namelist/config files
+ - hist file
+ - hybrid
+ - namelist/config files
+ - hist file
+ - branch
+ - namelist/config files
+ - hist file
+
+- Test runs where I set CISM_OBSERVED_IC to TRUE; compare namelist/config file and cism history file with baseline runs
+ - startup
+ - namelist/config files
+ NOT DOING - hist file
+ - hybrid: should differ from baseline hybrid, be identical to baseline startup (in particular: hotstart, cisminputfile)
+ - namelist/config files
+ - hist file: should be identical to baseline startup
+ Note: time coordinate variable differs from baseline hybrid
+ - restart file: should be identical to baseline startup
+ Note: time coordinate variable differs from baseline hybrid
+ - compare netcdf headers for hist & restart files
+ - branch: BUILD-NAMELIST SHOULD DIE
+
+
+
+Externals used for testing: cesm1_2_beta05, with updated scripts
+
+cism tag used for baseline comparisons: cism1_130401
+
+Any other externals that differed in baseline: old scripts used for
+baseline runs
+
+================================================================================
+Originator: sacks
+Date: April 1, 2013
+Model: cism
+Version: cism1_130401
+One-line summary: Update external, including significant glide updates
+
+Purpose of changes:
+
+Update glimmer-cism external. Among other things, this includes a
+large set of glide updates from Bill Lipscomb.
+
+Changes answers relative to previous tag: YES
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update glimmer-cism from r1773 to r1785
+M SVN_EXTERNAL_DIRECTORIES
+
+*** Delete bluefire test stubs
+M ChangeLog_template
+
+Summary of testing:
+
+ yellowstone_intel.glc.auxtest & yellowstone_pgi.glc.auxtest
+
+ (skipped hopper glc auxtests because for now I just care about
+ testing CISM1 compsets)
+
+All PASS except:
+
+***** NEW FAILURE: Problems building with mpi-serial with cmake
+RUN PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel.G.162507
+
+***** NEW FAILURE: Problem in CLM or RTM ESMF (should be resolved as of clm4_0_69)
+FAIL CME_Ly5.T31_g37.IG.yellowstone_intel.G.162507
+
+***** Expected failures based on failures in cism1_130207
+FAIL ERS_Ly20.f09_g16.TG1850.yellowstone_intel.G.162507
+FAIL ERI_Ly44.f09_g16.TGRCP85.yellowstone_intel.G.162507
+RUN ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel.G.162507
+FAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_intel.G.162507
+FAIL ERS_Ly20_N2_P2.f09_g16_gl10.TG.yellowstone_pgi.G.162510
+CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi.G.162510
+
+
+Externals used for testing: cesm1_2_beta05
+
+cism tag used for baseline comparisons: N/A: No baseline comparisons done
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: tcraig
+Date: March 25, 2013
+Model: cism
+Version: cism1_130325
+One-line summary: updates for new CISM_GRID env variable
+
+the scripts env variable GLC_GRID was renamed to CISM_GRID and a few changes
+were needed in cism scripts.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M models/glc/cism/bld/trilinosOptions/README
+M models/glc/cism/bld/README.build-namelist
+M models/glc/cism/bld/build-namelist
+M models/glc/cism/bld/cism.buildnml.csh
+
+Summary of testing: Ran a handful of yellowstone prealpha/beta tests
+
+Externals used for testing: cesm1_2_alpha04a+
+
+cism tag used for baseline comparisons: cism1_130315
+
+Any other externals that differed in baseline: Several including a
+ dependency on scripts changes and everything else in alpha05a. This
+ tag depends on the scripts tag in cesm1_2_alpha05a and that
+ scripts tag depends on everything else in alpha05a (add wave model).
+
+================================================================================
+Originator: sacks
+Date: March 15, 2013
+Model: cism
+Version: cism1_130315
+One-line summary: update glimmer-cism, including adding support for 36 columns
+
+Purpose of changes:
+
+Add support for 36 columns.
+
+While I'm at it, update glimmer-cism external to latest revision.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Update glimmer-cism from r1746 to r1773
+ M .
+M SVN_EXTERNAL_DIRECTORIES
+
+Summary of testing:
+
+Ran the following test list, including component_gen_comp (note: this
+is the shortlist.glc.auxtest list, plus a long IG test):
+
+SMS_D.f09_g16.TG
+SMS_D_Ly1.f09_g16_gl10.TGIS2
+ERS_Ly20.f09_g16.TG1850
+SMS_D.f09_g16.IG20TR
+SMS_Ly5.T31_g37.IG
+
+All pass, except the following, which also failed in the baseline:
+FAIL ERS_Ly20.f09_g16.TG1850.yellowstone_intel.GC.141629
+
+Externals used for testing: cesm1_2_alpha03d
+
+cism tag used for baseline comparisons: cism1_130307
+
+Any other externals that differed in baseline: None
+
+================================================================================
+Originator: sacks
+Date: March 7, 2013
+Model: cism
+Version: cism1_130307
+One-line summary: fix dependency appends for new mkDepends... for real, now
+
+Purpose of changes:
+
+Yesterday's fix didn't really do the right thing. Now I have removed the
+awkward awk script, and instead use new functionality in mkDepends to do
+this appending the right way.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/cism.buildexe.csh
+
+Summary of testing:
+
+MINIMAL TESTING DONE: JUST TESTED BUILD FOR A CASE WITH:
+-mach yellowstone -compset IG -res f09_g16
+
+Externals used for testing: cesm1_2_alpha03b, with Machines at Machines_130307
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: March 6, 2013
+Model: cism
+Version: cism1_130306
+One-line summary: fix dependency appends for new mkDepends
+
+Purpose of changes:
+
+The awk script that appended libglimmercismfortran.a to the list of
+dependencies accidentally assumed a space at the end of the lines
+created by mkDepends. The new version of mkDepends doesn't include a
+space at the end of each line, so the dependencies weren't created
+correctly. Now the necessary space is added in the awk script.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Just add a space
+M bld/cism.buildexe.csh
+
+Summary of testing:
+
+MINIMAL TESTING DONE: JUST TESTED BUILD FOR A CASE WITH:
+-mach yellowstone -compset IG -res f09_g16
+
+Externals used for testing: cesm1_2_alpha03b, with Machines at Machines_130304b
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: Feb 7, 2013
+Model: cism
+Version: cism1_130207
+One-line summary: bring cism2 to trunk
+
+Purpose of changes:
+
+Bring cism2 to trunk (merge changes from cism2_cesm_bld_wjs branch). This
+involves:
+
+- bringing in the new glimmer-cism code base as an svn external, rather than
+ using copies of the source files
+
+- updating the build system to handle building glimmer-cism as a standalone
+ library, using its native cmake-based build system
+
+- modifying some source code so GLC works properly in a parallel environment
+ (see README.parallelization for details)
+
+- adding namelist options to support cism2
+
+
+Changes answers relative to previous tag: YES.
+
+There are currently big answer changes (orders of magnitude differences), even
+for cism1 compsets that are supposed to give roughly the same answer. Bill
+Lipscomb is looking into this, and a new tag will be made once these changes are
+resolved.
+
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/): None
+
+List all modified files, and describe the changes:
+
+*** Bring in new glimmer-cism code base as an svn external, delete old copies of
+*** source files
+A SVN_EXTERNAL_DIRECTORIES
+D source_glimmer-cism
+D source_glimmer-cism/glide.F90
+D source_glimmer-cism/glimmer_scales.F90
+D source_glimmer-cism/glint_timestep.F90
+D source_glimmer-cism/smb_dummy.F90
+D source_glimmer-cism/glide_thck.F90
+D source_glimmer-cism/isostasy_setup.F90
+D source_glimmer-cism/isostasy_el.F90
+D source_glimmer-cism/glide_nc_custom.F90
+D source_glimmer-cism/glide_lithot_io.F90
+D source_glimmer-cism/glint_mbal_io.F90
+D source_glimmer-cism/glimmer_ts.F90
+D source_glimmer-cism/isostasy_types.F90
+D source_glimmer-cism/glimmer_searchcircle.F90
+D source_glimmer-cism/glimmer_routing.F90
+D source_glimmer-cism/glimmer_daily_pdd.F90
+D source_glimmer-cism/xls.F90
+D source_glimmer-cism/glint_mbal.F90
+D source_glimmer-cism/glide_temp.F90
+D source_glimmer-cism/glint_climate.F90
+D source_glimmer-cism/glimmer_utils.F90
+D source_glimmer-cism/glimmer_map_init.F90
+D source_glimmer-cism/glimmer_anomcouple.F90
+D source_glimmer-cism/glimmer_filenames.F90
+D source_glimmer-cism/glimmer_ncparams.F90
+D source_glimmer-cism/glimmer_writestats.F90
+D source_glimmer-cism/glimmer_restart_gcm.F90
+D source_glimmer-cism/glint_initialise.F90
+D source_glimmer-cism/glimmer_paramets.F90
+D source_glimmer-cism/glimmer_vers.F90
+D source_glimmer-cism/glide_lithot1d.F90
+D source_glimmer-cism/glimmer_map_CFproj.F90
+D source_glimmer-cism/glide_mask.F90
+D source_glimmer-cism/glint_io.F90
+D source_glimmer-cism/glide_setup.F90
+D source_glimmer-cism/glide_profile.F90
+D source_glimmer-cism/glimmer_map_proj4.F90
+D source_glimmer-cism/glide_types.F90
+D source_glimmer-cism/glide_velo.F90
+D source_glimmer-cism/glimmer_global.F90
+D source_glimmer-cism/glimmer_map_types.F90
+D source_glimmer-cism/glimmer_deriv.F90
+D source_glimmer-cism/glimmer_coordinates.F90
+D source_glimmer-cism/glimmer_ncdf.F90
+D source_glimmer-cism/kelvin.F90
+D source_glimmer-cism/glide_stop.F90
+D source_glimmer-cism/ncdf_utils.F90
+D source_glimmer-cism/glint_example_clim.F90
+D source_glimmer-cism/glimmer_log.F90
+D source_glimmer-cism/glimmer_integrate.F90
+D source_glimmer-cism/glint_precip_param.F90
+D source_glimmer-cism/glint_global_grid.F90
+D source_glimmer-cism/glint_constants.F90
+D source_glimmer-cism/glimmer_sparse.F90
+D source_glimmer-cism/glide_diagnostics.F90
+D source_glimmer-cism/isostasy.F90
+D source_glimmer-cism/glint_global_interp.F90
+D source_glimmer-cism/glint_main.F90
+D source_glimmer-cism/glimmer_config.F90
+D source_glimmer-cism/glimmer_pdd.F90
+D source_glimmer-cism/glint_mpinterp.F90
+D source_glimmer-cism/glint_interp.F90
+D source_glimmer-cism/glide_lithot3d.F90
+D source_glimmer-cism/glimmer_physcon.F90
+D source_glimmer-cism/glimmer_map_trans.F90
+D source_glimmer-cism/profile.F90
+D source_glimmer-cism/glimmer_ncio.F90
+D source_glimmer-cism/glide_lithot.F90
+D source_glimmer-cism/glint_mbal_coupling.F90
+D source_glimmer-cism/glide_io.F90
+D source_glimmer-cism/glint_type.F90
+D source_slap
+D source_slap/dmset.F
+D source_slap/dlaputil.F
+D source_slap/xersla.F
+D source_slap/dcgs.F
+D source_slap/blas.F
+D source_slap/dmvops.F
+D source_slap/mach.F
+
+*** Add xml files giving trilinos options; the correct one (based on resolution)
+*** is copied to the run directory
+A bld/trilinosOptions
+A bld/trilinosOptions/trilinosOptions_gland10.xml
+A bld/trilinosOptions/trilinosOptions_gland20.xml
+A bld/trilinosOptions/trilinosOptions_gland5.xml
+A bld/trilinosOptions/trilinosOptions_gland5UM.xml
+A bld/trilinosOptions/README
+M bld/cism.buildnml.csh
+M bld/README
+
+*** New build system for cism2, which includes building a separate glimmer-cism
+*** library using a cmake-based build system
+M bld/cism.buildexe.csh
+
+*** Support new config options for cism2, including new xml variables
+*** "CISM_PHYS" and "CISM_USE_TRILINOS". Also change a few defaults for cism1:
+*** evolution=2 rather than 0, and dt=0.025 rather than 0.1 for 10km (to achieve
+*** stability when using evolution=2)
+M bld/build-namelist
+M bld/README.build-namelist
+M bld/namelist_files/namelist_definition_cism.xml
+M bld/namelist_files/namelist_defaults_cism.xml
+
+*** Use new sourceMods directory for glimmer-cism-specific code
+*** (src.cism/glimmer-cism)
+M bld/cism.cpl7.template
+
+*** Support operation in a parallel environment
+M mpi/glc_communicate.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_global_grid.F90
+M source_glc/glc_io.F90 --------- also, minor changes for new glimmer-cism
+ i/o interface
+M drivers/cpl_mct/glc_comp_mct.F90
+M drivers/cpl_esmf/glc_comp_mct.F90
+M drivers/cpl_esmf/glc_comp_esmf.F90
+A README.parallelization
+
+Summary of testing:
+
+------------------------------------------------------------------------
+Core testing
+------------------------------------------------------------------------
+
+Ran the following from https://svn-ccsm-models.cgd.ucar.edu/cesm1/exp_tags/cism2_bld_tags/cism2_bld_21_cesm1_2_beta01
+
+For each machine, I first give failures, then passes. All of these
+failures should be resolved at some point (except the titan failures,
+which basically duplicate the hopper failures: no need to rerun titan
+tests in the future).
+
+Yellowstone: yellowstone_intel.glc.auxtest & yellowstone_pgi.glc.auxtest
+
+***** Restarts currently are not bfb
+FAIL ERS_Ly20.f09_g16.TG1850.yellowstone_intel
+FAIL ERI_Ly44.f09_g16.TGRCP85.yellowstone_intel
+FAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_intel
+FAIL ERS_Ly20_N2_P2.f09_g16_gl10.TG.yellowstone_pgi
+***** Hangs in initialization of the last run (the continue run)
+***** (similar to the ERS IS2 run that fails on hopper)
+RUN ERI_Ly15.f09_g16_gl10.TG20TRIS2.yellowstone_intel
+***** No ESMF library for PGI
+CFAIL ERS_Ly20_E.f09_g16.TG20TR.yellowstone_pgi
+
+PASS SMS_D.f09_g16.TG.yellowstone_intel
+PASS SMS_D_Ly1.f09_g16_gl10.TGIS2.yellowstone_intel
+PASS PEA_P1_M.f09_g16.TG20TR.yellowstone_intel
+PASS PEA_P1_M_Ly2.f09_g16_gl10.TGIS2S.yellowstone_intel
+PASS NCK.f09_g16.TG.yellowstone_intel
+PASS NCK_Ly3.f09_g16_gl10.TGRCP85IS2.yellowstone_intel
+PASS CME_Ly10_N2_P2_D.f09_g16.TG1850.yellowstone_intel
+PASS CME_Ly5.T31_g37.IG.yellowstone_intel
+PASS SMS.f19_f19.FG20TRCN.yellowstone_intel
+PASS SMS_D.T31_g37.BG1850CN.yellowstone_intel
+PASS SMS_Ly3.T31_g37_gl10.IGIS2.yellowstone_intel
+PASS SMS_D.f09_g16_gl10.TG.yellowstone_pgi
+PASS SMS_D.f09_g16.IG20TR.yellowstone_pgi
+PASS ERI.f19_g16.IGRCP85CN.yellowstone_pgi
+
+
+Hopper: hopper_gnu.glc.auxtest & hopper_pgi.glc.auxtest
+
+***** Hangs in initialization of GLC for the restart run
+RUN ERS_Ly11.f09_g16_gl10.TG1850IS2.hopper_gnu
+***** Dies while writing history file: subscript out of range (Dies
+***** while trying to write uflx: The problem is that it's trying to
+***** write 1:local_ewn, but for uflx, values only go 1:18, since it's
+***** on the flux grid. I think this is due to glint using some glide
+***** init routines rather than glissade, so nhalo has the wrong
+***** value)
+FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.hopper_pgi
+***** Dies as follows:
+***** (shr_mct_sMatReaddnc) reading mapping matrix data decomposed...
+***** (shr_mct_sMatReaddnc) * file name : idmap
+***** (shr_sys_abort) ERROR: (shr_mct_sMatReaddnc)No such file or directory
+***** I believe I have fixed this with a drv change, and have tested
+***** SMS.T31_g37_gl10.BGCNIS2.yellowstone_intel, but haven't rerun
+***** this exact test
+FAIL SMS_D.T31_g37_gl10.BGCNIS2.hopper_pgi
+
+PASS SMS_D_Ly1.f09_g16_gl10.TGIS2.hopper_gnu
+PASS SMS.f19_f19_gl10.FGCNIS2.hopper_gnu
+PASS CME_Ly3.f09_g16_gl10.TGRCP85IS2.hopper_pgi
+
+
+
+Titan: titan.glc.auxtest:
+
+***** See notes for the similar hopper_pgi test
+FAIL SMS_D_Ly1.f09_g16_gl10.TGIS2.titan_pgi
+***** See notes for the similar hopper_gnu test
+RUN ERS_Ly11.f09_g16_gl10.TG1850IS2.titan_pgi
+***** See notes for the similar hopper_pgi test
+FAIL SMS_D.T31_g37_gl10.BGCNIS2.titan_pgi
+
+PASS CME_Ly3.f09_g16_gl10.TGRCP85IS2.titan_pgi
+PASS SMS.f19_f19_gl10.FGCNIS2.titan_pgi
+
+
+
+------------------------------------------------------------------------
+Additional CISM1 testing on multiple machines/compilers
+------------------------------------------------------------------------
+
+Ran the following test list:
+[SMS_Ly5 test is just in case the CME test fails because of lack of an esmf library]
+[B is to test a NON-GLC compset, to make sure I haven't broken that]
+
+SMS_D_Ly2.f09_g16.TGIS1
+SMS_D.f09_g16.IG20TRIS1
+SMS_Ly5.f09_g16_gl10.TG1850IS1
+CME_Ly5.f09_g16_gl10.TG1850IS1
+SMS.T31_g37.B
+
+on the following machines & compilers:
+
+- hopper
+ - pgi
+- frankfurt
+ - intel
+ - pgi
+- yellowstone
+ - pgi
+ - intel
+ - gnu
+- titan
+ - pgi
+- intrepid
+ - ibm
+
+(I did NOT run on Janus, assuming it's close enough to
+yellowstone. However, I DID test a TG build on Janus)
+
+In general, used: https://svn-ccsm-models.cgd.ucar.edu/cesm1/exp_tags/cism2_bld_tags/cism2_bld_17_cesm1_2_beta01
+(on yellowstone, used the following for the B compset test; this updates POP:
+https://svn-ccsm-models.cgd.ucar.edu/cesm1/exp_tags/cism2_bld_tags/cism2_bld_18_cesm1_2_beta01)
+(on yellowstone, used the following for the gnu tests:
+https://svn-ccsm-models.cgd.ucar.edu/cesm1/exp_tags/cism2_bld_tags/cism2_bld_19_cesm1_2_beta01)
+
+IMPORTANT TEST FAILURES (I want to resolve these at some point):
+
+FAIL SMS_D_Ly2.f09_g16.TGIS1.frankfurt_intel
+FAIL SMS_Ly5.f09_g16_gl10.TG1850IS1.frankfurt_intel
+
+UNIMPORTANT TEST FAILURES:
+
+***** I think these are just because there are no ESMF libraries for these machines/compilers:
+CFAIL CME_Ly5.f09_g16_gl10.TG1850IS1.frankfurt_intel
+CFAIL CME_Ly5.f09_g16_gl10.TG1850IS1.frankfurt_pgi
+CFAIL CME_Ly5.f09_g16_gl10.TG1850IS1.yellowstone_pgi
+CFAIL CME_Ly5.f09_g16_gl10.TG1850IS1.yellowstone_gnu
+CFAIL CME_Ly5.f09_g16_gl10.TG1850IS1.intrepid_ibm
+
+***** Dies in lnd init. Sheri says no I compsets are being tested on
+***** intrepid, so I'm not going to worry about this. The following
+***** two additional tests passed on intrepid:
+***** ERS.f09_f09.FG1850CN.intrepid_ibm and ERS_Ld7.f09_g16.BGRCP26CN
+FAIL SMS_D.f09_g16.IG20TRIS1.intrepid_ibm
+
+***** Also, Sheri said that she could only get glc to build if she set
+***** NTHRDS_GLC=1 (weird!) -- otherwise xlf hung while compiling
+***** glide_thck.F90. For now I have changed the PE layouts to set
+***** NTHRDS_GLC=1 by default on intrepid, as a workaround.
+
+
+------------------------------------------------------------------------
+
+Externals used for testing: See above
+
+cism tag used for baseline comparisons: N/A -- no baseline comparisons done,
+because big answer changes were expected (at some point, once some bugs are
+fixed, we should probably do baseline comparisons against cism1_121114 to
+confirm that cism1 compsets give [nearly] identical answers)
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: Nov 14, 2012
+Model: cism
+Version: cism1_121114
+One-line summary: remove docs folder
+
+Purpose of changes:
+
+Remove duplicated docs folder. Instead, see
+https://svn-cism-model.cgd.ucar.edu/docs
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+D doc
+D doc/CESM_ice_sheets_documentation.pdf
+D doc/glimmer_doc.pdf
+D doc/README
+
+Summary of testing: NONE
+
+- bluefire.glc.auxtest: main test results: NOT DONE
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: NOT DONE
+
+- Other tests: N/A
+
+Externals used for testing: N/A
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: jwolfe
+Date: Nov 13, 2012
+Model: cism
+Version: cism1_121113
+One-line summary: Add parallel support in source_glc code
+
+Purpose of changes:
+
+In preparation for bringing cism2 in, some modifications were needed
+in source_glc so that this interface layer could operate in a parallel
+environment.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+A mpi/glc_broadcast.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_global_grid.F90
+M source_glc/glc_io.F90
+M source_glc/glc_time_management.F90
+
+Summary of testing: (Testing done by Bill Sacks)
+
+- bluefire.glc.auxtest: main test results: All PASS
+
+ Note: I made a mistake in running the baseline comparisons, so ran
+ them after the fact with component_gen_comp -model cpl
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: All PASS
+
+- Other tests: Jon Wolfe ran some tests specific to the new
+ functionality
+
+Externals used for testing: cesm1_1_beta18
+
+cism tag used for baseline comparisons: cism1_121012
+
+Any other externals that differed in baseline: None
+
+================================================================================
+Originator: sacks
+Date: Nov 6, 2012
+Model: cism
+Version: cism1_121106a
+One-line summary: Change name of documentation file
+
+Purpose of changes:
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+A + doc/CESM_ice_sheets_documentation.pdf
+D doc/cism_doc.pdf
+M doc/README
+
+Summary of testing: NONE
+
+- bluefire.glc.auxtest: main test results: NOT DONE
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: NOT DONE
+
+- Other tests: NONE
+
+Externals used for testing: N/A
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: Nov 6, 2012
+Model: cism
+Version: cism1_121106
+One-line summary: Update documentation
+
+Purpose of changes:
+
+Update documentation for CESM1.1 release
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Add the CISM documentation to the repository
+A doc/cism_doc.pdf
+
+*** Update to version 1.5.1 (rather than 1.0.0)
+M doc/glimmer_doc.pdf
+
+*** Update notes about what's included in the doc directory
+M doc/README
+
+*** Add notes about how to add a new optional section in cism.config
+M bld/README.build-namelist
+
+Summary of testing: NONE (JUST CHANGED DOCUMENTATION)
+
+- bluefire.glc.auxtest: main test results: NOT DONE
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: NOT DONE
+
+- Other tests: NONE
+
+Externals used for testing: N/A
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: sacks
+Date: Oct 12, 2012
+Model: cism
+Version: cism1_121012
+One-line summary: add namelist documentation, validation of dt,
+ add/remove namelist items
+
+Purpose of changes:
+
+Main purpose is adding namelist documentation. Also added validation of dt in
+build-namelist (can't add this through simple valid_values).
+
+Also added / removed a few namelist parameters: Add scale_factor in cism.config:
+projection section. Removed namelist parameters which seem not to be used
+anywhere: isostasy, sliding_law, stress_calc, isos_time
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Add lots of documentation & valid_values;
+*** add scale_factor namelist parameter;
+*** remove isostasy, sliding_law, stress_calc, isos_time;
+*** change standard_parallel to length-2 array
+*** change categories for html documentation
+M bld/namelist_files/namelist_definition_cism.xml
+
+*** Add validation of dt; remove isostasy, sliding_law, stress_calc, isos_time
+M bld/build-namelist
+
+*** remove isostasy, sliding_law, stress_calc, isos_time
+M bld/namelist_files/namelist_defaults_cism.xml
+
+*** Mostly unrelated: Add documentation
+M bld/README.build-namelist
+
+*** Update ChangeLog entry for cism1_121009: document some more careful
+*** checks of the reasons for differences from baseline
+M ChangeLog
+
+Summary of testing:
+
+- bluefire.glc.auxtest: main test results: All PASS except:
+
+(Ignoring failures in NLComp & tputcomp)
+
+***** Test not run in baseline case (this test was deleted in a recent scripts tag)
+BFAIL ERS_Ly20_E.f09_g16_gl10.TG20TR.bluefire_ibm.C.161152.compare_hist.cesm1_1_alpha18h_cism_smallerDT
+
+***** Diffs just in x2a dust fluxes, due to externals change compared to baseline
+FAIL SMS.f19_f19.FG20TRCN.bluefire_ibm.GC.063931.compare_hist.cesm1_1_alpha18f_scripts4_121008_clm_newGlacierData2_dlnd_121001_cism_changeTimeToDouble
+FAIL SMS_D.T31_g37.BG1850CN.bluefire_ibm.GC.063931.compare_hist.cesm1_1_alpha18f_scripts4_121008_clm_newGlacierData2_dlnd_121001_cism_changeTimeToDouble
+
+So none of these failures are worrisome
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: All PASS except:
+
+***** Test not run in baseline case (this test was deleted in a recent scripts tag)
+BFAIL ERS_Ly20_E.f09_g16_gl10.TG20TR.bluefire_ibm.compare_hist.cesm1_1_alpha18h_cism_smallerDT.cism (baseline directory does not exist)
+
+
+- Other tests:
+
+ - out-of-the-box TG compset cism_in & cism.config identical to old
+ case, except for the 4 removed parameters (and runid in cism_in)
+
+Externals used for testing:
+
+https://svn-ccsm-models.cgd.ucar.edu/cesm1/tags/cesm1_1_alpha18g except:
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_121008
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_54/models/lnd/clm
+
+cism tag used for baseline comparisons: cism1_121009 for most tests;
+cism1_121010 for _gl10 tests
+
+Any other externals that differed in baseline: MANY! (used baselines generated
+from cism1_121009 and cism1_121010, using externals as listed below)
+
+================================================================================
+Originator: sacks
+Date: Oct 10, 2012
+Model: cism
+Version: cism1_121010
+One-line summary: Decrease dt for resolutions other than gland5UM
+
+Purpose of changes:
+
+I found that temperature was blowing up for out-of-the-box gland10
+cases, ever since the cism1_121001 tag. Jeremy Fyke & Bill Lipscomb
+suggested fixing this by decreasing dt to 0.1.
+
+I am decreasing dt to 0.1 for gland10 & gland20, and to 0.05 for
+gland5 (same as what is already the case for gland5UM).
+
+Changes answers relative to previous tag: YES, for gland10, gland20
+and gland5 (but not gland5UM)
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Change defaults for dt & ndiag
+M bld/namelist_files/namelist_defaults_cism.xml
+
+*** Update ChangeLog entry for cism1_121009: document additional testing
+M ChangeLog
+
+
+Summary of testing:
+
+- bluefire.glc.auxtest: main test results: NOT DONE
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: NOT DONE
+
+- Other tests:
+
+Ran the single test from the bluefire.glc.auxtest list that tests a
+resolution other than gland5UM:
+
+PASS ERS_Ly20_N2_P2.f09_g16_gl10.TG.bluefire_ibm.G.132320
+
+Also, ran 300-year tests with TG compset, GLC_GRID=gland10 or
+gland20. Jeremy Fyke confirmed that temperature and other values look
+reasonable.
+
+
+Externals used for testing:
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_121009b
+
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_121009
+
+ mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_120929
+
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_07
+
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_121003
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_1_44/models/atm/cam
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm120726
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_54/models/lnd/clm
+
+ models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_08
+
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20120927
+
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+
+ models/ocn/pop2 https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20120927
+
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_121008
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_121001
+ models/rof/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof8_120924
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_120925
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_120921
+
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xatm
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xlnd
+ models/rof/xrof https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xrof
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xice
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xocn
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xglc
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/dead_share
+
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/satm
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/slnd
+ models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/srof
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sice
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/socn
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sglc
+
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_120731
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_120427
+
+ models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.2
+
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_5_4/pio
+
+
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: lipscomb / sacks
+Date: Oct 9, 2012
+Model: cism
+Version: cism1_121009
+One-line summary: Change time to double precision to fix restart problem
+
+Purpose of changes:
+
+Starting with cism1_121001, some ERS tests were failing. Bill Lipscomb
+traced this to the mixture of single and double precision variables
+(and in one case an integer!) that are used to track time: a double
+precision time variable was being converted to single precision before
+being written to the restart file.
+
+We have changed these to be double precision universally. Note that
+this just refers to the time variables that are specified as
+fractional years: time variables that give number of hours are still
+integers.
+
+Also: Fix GetValueDouble to read in value as double precision
+
+Note that Bill L has also implemented these fixes in the seacism
+branch of cism2.
+
+Changes answers relative to previous tag: YES
+
+ I expected answer changes on the order of single precision
+ roundoff. However, the actual answer changes are larger than that for
+ select grid cells (cprnc's worst decimal digits between 1 and 2 for
+ some variables). I suspect this is just due to non-linear feedbacks in
+ the system, but haven't confirmed that; I am doing more investigation
+ of this, but needed to make this tag before that investigation is
+ complete.
+
+ - update 10-12-12: diffs in first year are less, then diffs
+ increase. From looking quickly at maps of diffs in first year,
+ I'm not too concerned about this - I can believe that these diffs
+ are really due to single vs double precision, with initial small
+ differences expanding over time due to feedbacks in the system.
+
+ Also, there are differences in a gland10 run, too; I did not expect
+ these differences, since dt=1 there. This is also undergoing additional
+ investigation.
+
+ - update 10-11-12: note that geothermal heat flux is now read as
+ double rather than single precision. That could be the
+ explanation. Another explanation could be that time is used in
+ calculations that are now done in double rather than single
+ precision. I'm not too concerned because diffs in first year are
+ very small.
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Main changes: change time variables to double precision
+M source_glc/glc_io.F90
+M source_glimmer-cism/glide_thck.F90
+M source_glimmer-cism/glide_lithot_io.F90
+M source_glimmer-cism/glint_mbal_io.F90
+M source_glimmer-cism/glimmer_ncparams.F90
+M source_glimmer-cism/glint_io.F90
+M source_glimmer-cism/glide_diagnostics.F90
+M source_glimmer-cism/glimmer_ncio.F90
+M source_glimmer-cism/glide_io.F90
+M source_glimmer-cism/glint_type.F90
+M source_glimmer-cism/glimmer_ncdf.F90
+ - includes change of an integer to double
+M source_glimmer-cism/glide.F90
+ - also change unit 6 to stdout, changed some diagnostic output
+M source_glimmer-cism/glide_types.F90
+ - also change defaults for tend, tinc
+ - also fix typo in comment
+M source_glimmer-cism/glint_main.F90
+ - also add some diagnostic output
+ - also change diagnostic output to write model%numerics%time rather
+ than the local 'time' variable -- latter contains the time at the
+ start of the glint timestep, which can be up to a year behind
+M source_glimmer-cism/glint_timestep.F90
+ - also add diagnostic output
+
+*** Fix GetValueDouble to read in value as double precision
+M source_glimmer-cism/glimmer_config.F90
+
+*** Unrelated change: add a comment
+M bld/namelist_files/namelist_defaults_cism.xml
+
+
+Summary of testing:
+
+- bluefire.glc.auxtest: main test results: All PASS except:
+
+(Not listing failures in NLComp [which I don't think is working right] or
+tputcomp)
+
+FAIL SMS_D.f09_g16.TG.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+FAIL PEA_P1.f09_g16.TG20TR.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+BFAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+FAIL ERI_Ly44.f09_g16.TGRCP85.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+FAIL ERS_Ly20_N2_P2.f09_g16_gl10.TG.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+FAIL NCK_P2.f09_g16.TG.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+FAIL CME_Ly10_N2_P2_D.f09_g16.TG1850.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+BFAIL ERS_Ly20_E.f09_g16.TG20TR.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+FAIL ERS_Ly20_E.f09_g16_gl10.TG20TR.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+FAIL CME_Ly5_PL.T31_g37.IG.bluefire_ibm.GC.123515.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+
+These are all failures in history comparisons, which are expected.
+
+Note that the previously-failing ERS tests are now passing.
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: All PASS except:
+
+FAIL CME_Ly10_N2_P2_D.f09_g16.TG1850.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL CME_Ly5_PL.T31_g37.IG.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL ERI_Ly44.f09_g16.TGRCP85.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL ERS_Ly20_E.f09_g16.TG20TR.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL ERS_Ly20_E.f09_g16_gl10.TG20TR.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL ERS_Ly20_N2_P2.f09_g16_gl10.TG.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL NCK_P2.f09_g16.TG.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL PEA_P1.f09_g16.TG20TR.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+FAIL SMS_D.f09_g16.TG.bluefire_ibm.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002.cism
+
+These are all expected failures, except I did not expect failures in the two
+gl10 comparisons. I am investigating this further.
+
+- Other tests:
+
+Note: these two tests below used a user_nl_dir, which set cism's dt=0.05:
+ERS_D_Ly20.f09_g16_gl10.TG1850.bluefire_ibm
+ERI_D_Ly44.f09_g16_gl10.TGRCP85.bluefire_ibm
+
+PASS ERS_D_Ly200.f09_g16_gl10.TG1850.bluefire_ibm.134929
+PASS ERS_Ly100.f09_g16.TG.bluefire_ibm.204752
+PASS ERS_D_Ly20.f09_g16_gl10.TG1850.bluefire_ibm.204752
+PASS ERI_D_Ly44.f09_g16_gl10.TGRCP85.bluefire_ibm.204752
+PASS ERI_D_Ly220.f09_g16_gl10.TGRCP85.bluefire_ibm.204752
+
+Externals used for testing:
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_121008
+
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_120921
+
+ mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_120816
+
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_02
+
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_120918
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_1_42/models/atm/cam
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm120726
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/branch_tags/new_glacier_data2_tags/new_glacier_data2_06_clm4_0_52/models/lnd/clm
+
+ models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_08
+
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20120927
+
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/branches/change_time_to_double
+
+ models/ocn/pop2 https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20120927
+
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_120925b
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_121001
+ models/rof/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof8_120924
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_120925
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_120921
+
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xatm
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xlnd
+ models/rof/xrof https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xrof
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xice
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xocn
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xglc
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/dead_share
+
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/satm
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/slnd
+ models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/srof
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sice
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/socn
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sglc
+
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_120731
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_120427
+
+ models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.2
+
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_5_4/pio
+
+
+cism tag used for baseline comparisons: cism1_121002
+
+Any other externals that differed in baseline:
+- scripts4_120930
+- dlnd8_120918
+
+================================================================================
+Originator: sacks
+Date: Oct 8, 2012
+Model: cism
+Version: cism1_121008
+One-line summary: add some namelist items, remove others
+
+Purpose of changes:
+
+Add some namelist items to namelist_definition_cism that users may want to
+change. Remove some items that should not be changed.
+
+Requires some new functionality in build-namelist to only create a section
+in cism.config under certain conditions.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+Summary of testing:
+
+- bluefire.glc.auxtest: main test results: NOT DONE
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: NOT DONE
+
+- shortlist.glc.auxtest on bluefire: main test results: All PASS except:
+
+FAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.C.165451
+BFAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.C.165451.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+
+These are expected failures based on cism1_121002 test results
+
+- shortlist.glc.auxtest: results of component_gen_comp -model cism: All PASS
+
+- Other tests:
+
+ - Confirmed that build-namelist gives error when trying to define
+ variables in gthf section, without setting do_gthf to .true.; same for
+ isostasy section; same for elastic lithosphere section, but this keys
+ off of lithosphere==1
+
+ - Confirmed that GTHF section isn't output when do_gthf is not set to
+ .true.; same for isostasy section; same for elastic lithosphere
+ section, but this keys off of lithosphere==1
+
+ - With empty user_nl_cism, gives identical cism.config to that in
+ cism1_121002.
+
+ - Added all new parameters to user_nl_cism, with values different from
+ their defaults (along with do_gthf=.true., do_isostasy=.true.)
+ - Checked cism.config: made sure each appears in the right section
+ - Ran model, checked output to make sure they are all read in correctly
+
+ Note: this job died with the error:
+
+ * FATAL ERROR : (/glade/proj3/cseg/people/sacks/cesm_code/cesm1_1_alpha18f_newCISM/models/glc/cism/source_glimmer-cism/glide_nc_custom.F90:1
+29) NetCDF: Start+count exceeds dimension bound
+
+ I have filed a bug report about this (1560); I am not going to worry
+ about this for now.
+
+Externals used for testing:
+
+NOTE: perl5lib updated to include my new get_defined_vars_in_group function
+-- similar, but not identical, to the perl5lib tag I made 10-8-2012
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_120930
+
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_120921
+
+ mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_120816
+
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_02
+
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_120918
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_1_42/models/atm/cam
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm120726
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/branch_tags/new_glacier_data2_tags/new_glacier_data2_06_clm4_0_52/models/lnd/clm
+
+ models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_08
+
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20120927
+
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+
+ models/ocn/pop2 https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20120927
+
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_120925b
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_120918
+ models/rof/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof8_120924
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_120925
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_120921
+
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xatm
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xlnd
+ models/rof/xrof https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xrof
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xice
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xocn
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xglc
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/dead_share
+
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/satm
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/slnd
+ models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/srof
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sice
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/socn
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sglc
+
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_120731
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_120427
+
+ models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.2
+
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_5_4/pio
+
+
+
+cism tag used for baseline comparisons: cism1_121002
+
+Any other externals that differed in baseline: None (except unchanged perl5lib)
+
+================================================================================
+Originator: sacks
+Date: Oct 2, 2012
+Model: cism
+Version: cism1_121002
+One-line summary: add some namelist variables, fix others
+
+Purpose of changes:
+
+Change idiag and jdiag into config parameters rather than hard-coded,
+following what is done in the seacism branch.
+
+Add some optional config options that were missing from
+namelist_definition_cism.xml (found by searching for GetValue calls in
+cism).
+
+Remove niso (unused).
+
+Make ntem & nvel integers
+
+Make ndiag 20 for gland5UM
+
+Change basal_tract from real(10) to real(5)
+
+Fix categories in namelist_definition_cism, for documentation
+purposes.
+
+Changes answers relative to previous tag: NO
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Remove hard-coded idiag, jdiag; instead use values set in config file
+M source_glimmer-cism/glimmer_paramets.F90
+M source_glimmer-cism/glide_setup.F90
+M source_glimmer-cism/glide_types.F90
+M source_glimmer-cism/glint_main.F90
+
+*** Changes to namelist variables as documented above
+M bld/build-namelist
+M bld/namelist_files/namelist_defaults_cism.xml
+M bld/namelist_files/namelist_definition_cism.xml
+
+*** Fix documentation of categories
+M bld/README.build-namelist
+
+
+Summary of testing:
+
+- bluefire.glc.auxtest: main test results: All PASS except:
+
+These were all expected failures based on previous tag:
+FAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.GC.233417
+BFAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.GC.233417.generate.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+BFAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.GC.233417.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_120930
+FAIL ERS_Ly20_E.f09_g16.TG20TR.bluefire_ibm.GC.233417
+BFAIL ERS_Ly20_E.f09_g16.TG20TR.bluefire_ibm.GC.233417.generate.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_121002
+BFAIL ERS_Ly20_E.f09_g16.TG20TR.bluefire_ibm.GC.233417.compare_hist.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_120930
+
+tputcomp failures:
+FAIL SMS_D.f09_g16.TG.bluefire_ibm.GC.233417.tputcomp.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_120930
+COMMENT tput_decr = 71.718 tput_percent_decr = 21.9
+FAIL ERS_Ly20_N2_P2.f09_g16_gl10.TG.bluefire_ibm.GC.233417.tputcomp.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_120930
+COMMENT tput_decr = 7477.653 tput_percent_decr = 55.8
+FAIL ERS_Ly20_E.f09_g16_gl10.TG20TR.bluefire_ibm.GC.233417.tputcomp.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_120930
+COMMENT tput_decr = 983.18499 tput_percent_decr = 13.1
+
+NLComp failures; as far as I can tell, this is an error in the test script:
+FAIL SMS_D.f09_g16.TG.bluefire_ibm.GC.233417.NLComp.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_120930
+FAIL ERS_Ly20_N2_P2.f09_g16_gl10.TG.bluefire_ibm.GC.233417.NLComp.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_120930
+FAIL ERS_Ly20_E.f09_g16_gl10.TG20TR.bluefire_ibm.GC.233417.NLComp.cesm1_1_alpha18f_scripts4_120930_clm_newGlacierData2_cism1_120930
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: All PASS
+
+- Other tests:
+
+ - added newly-added config options to user_nl_cism, confirmed that they
+ appeared in correct place in cism.config, and that at least some of
+ them were read properly by cism
+
+ - confirmed that diagnostic output is done at the desired coordinates,
+ based on new idiag & jdiag
+
+Externals used for testing:
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_120930
+
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_120921
+
+ mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_120816
+
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_02
+
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_120918
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_1_42/models/atm/cam
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm120726
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/branch_tags/new_glacier_data2_tags/new_glacier_data2_06_clm4_0_52/models/lnd/clm
+
+ models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_08
+
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20120927
+
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+
+ models/ocn/pop2 https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20120927
+
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_120925b
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_120918
+ models/rof/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof8_120924
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_120925
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_120921
+
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xatm
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xlnd
+ models/rof/xrof https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xrof
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xice
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xocn
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xglc
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/dead_share
+
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/satm
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/slnd
+ models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/srof
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sice
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/socn
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sglc
+
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_120731
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_120427
+
+ models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.2
+
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_5_4/pio
+
+
+cism tag used for baseline comparisons: cism1_121001
+
+Any other externals that differed in baseline: None
+
+================================================================================
+Originator: sacks
+Date: Oct 1, 2012
+Model: cism
+Version: cism1_121001
+One-line summary: change parameter defaults
+
+Purpose of changes:
+
+Change some parameters to give better results, based on suggestions from
+Jeremy Fyke and Bill Lipscomb:
+
+- sigma_levels: focus resolution at the bottom
+
+- flow_law = 0
+
+- ntem=nvel=niso=1, because we realized these are multipliers, and values <
+ 1 don't make sense
+
+Changes answers relative to previous tag: YES!
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Change parameters
+M bld/namelist_files/namelist_defaults_cism.xml
+
+*** Document that ntem, nvel & niso are multipliers
+M bld/namelist_files/namelist_definition_cism.xml
+
+
+Summary of testing:
+
+Note: baseline comparisons not done: I know this changes answers substantially
+
+- bluefire.glc.auxtest: main test results:
+
+All pass except the following:
+
+FAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.G.140709
+BFAIL ERS_Ly20.f09_g16.TG1850.bluefire_ibm.G.140709.generate.cesm1_1_alpha18f_scripts4_120930_cism1_120930
+FAIL ERS_Ly20_E.f09_g16.TG20TR.bluefire_ibm.G.140709
+BFAIL ERS_Ly20_E.f09_g16.TG20TR.bluefire_ibm.G.140709.generate.cesm1_1_alpha18f_scripts4_120930_cism1_120930
+
+These failures reveal an old problem which has just come to the surface:
+under certain circumstances, cism isn't restarting exactly. The problem is
+in the temp field, and this only affects the cpl fields once we set
+flow_law=0. With flow_law=1 (the old default), there is still a problem,
+but it is only apparent in the cism history files, so doesn't trigger a
+failure in the ERS test. I confirmed that the same problem existed in
+cism1_110418, if you set parameters appropriately (in particular: gland5UM
+with evolution=0, dt=0.05).
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism:
+
+(Just did -generate, not -compare; all PASS)
+
+- Other tests:
+
+Externals used for testing:
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_120930
+
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_120921
+
+ mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_120816
+
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_2_02
+
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_120918
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/trunk_tags/cam5_1_42/models/atm/cam
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm120726
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/branch_tags/new_glacier_data2_tags/new_glacier_data2_06_clm4_0_52/models/lnd/clm
+
+ models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_08
+
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20120927
+
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/trunk
+
+ models/ocn/pop2 https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20120927
+
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_120925b
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_120918
+ models/rof/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof8_120924
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_120925
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_120921
+
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xatm
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xlnd
+ models/rof/xrof https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xrof
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xice
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xocn
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xglc
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/dead_share
+
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/satm
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/slnd
+ models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/srof
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sice
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/socn
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sglc
+
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_120731
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_120427
+
+ models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.2
+
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_5_4/pio
+
+
+cism tag used for baseline comparisons: N/A
+
+Any other externals that differed in baseline: N/A
+
+================================================================================
+Originator: mvertens, tcraig
+Date: Sept 21, 2012
+Model: cism
+Version: cism1_120921
+One-line summary: add esmf interface
+
+Purpose of changes: Add esmf interface
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/cism.buildexe.csh
+A drivers/cpl_esmf/glc_comp_mct.F90
+A drivers/cpl_esmf/glc_comp_esmf.F90
+
+Summary of testing:
+
+(testing performed by Bill Sacks)
+
+*** Full test suite NOT run: Since the changes were isolated to the
+*** esmf interface, the full test suite seemed unnecessary
+
+- bluefire.glc.auxtest: main test results: NOT DONE
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: NOT DONE
+
+- Other tests:
+
+PASS CME10y.f09_g16.TG1850.bluefire_ibm
+PASS CME10y_N2_P2_D.f09_g16.TG1850.bluefire_ibm
+PASS CME10y_N2_P2.f09_g16.TG1850.bluefire_ibm
+PASS ERS20y_E.f09_g16.TG20TR.bluefire_ibm
+PASS ERS20y_E.f09_g16.TG20TR.bluefire_ibm.memleak
+PASS CME5y_PL.T31_g37.IG.bluefire_ibm
+PASS SMS_D.f09_g16.BGRCP45CN.bluefire_ibm
+PASS SMS_D.f09_g16.BGRCP45CN.bluefire_ibm.memleak
+PASS SMS_DE.f09_g16.BGRCP45CN.bluefire_ibm
+PASS SMS_DE.f09_g16.BGRCP45CN.bluefire_ibm.memleak
+PASS NCK_E.T31_g37.BG1850CN.bluefire_ibm
+
+Ran shortlist.glc.auxtest, with comparison to cesm1_1_alpha18a:
+
+PASS SMS_D.f09_g16.TG.bluefire_ibm
+PASS SMS_D.f09_g16.TG.bluefire_ibm.memleak
+PASS SMS_D.f09_g16.TG.bluefire_ibm.compare_hist.cesm1_1_alpha18a
+PASS SMS_D.f09_g16.TG.bluefire_ibm.memcomp.cesm1_1_alpha18a
+FAIL SMS_D.f09_g16.TG.bluefire_ibm.tputcomp.cesm1_1_alpha18a
+COMMENT tput_decr = 79.75 tput_percent_decr = 13.8
+PASS ERS20y.f09_g16.TG1850.bluefire_ibm
+PASS ERS20y.f09_g16.TG1850.bluefire_ibm.memleak
+PASS ERS20y.f09_g16.TG1850.bluefire_ibm.compare_hist.cesm1_1_alpha18a
+PASS ERS20y.f09_g16.TG1850.bluefire_ibm.memcomp.cesm1_1_alpha18a
+PASS ERS20y.f09_g16.TG1850.bluefire_ibm.tputcomp.cesm1_1_alpha18a
+PASS SMS_D.T31_g37.IG.bluefire_ibm
+PASS SMS_D.T31_g37.IG.bluefire_ibm.memleak
+FAIL SMS_D.T31_g37.IG.bluefire_ibm.compare_hist.cesm1_1_alpha18a
+PASS SMS_D.T31_g37.IG.bluefire_ibm.memcomp.cesm1_1_alpha18a
+FAIL SMS_D.T31_g37.IG.bluefire_ibm.tputcomp.cesm1_1_alpha18a
+COMMENT tput_decr = 2.949 tput_percent_decr = 12.1
+
+The failure in compare_hist with the IG case seems just due to runoff
+changes:
+
+[be1105en:/ptmp/sacks/tests/SMS_D.T31_g37.IG.bluefire_ibm.C.111836]$ grep RMS cprnc.out
+ RMS r2x_Forr 1.3464E-06
+[be1105en:/ptmp/sacks/tests/SMS_D.T31_g37.IG.bluefire_ibm.C.111836]$ grep FILLDIFF cprnc.out
+ FILLDIFF domr_lat
+ FILLDIFF domr_lon
+ FILLDIFF domr_are
+ FILLDIFF domr_are
+ FILLDIFF domr_mas
+ FILLDIFF domr_fra
+ FILLDIFF r2x_Forr
+ FILLDIFF r2x_Forr
+
+So I'm not worried about this
+
+Related component_gen_comp:
+
+PASS ERS20y.f09_g16.TG1850.bluefire_ibm.compare_hist.cesm1_1_alpha18a.cism
+PASS SMS_D.T31_g37.IG.bluefire_ibm.compare_hist.cesm1_1_alpha18a.cism
+PASS SMS_D.f09_g16.TG.bluefire_ibm.compare_hist.cesm1_1_alpha18a.cism
+
+
+Externals used for testing:
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/branch_tags/rtmcomp_tags/rtmcomp07_scripts4_120828
+
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/branch_tags/rtmcomp_tags/rtmcomp03_Machines_120829
+
+ mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_120816
+
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/branch_tags/rtmcomp_tags/rtmcomp15_drvseq4_1_26
+
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/branch_tags/rtmcomp_tags/rtmcomp02_share3_120803
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/branch_tags/exe_cam5_1_33_tags/exe01_cam5_1_33/models/atm/cam
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm120726
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/branch_tags/rtmcomp_tags/rtmcomp14_clm4_0_46/models/lnd/clm
+
+ models/rof/rtm https://svn-ccsm-models.cgd.ucar.edu/rivrtm/trunk_tags/rtm1_0_04
+
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20120825
+
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/branches/rtmcomp
+
+ models/ocn/pop2/ https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20120824
+
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_120626
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/branch_tags/rtmcomp_tags/rtmcomp02_dlnd8_120626
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_120626
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_120626
+ models/rof/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof8_120827
+
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xatm
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xlnd
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xice
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xocn
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xglc
+ models/rof/xrof https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/xrof
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_7_04/dead_share
+
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/satm
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/slnd
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sice
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/socn
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/sglc
+ models/rof/srof https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_01/srof
+
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_120731
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_120427
+
+ models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.0
+
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_5_3/pio
+
+cism tag used for baseline comparisons: cism1_120905
+
+Any other externals that differed in baseline: Used cesm1_1_alpha18a
+for baselines; this involves many differences in externals
+
+================================================================================
+Originator: sacks
+Date: Sept 5, 2012
+Model: cism
+Version: cism1_120905
+One-line summary: change basal_tract to real rather than integer
+
+Purpose of changes:
+
+basal_tract was mistakenly listed as an integer; I am fixing this.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/namelist_files/namelist_definition_cism.xml
+
+Summary of testing: No system tests; just tested namelist generation
+
+- bluefire.glc.auxtest: main test results: NOT DONE
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism: NOT DONE
+
+- Other tests:
+
+(1) Ran 'create_newcase -compset TG -res f09_g16 -mach bluefire', then
+'setup'. Compared default cism.config with baseline version: identical
+
+(2) In that case, did the following:
+echo 'basal_tract = 2.5 10 10 0 1' >> user_nl_cism
+preview_namelists
+
+Confirmed that it worked to use a real value.
+
+
+Externals used for testing:
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_120828
+
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_120829
+
+ mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_120816
+
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_1_26
+
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_120803
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/branch_tags/exe_cam5_1_33_tags/exe01_cam5_1_33/models/atm/cam
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm120726
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_46/models/lnd/clm
+
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20120825
+
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/branches/ensemble
+
+ models/ocn/pop2/ https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20120824
+
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_120626
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_120626
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_120626
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_120626
+
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xatm
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xlnd
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xice
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xocn
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xglc
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/dead_share
+
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/satm
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/slnd
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/sice
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/socn
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/sglc
+
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_120731
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_120427
+
+ models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.0
+
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_5_3/pio
+
+
+cism tag used for baseline comparisons: cism1_120829
+
+Any other externals that differed in baseline: None
+
+================================================================================
+Originator: sacks
+Date: Aug 29, 2012
+Model: cism
+Version: cism1_120829
+One-line summary: enable ensemble capabilities
+
+Purpose of changes:
+
+Enable ensemble capabilities for cism. These were already implemented
+in drv & scripts, but required some changes in the cism code.
+
+Also, change flow_factor to real rather than integer.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** New modules to handle variables that differ between ensemble members
+A source_glc/glc_ensemble.F90
+A source_glc/glc_files.F90
+A test/unit/unit_test_replacements/glc_files.F90
+ - simplified version needed because the time management unit test
+ doesn't know anything about ensemble number
+
+*** Add ensemble capability
+M drivers/cpl_mct/glc_comp_mct.F90
+M source_glc/glc_io.F90
+ - Also deleted unused glc_io_create_suffix_cesm, because I think it
+ would have been broken with ensemble capabilities
+M source_glc/glc_constants.F90
+ - Move file names that are no longer constant
+
+*** Change use statements to reflect movement of some file name variables
+M source_glc/glc_InitMod.F90
+M source_glc/glc_global_grid.F90
+M source_glc/glc_time_management.F90
+
+*** Add use of glc_files.F90
+M test/unit/time_management/Srcfiles
+
+*** Change flow_factor to real rather than integer
+M bld/namelist_files/namelist_definition_cism.xml
+
+*** Updated ChangeLog template to give more testing details
+M ChangeLog_template
+
+Summary of testing:
+
+- bluefire.glc.auxtest: main test results:
+
+All PASS except the following (other than throughput tests, these are all expected failures):
+
+FAIL ERS20y.f09_g16.TG1850.bluefire_ibm.tputcomp.cesm1_1_alpha17c
+COMMENT tput_decr = 1694.229 tput_percent_decr = 44.5
+BFAIL ERS20y_N2_P2.f09_g16.TGG10.bluefire_ibm.compare_hist.cesm1_1_alpha17c
+BFAIL NCK_P2.f09_g16.TG.bluefire_ibm.compare_hist.cesm1_1_alpha17c
+FAIL SMS_PL.T31_g37.IGLONG.bluefire_ibm.tputcomp.cesm1_1_alpha17c
+COMMENT tput_decr = 3.6440000 tput_percent_decr = 2.08
+FAIL SMS.f19_f19.FG20TRCN.bluefire_ibm.tputcomp.cesm1_1_alpha17c
+COMMENT tput_decr = 1.627 tput_percent_decr = 13.5
+
+(Note: test list was that in scripts4_120828, plus
+ERS20y.f09_g16.TGG10.bluefire_ibm [which was included in the baseline test
+suite, but has now been replaced by a _N2 version of this test])
+
+- bluefire.glc.auxtest: results of component_gen_comp -model cism:
+
+All PASS except the following (these are expected failures):
+
+BFAIL ERS20y_N2_P2.f09_g16.TGG10.bluefire_ibm.compare_hist.cesm1_1_alpha17c.cism (baseline directory does not exist)
+BFAIL NCK_P2.f09_g16.TG.bluefire_ibm.compare_hist.cesm1_1_alpha17c.cism (baseline directory does not exist)
+
+- Other tests:
+
+ - 4-instance TG run with differences in flow_factor: startup with one
+ continue run; compared instances #1 and #3 with analogous
+ single-instance runs (comparisons included cism hist, cism rest & cpl
+ hist files)
+
+ - as above, but 16 PE run with glc on PEs 4-7: compared with above run to
+ make sure results don't depend on PE location
+
+ - 4-instance IGLONG test with differences in the albice CLM parameter;
+ compared instance #3 with analogous single-instance run (comparisons
+ included clm hist, cism hist & cpl hist files)
+
+Externals used for testing:
+
+ scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_120828
+
+ scripts/ccsm_utils/Machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_120826
+
+ mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_120816
+
+ models/drv https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq4_1_26
+
+ models/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_120803
+
+ models/atm/cam https://svn-ccsm-models.cgd.ucar.edu/cam1/branch_tags/exe_cam5_1_33_tags/exe01_cam5_1_33/models/atm/cam
+ models/atm/wrf https://svn-ccsm-models.cgd.ucar.edu/wrf/trunk_tags/wrf32_ccsm120726
+
+ models/lnd/clm https://svn-ccsm-models.cgd.ucar.edu/clm2/trunk_tags/clm4_0_46/models/lnd/clm
+
+ models/ice/cice https://svn-ccsm-models.cgd.ucar.edu/cice/trunk_tags/cice4_0_20120825
+
+ models/glc/cism https://svn-ccsm-models.cgd.ucar.edu/glc/branches/ensemble
+
+ models/ocn/pop2/ https://svn-ccsm-models.cgd.ucar.edu/pop2/trunk_tags/cesm_pop_2_1_20120824
+
+ models/atm/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_120626
+ models/lnd/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_120626
+ models/ice/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_120626
+ models/ocn/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_120626
+
+ models/atm/xatm https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xatm
+ models/lnd/xlnd https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xlnd
+ models/ice/xice https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xice
+ models/ocn/xocn https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xocn
+ models/glc/xglc https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/xglc
+ models/dead_share https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_6_05/dead_share
+
+ models/atm/satm https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/satm
+ models/lnd/slnd https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/slnd
+ models/ice/sice https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/sice
+ models/ocn/socn https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/socn
+ models/glc/sglc https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_3_05/sglc
+
+ models/utils/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_120731
+ models/utils/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_120427
+
+ models/utils/mct https://github.com/MCSclimate/MCT/tags/MCT_2.8.0
+
+ models/utils/pio http://parallelio.googlecode.com/svn/trunk_tags/pio1_5_3/pio
+
+
+cism tag used for baseline comparisons: cism1_120823
+
+Any other externals that differed in baseline:
+
+- scripts4_120727d: only diff is in bluefire.glc.auxtest list
+
+================================================================================
+Originator: sacks
+Date: Aug 23, 2012
+Model: cism
+Version: cism1_120823
+One-line summary: Turn evolution on for gland5UM
+
+Purpose of changes:
+
+Turn evolution on for gland5UM (evolution=0 rather than -1). Also shorten
+time steps so ice sheet evolution is stable for gland5UM: dt=0.05,
+ntem=nvel=niso=0.1. These parameters came from Jeremy Fyke's most recent
+runs.
+
+The values of these (and other) parameters may still change, but I wanted
+to use something for now that gives ice sheet evolution with gland5UM to
+facilitate testing.
+
+Changes answers relative to previous tag: Yes, for gland5UM
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/namelist_files/namelist_defaults_cism.xml
+
+Summary of testing:
+
+No system tests!
+
+Just created three cases and compared cism.config with previously-generated
+cases:
+
+- gland5UM: diffs as expected
+
+- gland5: no diffs
+
+- gland10: no diffs
+
+================================================================================
+Originator: mvertens
+Date: June 29, 2012
+Model: cism
+Version: cism1_120629
+One-line summary: Change DIN_LOC_ROOT_CSMDATA to DIN_LOC_ROOT
+
+Purpose of changes: Change DIN_LOC_ROOT_CSMDATA to DIN_LOC_ROOT
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** simple, one-line change
+M bld/build-namelist
+
+Summary of testing: NONE!
+
+================================================================================
+Originator: mvertens
+Date: May 29, 2012
+Model: cism
+Version: cism1_120529
+One-line summary: Modifications to build to fit with latest scripts
+
+Purpose of changes:
+
+Minor changes to fit in with other system changes Mariana is bringing in
+
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/cism.cpl7.template
+
+
+Summary of testing: NONE!
+
+================================================================================
+Originator: mvertens
+Date: May 21, 2012
+Model: cism
+Version: cism1_120521
+One-line summary: Support removal of env_conf.xml
+
+Purpose of changes:
+
+Changes to build-namelist to support removal of env_conf.xml, and related
+cleanup
+
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+M bld/build-namelist
+
+
+Summary of testing:
+
+(All tests except the ERI IGCN test were conducted by Bill S)
+
+Main testing: Unless otherwise noted, the following tests were done with a
+10-year TG compset, f09_g16, GLC_GRID=gland20. I compared (1) cism_in input
+file, (2) cism.config input file, (3) year 10 cism history output (using
+cprnc). The comparison cases were generated from cesm1_1_beta14.
+
+- startup run
+
+- startup run at f19_g16
+
+- startup run with gland5UM
+
+- branch run; 1 year (continuing the above startup run)
+
+
+Other tests:
+
+- ERI.T31_g37.IGCN.bluefire_ibm
+ - comparison with cesm1_1_alpha14a
+
+- Checked dt_option & dt_count in cism in for the following:
+ - NCPL_BASE_PERIOD=decade, GLC_NCPL=5 (dt_option=steps_per_year, dt_count=0.5)
+
+================================================================================
+Originator: mvertens, sacks
+Date: May 15, 2012
+Model: cism
+Version: cism1_120512
+One-line summary: New build-namelist functionality
+
+Purpose of changes:
+
+- New build-namelist functionality to make cism consistent with other CESM
+ components. Makes cism.buildnml.csh just a thin wrapper to
+ build-namelist. Namelist defaults now set in an xml file.
+
+- Set dt_option and dt_count automatically to match the coupling interval.
+
+- Multi-instance capability in the build scripts (create multiple cism_in &
+ cism.config files).
+
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Major rework of build
+D bld/configure
+D bld/config_definition.xsl
+D bld/namelist_defaults_overall.xml
+D bld/namelist_definition_overall.xml
+D bld/namelist_defaults.xsl
+D bld/namelist_definition.xsl
+D bld/config_definition.xml
+D bld/namelist_defaults_cism.xml
+D bld/namelist_definition_cism.xml
+A bld/user_nl_cism
+A bld/cismIO/cism.buildIO.template.csh
+A bld/README.build-namelist
+A bld/cism.buildexe.csh
+M bld/build-namelist
+M bld/cism.cpl7.template
+A bld/cism.buildnml.csh
+M bld/README
+A bld/namelist_files/namelist_defaults_cism.xml
+A bld/namelist_files/namelist_definition_cism.xml
+A bld/namelist_files
+
+*** Remove now-unnecessary input templates (replaced by
+*** namelist_files/namelist_defaults_cism.xml)
+D input_templates
+
+*** Add unit test for 2-year time step
+M test/unit/time_management/restart_testlist
+A test/unit/time_management/inputs/twoyear/time_management_test_in
+A test/unit/time_management/inputs/twoyear/cism_in
+A test/unit/time_management/inputs/twoyear
+M test/unit/time_management/inputs/README
+A test/unit/time_management/inputs/twoyear.restart1/time_management_test_in
+A test/unit/time_management/inputs/twoyear.restart1/cism_in
+A test/unit/time_management/inputs/twoyear.restart1
+A test/unit/time_management/inputs/twoyear.restart2/time_management_test_in
+A test/unit/time_management/inputs/twoyear.restart2/cism_in
+A test/unit/time_management/inputs/twoyear.restart2
+M test/unit/time_management/testlist
+
+*** Fix path
+M test/unit/unit_test_shr/Makefile.common
+
+*** Remove redundant README file
+D README
+
+*** Remove "Known bugs" section
+M ChangeLog_template
+
+
+Summary of testing:
+
+CESM Tests (implemented change off of cesm1_1_beta13, compared with baseline
+cesm1_1_beta13):
+
+- bluefire.glc.auxtest: All tests passed except:
+
+CFAIL ERS.T31_g37.IGRCP85CN.bluefire_ibm.C.215553
+FAIL SMS.T31_T31.FG20TRCN.bluefire_ibm
+BFAIL SMS.T31_T31.FG20TRCN.bluefire_ibm.compare_hist.cesm1_1_beta13
+FAIL SMS.T31_g37.TG.bluefire_ibm
+BFAIL SMS.T31_g37.TG.bluefire_ibm.compare_hist.cesm1_1_beta13
+
+These were all expected failures (also failed in cesm1_1_beta13)
+
+- ERS.f19_g16.IGCN.bluefire_ibm: PASS
+ - ran this because of failure in ERS.T31_g37.IGRCP85CN.bluefire_ibm
+
+
+Main testing of new build: Unless otherwise noted, the following tests were done
+with a 10-year TG compset, f09_g16, GLC_GRID=gland20. I compared (1) cism_in
+input file, (2) cism.config input file, (3) year 10 cism history output (using
+cprnc). The comparison cases were generated from cesm1_1_beta13.
+
+- startup run
+ - Also made sure that list of files in run directory is the same as before
+
+- startup run at f19_g16
+
+- startup run with gland5UM
+
+- startup run with gland5
+ - just checked cism_in and cism.config -- didn't actually do the run
+
+- startup run with gland10
+ - just checked cism_in and cism.config -- didn't actually do the run
+
+- hybrid run; 1 year (continuing the above startup run)
+
+- branch run; 1 year (continuing the above startup run)
+
+- continue run; 1 year -- continuing the above startup run
+
+- continue run; 1 year -- continuing the above branch run
+
+- IG: 1-year test at f19_g16, continuing an older run that had run for
+ 10 years (hybrid)
+ - main point of this was to make sure 1-day coupling still works right
+
+
+
+Testing automatic setting of dt_option and dt_count, and making sure that it
+works to use values more general than what we have been using before:
+
+- New time manager unit tests, to make sure it works to have fractional dt_count
+ with steps_per_year (as we could get with NCPL_BASE_PERIOD='decade'):
+ - twoyear
+ - looked through output manually (briefly)
+ - compared with output from using dt_option='seconds', dt_count=63072000
+ - restart_test twoyear
+
+- Checked dt_option & dt_count in cism_in for the following:
+ - out-of-the-box TG
+ - out-of-the-box IG
+ - various values of NCPL_BASE_PERIOD & GLC_NCPL
+ - values overridden by user_nl_cism
+
+- TG run with NCPL_BASE_PERIOD='year', GLC_NCPL=2 (i.e., two time steps per
+ year): confirmed that there is one cism time step per coupling interval
+
+- TG run with NCPL_BASE_PERIOD='decade', GLC_NCPL=5 (i.e., 2-year time step),
+ with mass balance time step = 2 years (requires source mod in glint_mbal.F90),
+ ice dynamics time step = 2 years (dt=2.): confirmed that there is one cism
+ time step per coupling interval
+
+- Compared log files in two 1-year TG runs, which have the same time step but
+ specified in different ways: one with NCPL_BASE_PERIOD=day, GLC_NCPL=48
+ (results in dt_option=steps_per_day, dt_count=48); the other with
+ NCPL_BASE_PERIOD=hour, GLC_NCPL=2 (results in dt_option=seconds,
+ dt_count=1800): glc log files essentially identical (with verbose=.true.)
+ - Note that these runs both died just before the end, with "Unexpected calling
+ of GLINT at time 8759", but I don't think that's a problem that I caused
+ (but it meant I could not compare hist files)
+
+
+Other tests:
+
+- modifications via user_nl_cism: just checked the generated cism_in and
+ cism.config files to make sure the modifications are picked up
+ - changed a cism_in variable: cism_debug
+ - changed a cism.config variable: basal_tract
+
+- multi-instance test: 4 instances, with no modifications in user_nl_cism_000*:
+ just checked the generated cism_in and cism.config files: paramfile set
+ correctly in cism_in_000*; everything else matches a single-instance case
+
+- multi-instance test: 4 instances, with modifications to ihour0 and evolution
+ in user_nl_cism_0001 & 0003 (but no changes in 0002 or 0004): just checked the
+ generated cism_in and cism.config files
+
+- cism.buildIO.csh:
+ - confirmed that the generated script is essentially the same as before
+ - added a variable in glide_vars.def, confirmed that all F90 files
+ created by cism.buildIO.csh are identical to those from an old
+ case with the same change in glide_vars.def
+
+================================================================================
+Originator: sacks
+Date: March 22, 2012
+Model: cism
+Version: cism1_120322
+One-line summary: fixed memory leak
+
+Purpose of changes:
+
+Fixed memory leak in glint_downscaling_gcm (~ 40 MB/year for some test
+runs)
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+Known bugs (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+ 1433 (time manager needs additional changes to support longer timesteps)
+ 1441 (time manager doesn't restart properly when last run stopped in the middle of a day)
+
+List all modified files, and describe the changes:
+
+*** Fix memory leak
+M https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_120123/source_glimmer-cism/glint_climate.F90
+
+*** Fix comment (unrelated change)
+M https://svn-ccsm-models.cgd.ucar.edu/glc/trunk_tags/cism1_120123/test/unit/unit_test_shr/Makefile.common
+
+Summary of testing:
+
+- CESM tests: bluefire.glc.auxtest, with comparison to previous
+ version (implemented change in cesm1_1_beta11, compared with
+ baseline cesm1_1_beta11). All tests passed except:
+
+ RUN ERS.T31_g37.IGRCP85CN.bluefire_ibm.C.095358
+ FAIL SMS.T31_T31.FG20TRCN.bluefire_ibm
+ BFAIL SMS.T31_T31.FG20TRCN.bluefire_ibm.compare_hist.cesm1_1_beta11
+ FAIL SMS.T31_g37.TG.bluefire_ibm
+ BFAIL SMS.T31_g37.TG.bluefire_ibm.compare_hist.cesm1_1_beta11
+
+ All of these tests also failed in cesm1_1_beta11
+
+
+- 10-year TG compset using Bill L's recent instructions: f09_g16,
+ gland5UM, annual coupling interval using Gail's annual-average cpl
+ forcings; evolution=0, dt=ntem=nvel=niso=0.1. Last year of cism hist
+ is identical to code without this fix. NOTE: This change was done
+ off of an earlier version of cism (cism1_111214), but the change is
+ so straightforward that I'm pretty confident that the results should
+ translate to the latest version.
+
+================================================================================
+Originator: tcraig
+Date: Jan 23, 2012
+Model: cism
+Version: cism1_120123
+One-line summary: update esmf to 520r
+
+M source_glc/glc_io.F90
+M drivers/cpl_mct/glc_comp_mct.F90
+================================================================================
+Originator: sacks
+Date: Dec 21, 2011
+Model: cism
+Version: cism1_111221
+One-line summary: Restore "correct" glc_time_management, with fix for compiler bug
+
+Purpose of changes:
+
+Return glc_time_management to its "correct" state - i.e., before the changes
+made in cism1_111220a; introduce unused module variable in glc_time_management
+to get around an internal compiler error in xlf. Also, move incorrectly-placed
+'implicit none' in glc_time_management_test.F90.
+
+Changes answers relative to previous tag: No, but replaces functionality removed
+in cism1_111220a
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+Known bugs (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+ 1433 (time manager needs additional changes to support longer timesteps)
+ 1441 (time manager doesn't restart properly when last run stopped in the middle of a day)
+
+List all modified files, and describe the changes:
+
+*** Restore to version prior to cism1_111220a, introduce unused module variable
+ to get around internal compiler error
+M source_glc/glc_time_management.F90
+
+*** Move incorrectly-placed 'implicit none'
+M test/unit/time_management/glc_time_management_test.F90
+
+
+Summary of testing:
+
+- Tested build for an IGCN case
+
+- Full cesm system tests pending, including bluefire.glc.auxtest with comparison
+ to beta07 (needed to create this tag before testing was complete, but I will
+ create a new tag if any fail)
+
+================================================================================
+Originator: sacks
+Date: Dec 20, 2011
+Model: cism
+Version: cism1_111220a
+One-line summary: Rolled back glc_time_management
+
+Purpose of changes:
+
+The cesm build was failing with the new version of glc_time_management. I have
+temporarily rolled this file back to the version in 31393.
+
+This should be fixed ASAP!
+
+
+Changes answers relative to previous tag: No, but removes functionality
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+Known bugs (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+*** Rolled back
+M source_glc/glc_time_management.F90
+
+Summary of testing: Just tested build
+
+================================================================================
+Originator: sacks
+Date: Dec 20, 2011
+Model: cism
+Version: cism1_111220
+One-line summary: Updated ChangeLog entry for cism1_111214
+
+Purpose of changes:
+ I realized that I hadn't documented the testing done for cism1_111214 entirely
+ correctly; I have now fixed this.
+
+Changes answers relative to previous tag: No
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+Known bugs (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+ 1433 (time manager needs additional changes to support longer timesteps)
+ 1441 (time manager doesn't restart properly when last run stopped in the middle of a day)
+
+List all modified files, and describe the changes:
+
+*** Update ChangeLog entry for cism1_111214
+M ChangeLog
+
+Summary of testing: None
+
+================================================================================
+Originator: mvertens
+Date: Dec 18, 2011
+Model: cism
+Version: cism1_111218
+One-line summary: Simplified way that cism field names are generated for driver
+
+Purpose of changes:
+ Implemented new scheme of determining cism field names that is more flexible
+ and robust
+
+Changes answers relative to previous tag: No
+
+Known bugs (include bugzilla ID) (http://bugs.cgd.ucar.edu/): None
+
+List all modified files, and describe the changes:
+M source_glc/glc_constants.F90
+ - no longer set glc_nec to 10 - but determine it from fields set in
+ coupler - consistency check is also made in the land - more
+ robust implementation
+M drivers/cpl_share/glc_cpl_indices.F90
+ - new index array for multiple elevation classes now is used instead
+ of hard wired index names for each class - much more extensible
+ scheme
+M drivers/cpl_mct/glc_comp_mct.F90
+ - using changes in glc_cpl_indices - glc_export_mct and glc_import_mct
+ are greatly simplified (no more hard-wiring)
+
+Summary of testing:
+Ran IGCN and BGCN system tests and verified that results were bit-for-bit
+
+================================================================================
+Originator: sacks
+Date: Dec 14, 2011
+Model: cism
+Version: cism1_111214
+One-line summary: Add test driver for glc_time_management
+
+Purpose of changes:
+
+Add a test driver for glc_time_management, along with a list of standard tests
+to run. Remove some variables from glc_time_management that aren't being used
+and weren't properly preserved in restarts. Add code & makefile that can be used
+for future unit testers. Add ChangeLog_template.
+
+
+Changes answers relative to previous tag: No
+
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+Known bugs (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+ 1433 (time manager needs additional changes to support longer timesteps)
+ 1441 (time manager doesn't restart properly when last run stopped in the middle of a day)
+
+
+List all modified files, and describe the changes:
+
+*** Remove unused variables that aren't preserved in restarts
+M source_glc/glc_time_management.F90
+
+*** Add code & makefile that can be used for any cism unit tester
+A test
+A test/unit
+A test/unit/unit_test_replacements
+A test/unit/unit_test_replacements/glc_communicate.F90
+A test/unit/unit_test_replacements/shr_sys_mod.F90
+A test/unit/unit_test_replacements/README
+A test/unit/unit_test_shr/writevar_mod.F90
+A test/unit/unit_test_shr/Makefile.common
+A test/unit/unit_test_shr/README
+A test/unit/unit_test_shr
+
+*** Add test driver for the glc_time_management module
+A test/unit/time_management
+A test/unit/time_management/README
+A test/unit/time_management/Filepath
+A test/unit/time_management/Srcfiles
+A test/unit/time_management/Makefile
+A test/unit/time_management/glc_time_management_test_mod.F90
+A test/unit/time_management/glc_time_management_test.F90
+
+*** Script to run exact restart test using the time management test driver
+A test/unit/time_management/restart_test
+
+*** Add list of tests to be run with the time management test driver
+A test/unit/time_management/restart_testlist
+A test/unit/time_management/testlist
+
+*** Namelists for each of the standard tests to run using the time management
+ test driver and the restart_test script (note: I am not listing all of the
+ individual files added in this directory)
+A test/unit/time_management/inputs
+
+*** Add ChangeLog_template to be used for new ChangeLog entries
+A ChangeLog_template
+
+
+Summary of testing: No system tests, but many unit tests
+
+- test/unit/time_management (tests in testlist & restart_testlist; for testlist,
+ performed regression tests against code similar to cism1_111026 where
+ possible, manually checked output where regression tests were impossible -
+ e.g., where the old code was buggy):
+
+Note that there is a known bug in newhour (see bug 1433). I am considering tests
+to have passed even if they have the wrong newhour value. Also, since I didn't
+do a careful look at every variable output by the test driver, it's possible
+that there are errors in other variables that I didn't catch. Here, I am just
+listing tests that have failures in variables that I checked, other than
+newhour (variables I checked more carefully were: iyear, imonth, iday,
+seconds_this_day, eoy, eom, eod, adjust_nyears, elapsed_days, elapsed_months,
+elapsed_years).
+
+All pass except:
+
+ oneyear_leap: model doesn't run to completion (see bug 1433)
+
+ threeyear_leap: also has problems related to bug 1433, though here the model
+ runs to completion
+
+ 219day_leap: also has problems related to bug 1433; the first error I notice
+ is that year 5, day_of_year 73 is translated to 0005-mar-13 rather than
+ 0005-mar-14, but there are likely other errors, too, as for oneyear_leap
+
+ 511day_leap: also has problems related to bug 1433; e.g., both the leap and the
+ non-leap tests have a time step with time stamp 0005-mar-15; the leap should
+ differ
+
+
+================================================================================
+Originator: sacks
+Date: Nov 22, 2011
+Model: cism
+Version: cism1_111122
+One-line summary: Modified time manager to support longer time steps
+
+Previously, the time manager did not correctly handle time steps longer
+than one month. With the changes here, time steps as long as desired
+should be possible. However, as noted below, I have only tested this for
+daily and annual time steps so far.
+
+M source_glc/glc_time_management.F90
+
+pretag testing: so far I have tested this for daily and annual time steps;
+more testing is needed of other length time steps
+
+ - 10-year TG compset with daily time step (i.e., the default),
+ using gland10 and 10 years of forcing from an f19_g16 IG compset:
+ last year of cism history output identical to a similar run set up
+ using old version; cism log files also essentially identical
+
+ - Similar to the above, but with annual time step & annual
+ coupling interval. This was set up by (1) averaging the 10 years
+ of cpl history files into annual-average files; (2) explicitly
+ setting atm_cpl_dt to 31536000, and the same for other components,
+ in cpl.buildnml.csh; (3) changing dt_option from steps_per_day to
+ steps_per_year in cism.buildnml.csh. Last year of cism history
+ output identical to the daily run; cism log files also show
+ correct time stepping
+
+ - exact restart, daily time step: similar to daily time step run
+ above, but two 5-year runs; checked last year of history file
+ against the above 10-year run
+
+ - exact restart, annual time step: similar to annual time step run
+ above, but two 5-year runs; checked last year of history file
+ against the above 10-year run
+
+================================================================================
+Originator: sacks
+Date: Oct 26, 2011
+Model: cism
+Version: cism1_111026
+One-line summary: change ntem default to 1000000. for gland5UM
+
+This turns off ice temperature evolution (I think) -- at least, as long as
+you are running for less than 1000000 years. This change should have been
+made at the same time as the creation of ice.config.gland5UM, when
+evolution was (correctly) set to -1.
+
+M input_templates/ice.config.gland5UM
+
+pretag testing: NONE
+
+================================================================================
+Originator: jwolfe
+Date:
+Model: cism
+Version: cism1_111007
+One-line summary: add support for new GLC_GRID gland5UM
+
+M bld/cism.cpl7.template
+A input_templates/ice.config.gland5UM
+
+================================================================================
+Originator: sacks, lipscomb
+Date: Oct 4, 2011
+Model: cism
+Version: cism1_111004
+One-line summary: allow evolution=-1; reformat glimmer_ncio.F90 to satisfy
+ some compilers
+
+* Allow evolution=-1 -- i.e., no thickness evolution
+M source_glimmer-cism/glide.F90
+M source_glimmer-cism/glide_setup.F90
+
+* Reformat to satisfy some compilers
+M source_glimmer-cism/glimmer_ncio.F90
+
+pretag testing:
+ - 10-year TG compsets before and after these modifications
+ (evolution=0 in both cases), using a single year of forcing from a
+ f19_g16 IG compset: last year of cism history output identical
+
+ - 10-year TG compset with evolution=-1, compared with the above
+ evolution=0 case: last year of cism history output differs, as
+ expected (but didn't evaluate whether the differences make sense)
+
+================================================================================
+Originator: jwolfe, lipscomb
+Date: Mon Apr 18, 2011
+Model: cism
+Version: cism1_110418
+One-line summary: changes to make restarts completely functional for branch
+ and hybrid runs, using the glimmer-cism native hotstart
+ capability
+
+M bld/cism.cpl7.template
+M input_templates/ice.config.gland10
+M input_templates/ice.config.gland20
+M input_templates/ice.config.gland5
+
+================================================================================
+Originator: jwolfe, lipscomb
+Date: Mon Mar 07, 2011
+Model: cism
+Version: cism1_110307
+One-line summary: modify input templates to remove setting native glimmer-cism
+ history output files, which interferes with creating CESM-style
+ history output
+
+M input_templates/ice.config.gland10
+M input_templates/ice.config.gland20
+M input_templates/ice.config.gland5
+
+pretag testing: Results are bfb relative to cesm1_0_beta15 for ERI.f19_f19.FG.bluefire
+
+================================================================================
+Originator: jwolfe, lipscomb
+Date: Sun Feb 20, 2011
+Model: cism
+Version: cism1_110220
+One-line summary: reworked the way CISM handles history files, to more closely
+ follow CESM naming conventions and alarms
+
+M bld/cism.cpl7.template
+M source_glc/glc_InitMod.F90
+M source_glc/glc_io.F90
+M drivers/cpl_mct/glc_comp_mct.F90
+
+pretag testing: Results are bfb relative to cesm1_0_beta15 for ERI.f19_f19.FG.bluefire
+
+================================================================================
+Originator: fischer
+Date: Tue Jan 25, 2011
+Model: cism
+Version: cism1_110125
+One-line summary: renamed glint_smb.F90 to smb_dummy.F90
+
+A source_glimmer-cism/smb_dummy.F90
+D source_glimmer-cism/glint_smb.F90
+
+================================================================================
+Originator: mvertens
+Date: Mon Jan 24, 2011
+Model: cism
+Version: cism1_110124
+One-line summary: removed use seq_flds_indices, now using local glc_cpl_indices_set
+
+A drivers/cpl_share
+A drivers/cpl_share/glc_cpl_indices.F90
+M drivers/cpl_mct/glc_comp_mct.F90
+
+pretag testing: Results are bfb relative to cesm1_0_beta14 for ERI.f19_f19.FG.bluefire
+
+================================================================================
+Originator: jwolfe, lipscomb
+Date: Sep 13, 2010
+Model: cism
+Version: cism1_100913
+One-line summary: add more capabilities, clean-up
+
+* pull vars.def files together along with template and python script, to
+ allow users to modify the IO list on a per case basis
+A bld/cismIO
+A bld/cismIO/glide_lithot_vars.def
+A bld/cismIO/README.cismIO
+A bld/cismIO/glint_mbal_vars.def
+A bld/cismIO/glint_vars.def
+A bld/cismIO/ncdf_template.F90.in
+A bld/cismIO/glide_vars.def
+A bld/cismIO/generate_ncvars.py
+M bld/cism.cpl7.template
+
+* make GLC_DEBUG a variable instead of an IFDEF
+M source_glimmer-cism/glide.F90
+M source_glimmer-cism/glint_timestep.F90
+M source_glimmer-cism/glint_climate.F90
+M source_glimmer-cism/glimmer_paramets.F90
+M source_glimmer-cism/glide_diagnostics.F90
+M source_glimmer-cism/glint_main.F90
+M source_glimmer-cism/glint_interp.F90
+M source_glimmer-cism/glint_type.F90
+
+* reworked restart to keep glc calls out of glimmer-cism
+M source_glc/glc_InitMod.F90
+M source_glc/glc_io.F90
+A source_glimmer-cism/glimmer_restart_gcm.F90
+M source_glimmer-cism/glint_initialise.F90
+M source_glimmer-cism/glimmer_config.F90
+
+* fixed configuration files to make other resolutions work correctly
+M input_templates/ice.config.gland10
+M input_templates/ice.config.gland20
+M input_templates/ice.config.gland5
+
+================================================================================
+Originator: jwolfe, lipscomb
+Date: Jun 17, 2010
+Model: cism
+Version: cism1_100617a
+One-line summary: keep use statement for shr_file_mod (temporarily)
+
+M source_glimmer-cism/glimmer_config.F90
+
+================================================================================
+Originator: jwolfe, lipscomb
+Date: Jun 17, 2010
+Model: cism
+Version: cism1_100617
+One-line summary: pick up mods to glc_comp from Bill L's sandbox
+
+M drivers/cpl_mct/glc_comp_mct.F90
+
+================================================================================
+Originator: jwolfe, lipscomb
+Date: Jun 16, 2010
+Model: cism
+Version: cism1_100616a
+One-line summary: change Filepath to refer to new drivers directory
+
+M bld/cism.cpl7.template
+
+================================================================================
+Originator: jwolfe, lipscomb
+Date: Jun 16, 2010
+Model: cism
+Version: cism1_100616
+One-line summary: rearrange driver code for future esmf support, clean-up
+
+* rearrange driver code for future esmf support
+D source_glc/glc_comp_mct.F90
+A drivers
+A drivers/cpl_mct
+A drivers/cpl_mct/glc_comp_mct.F90
+A drivers/cpl_esmf
+
+* remove unused code
+D source_glc/glc_domain_size.F90
+D source_glimmer-cism/glimmer_restart_statarr.F90
+D source_glimmer-cism/glimmer_restart_pointarr.F90
+D source_glimmer-cism/glimmer_restart_common.F90
+D source_glimmer-cism/glimmer_restart.F90
+D source_glimmer-cism/glimmer_restart_statscal.F90
+
+* fixed input template files for different resolutions
+M input_templates/ice.config.gland10
+M input_templates/ice.config.gland20
+M input_templates/ice.config.gland5
+
+* enhanced I/O to fix restart issue
+M source_glimmer-cism/glint_io.F90
+M source_glimmer-cism/glide_lithot_io.F90
+M source_glimmer-cism/glint_mbal_io.F90
+M source_glimmer-cism/glide_io.F90
+
+* clean-up and fix documentation
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M source_glc/glc_global_grid.F90
+M source_glc/glc_io.F90
+M source_glc/glc_constants.F90
+M source_glc/glc_global_fields.F90
+M source_glc/glc_FinalMod.F90
+M source_glc/glc_time_management.F90
+M source_glimmer-cism/glide.F90
+M source_glimmer-cism/glimmer_scales.F90
+M source_glimmer-cism/glint_timestep.F90
+M source_glimmer-cism/glide_thck.F90
+M source_glimmer-cism/isostasy_setup.F90
+M source_glimmer-cism/isostasy_el.F90
+M source_glimmer-cism/glide_nc_custom.F90
+M source_glimmer-cism/isostasy_types.F90
+M source_glimmer-cism/glimmer_ts.F90
+M source_glimmer-cism/glimmer_routing.F90
+M source_glimmer-cism/glimmer_searchcircle.F90
+M source_glimmer-cism/glimmer_daily_pdd.F90
+M source_glimmer-cism/xls.F90
+M source_glimmer-cism/glint_mbal.F90
+M source_glimmer-cism/glide_temp.F90
+M source_glimmer-cism/glint_climate.F90
+M source_glimmer-cism/glimmer_utils.F90
+M source_glimmer-cism/glimmer_map_init.F90
+M source_glimmer-cism/glimmer_anomcouple.F90
+M source_glimmer-cism/glimmer_filenames.F90
+M source_glimmer-cism/glimmer_ncparams.F90
+M source_glimmer-cism/glimmer_writestats.F90
+M source_glimmer-cism/glint_initialise.F90
+M source_glimmer-cism/glimmer_paramets.F90
+M source_glimmer-cism/glimmer_vers.F90
+M source_glimmer-cism/glide_lithot1d.F90
+M source_glimmer-cism/glimmer_map_CFproj.F90
+M source_glimmer-cism/glide_mask.F90
+M source_glimmer-cism/glide_profile.F90
+M source_glimmer-cism/glide_setup.F90
+M source_glimmer-cism/glimmer_map_proj4.F90
+M source_glimmer-cism/glide_types.F90
+M source_glimmer-cism/glide_velo.F90
+M source_glimmer-cism/glimmer_global.F90
+M source_glimmer-cism/glimmer_map_types.F90
+M source_glimmer-cism/glimmer_deriv.F90
+M source_glimmer-cism/glimmer_ncdf.F90
+M source_glimmer-cism/glimmer_coordinates.F90
+M source_glimmer-cism/kelvin.F90
+M source_glimmer-cism/glide_stop.F90
+M source_glimmer-cism/ncdf_utils.F90
+M source_glimmer-cism/glint_example_clim.F90
+M source_glimmer-cism/glimmer_log.F90
+M source_glimmer-cism/glimmer_integrate.F90
+M source_glimmer-cism/glint_precip_param.F90
+M source_glimmer-cism/glint_global_grid.F90
+M source_glimmer-cism/glimmer_sparse.F90
+M source_glimmer-cism/glint_constants.F90
+M source_glimmer-cism/glide_diagnostics.F90
+M source_glimmer-cism/isostasy.F90
+M source_glimmer-cism/glint_global_interp.F90
+M source_glimmer-cism/glimmer_config.F90
+M source_glimmer-cism/glint_main.F90
+M source_glimmer-cism/glimmer_pdd.F90
+M source_glimmer-cism/glint_mpinterp.F90
+M source_glimmer-cism/glint_interp.F90
+M source_glimmer-cism/glide_lithot3d.F90
+M source_glimmer-cism/glimmer_physcon.F90
+M source_glimmer-cism/glimmer_map_trans.F90
+M source_glimmer-cism/profile.F90
+M source_glimmer-cism/glimmer_ncio.F90
+M source_glimmer-cism/glide_lithot.F90
+M source_glimmer-cism/glint_mbal_coupling.F90
+M source_glimmer-cism/glint_smb.F90
+M source_glimmer-cism/glint_type.F90
+
+================================================================================
+Originator: jwolfe
+Date: Jun 08, 2010
+Model: cism
+Version: cism1_100608
+One-line summary: Additional resolution support, fix calendar bug
+
+M bld/cism.cpl7.template
+M source_glc/glc_io.F90
+M source_glc/glc_time_management.F90
+
+================================================================================
+Originator: jwolfe
+Date: Jun 03, 2010
+Model: cism
+Version: cism1_100603
+One-line summary: Update cpl7 template to work correctly with hybrid and branch runs
+
+M bld/cism.cpl7.template
+
+================================================================================
+Originator: jwolfe
+Date: May 25, 2010
+Model: cism
+Version: cism1_100525a
+One-line summary: patch to new tag, deleting one file that no longer belongs
+
+D source_glc/glc_glint_interp.F90
+
+================================================================================
+Originator: jwolfe
+Date: May 25, 2010
+Model: cism
+Version: cism1_100525
+One-line summary: initial commit of cism1, which is more-or-less the same as
+ $SVN/glc/branch_tags/glimmer-cism-gcm_tags/glimmer-cism-gcm03_glc4_100301
+ except the var.def files have been removed from the bld as well
+ as the scripts to handle them. CSEG decided not to require
+ python, which one of the scripts uses, so the resulting _io.F90
+ files are included in source_glimmer-cism instead.
+
+A source_slap
+A source_slap/dmset.F
+A source_slap/dlaputil.F
+A source_slap/xersla.F
+A source_slap/dcgs.F
+A source_slap/blas.F
+A source_slap/dmvops.F
+A source_slap/mach.F
+D bld/namelist_definition_gglc.xml
+M bld/configure
+M bld/config_definition.xsl
+D bld/gglc.cpl7.template
+M bld/README
+A bld/namelist_definition_cism.xml
+D bld/namelist_defaults_gglc.xml
+A bld/cism.cpl7.template
+M bld/build-namelist
+M bld/config_definition.xml
+A bld/namelist_defaults_cism.xml
+M source_glc/glc_glint_interp.F90
+M source_glc/glc_comp_mct.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M source_glc/glc_global_grid.F90
+M source_glc/glc_io.F90
+M source_glc/glc_constants.F90
+M source_glc/glc_global_fields.F90
+M source_glc/glc_time_management.F90
+A source_glimmer-cism
+A source_glimmer-cism/glide.F90
+A source_glimmer-cism/glimmer_scales.F90
+A source_glimmer-cism/glint_timestep.F90
+A source_glimmer-cism/glide_thck.F90
+A source_glimmer-cism/glide_lithot_io.F90
+A source_glimmer-cism/isostasy_setup.F90
+A source_glimmer-cism/isostasy_el.F90
+A source_glimmer-cism/glide_nc_custom.F90
+A source_glimmer-cism/glint_mbal_io.F90
+A source_glimmer-cism/glimmer_restart_statarr.F90
+A source_glimmer-cism/isostasy_types.F90
+A source_glimmer-cism/glimmer_ts.F90
+A source_glimmer-cism/glimmer_routing.F90
+A source_glimmer-cism/glimmer_searchcircle.F90
+A source_glimmer-cism/glimmer_daily_pdd.F90
+A source_glimmer-cism/xls.F90
+A source_glimmer-cism/glint_mbal.F90
+A source_glimmer-cism/glide_temp.F90
+A source_glimmer-cism/glint_climate.F90
+A source_glimmer-cism/glimmer_utils.F90
+A source_glimmer-cism/glimmer_map_init.F90
+A source_glimmer-cism/glimmer_anomcouple.F90
+A source_glimmer-cism/glimmer_restart_pointarr.F90
+A source_glimmer-cism/glimmer_filenames.F90
+A source_glimmer-cism/glimmer_ncparams.F90
+A source_glimmer-cism/glimmer_writestats.F90
+A source_glimmer-cism/glint_initialise.F90
+A source_glimmer-cism/glimmer_paramets.F90
+A source_glimmer-cism/glimmer_vers.F90
+A source_glimmer-cism/glide_lithot1d.F90
+A source_glimmer-cism/glimmer_map_CFproj.F90
+A source_glimmer-cism/glide_mask.F90
+A source_glimmer-cism/glint_io.F90
+A source_glimmer-cism/glide_profile.F90
+A source_glimmer-cism/glide_setup.F90
+A source_glimmer-cism/glimmer_map_proj4.F90
+A source_glimmer-cism/glimmer_restart_common.F90
+A source_glimmer-cism/glide_types.F90
+A source_glimmer-cism/glide_velo.F90
+A source_glimmer-cism/glimmer_global.F90
+A source_glimmer-cism/glimmer_map_types.F90
+A source_glimmer-cism/glimmer_deriv.F90
+A source_glimmer-cism/glimmer_coordinates.F90
+A source_glimmer-cism/glimmer_ncdf.F90
+A source_glimmer-cism/kelvin.F90
+A source_glimmer-cism/glide_stop.F90
+A source_glimmer-cism/ncdf_utils.F90
+A source_glimmer-cism/glint_example_clim.F90
+A source_glimmer-cism/glimmer_log.F90
+A source_glimmer-cism/glimmer_integrate.F90
+A source_glimmer-cism/glint_precip_param.F90
+A source_glimmer-cism/glimmer_restart.F90
+A source_glimmer-cism/glint_global_grid.F90
+A source_glimmer-cism/glimmer_sparse.F90
+A source_glimmer-cism/glint_constants.F90
+A source_glimmer-cism/glide_diagnostics.F90
+A source_glimmer-cism/isostasy.F90
+A source_glimmer-cism/glint_global_interp.F90
+A source_glimmer-cism/glimmer_config.F90
+A source_glimmer-cism/glint_main.F90
+A source_glimmer-cism/glimmer_pdd.F90
+A source_glimmer-cism/glint_mpinterp.F90
+A source_glimmer-cism/glint_interp.F90
+A source_glimmer-cism/glide_lithot3d.F90
+A source_glimmer-cism/glimmer_restart_statscal.F90
+A source_glimmer-cism/glimmer_physcon.F90
+A source_glimmer-cism/profile.F90
+A source_glimmer-cism/glimmer_map_trans.F90
+A source_glimmer-cism/glimmer_ncio.F90
+A source_glimmer-cism/glide_lithot.F90
+A source_glimmer-cism/glint_mbal_coupling.F90
+A source_glimmer-cism/glide_io.F90
+A source_glimmer-cism/glint_type.F90
+A source_glimmer-cism/glint_smb.F90
+M ChangeLog
+D source_glimmer
+A input_templates/ice.config.gland10
+M input_templates/ice.config.gland20
+A input_templates/ice.config.gland5
+M README
+
+================================================================================
+Originator: jwolfe
+Date: May 07, 2010
+Model: glc
+Version: glc4_100507
+One-line summary: bug fixes and clean up
+
+* replace hard-wired unit numbers with calls to shr_file_getunit
+M source_glimmer/glimmer_ts.F90
+M source_glimmer/glimmer_config.F90
+
+* clean up unused files
+D input_templates/glc_in.jw
+D input_templates/glc_in
+
+* rename "ice.config" file to "cism.config"
+M bld/gglc.cpl7.template
+M bld/namelist_defaults_gglc.xml
+
+================================================================================
+Originator: jwolfe
+Date: Mar 30, 2010
+Model: glc
+Version: glc4_100330
+One-line summary: CCSMize output filenames, add tools for creating overlap files,
+ bug fix
+
+* CCSMize output filenames
+A source_glc/glc_io.F90
+M bld/gglc.cpl7.template
+M input_templates/ice.config.gland20
+M source_glimmer/glint_initialise.F90
+
+* Add tools for creating overlap files
+A tools
+A tools/README.glc_tools
+A tools/README.glc_overlap_tools
+A tools/glc2scripConvert.ncl
+A tools/scrip_make_wgts_CCSM_to_GLC_bilin.csh
+A tools/scrip2CLMoverlap.ncl
+
+* Fix bug that closed log file before the end of output
+M source_glimmer/glint_main.F90
+
+================================================================================
+Originator: jwolfe
+Date: Mar 16, 2010
+Model: glc
+Version: glc4_100316
+One-line summary: Support for fracdata files for downscaling
+
+* Changed output for gglc.buildnml.csh to point at fracdata file
+M bld/gglc.cpl7.template
+
+* The following files were changed by Bill Lipscomb to support the use of fracdata
+ files for downscaling:
+M source_glc/glc_InitMod.F90
+M source_glc/glc_comp_mct.F90
+M source_glc/glc_constants.F90
+M source_glc/glc_global_grid.F90
+M source_glc/glc_glint_interp.F90
+M source_glimmer/glint_global_grid.F90
+M source_glimmer/glint_interp.F90
+M source_glimmer/glint_main.F90
+M source_glimmer/glint_mbal_coupling.F90
+M source_glimmer/glint_timestep.F90
+
+================================================================================
+Originator: jwolfe
+Date: Mar 01, 2010
+Model: glc
+Version: glc4_100301
+One-line summary: Set test cell information in code based on global grid size,
+ instead of set as parameters in glc_constants (caused a
+ problem with T31 grid)
+
+M source_glc/glc_global_grid.F90 ----- Added test cell settings based on
+ global grid size
+M source_glc/glc_constants.F90 ------- Removed parameter attribute from
+ test cell constants
+
+================================================================================
+Originator: erik
+Date: Nov 03, 2009
+Model: glc
+Version: glc4_091103
+One-line summary: Point to checked in version of topo files, add documentation,
+ add config files to buildnml script so can change, start adding
+ xml support.
+
+M bld/gglc.cpl7.template -------------- Point to checked in version of topo files
+ Add documentation and put config file in
+ the buildnml script so can be changed by
+ the user. Start adding flexibility for
+ support for different GLC grids.
+
+A bld/README -------------------------- Document files in the build directory
+A bld/namelist_defaults_gglc.xml ------ GGLC specific namelist defaults
+A bld/namelist_definition_gglc.xml ---- GGLC specific namelist definition
+A bld/namelist_defaults_overall.xml --- Overall namelist defaults
+A bld/namelist_definition_overall.xml - Overall namelist definition
+A bld/namelist_defaults.xsl ----------- Style sheet to display defaults as html
+A bld/namelist_definition.xsl --------- Style sheet to display definition as html
+
+Some scripts starting to add that do NOT work yet.
+
+A bld/build-namelist ----------------- Start adding a build-namelist (not working yet)
+A bld/configure ---------------------- Start adding a configure (not working yet)
+A bld/configure_definition.xml ------- Definition of configurations
+A bld/configure_definition.xsl ------- Style sheet to display definition as html
+
+================================================================================
+Originator: lipscomb
+Date: Oct 27, 2009
+Model: glc
+Version: glc4_091027
+One-line summary: Add glimmer log file and add greenland 20km config file
+
+ M source_glc/glc_InitMod.F90 ---------- Change glimmer log file
+ M source_glimmer/glimmer_log.F90 ------ Add subroutine to change glimmer log
+ M input_templates/ice.config.gland20 -- Add greenland 20km config file
+
+================================================================================
+Originator: erik
+Date: Aug 14, 2009
+Model: glc
+Version: glc4_090814
+One-line summary: Template changes to allow multiple grid files
+
+Set up grid files for 48x96, 1.9x2.5, and 0.9x1.25 resolutions. Abort
+if it isn't one of those. Also remove the coupled_nml namelist as not needed,
+and set the stop_option to never, since the coupler will tell it to stop.
+
+M bld/gglc.cpl7.template
+
+================================================================================
+Originator: lipscomb
+Date: Aug 13, 2009
+Model: glc
+Version: glc4_090813
+One-line summary: Latest changes from Bill Lipscomb
+
+Mostly changes to get time-management working correctly. Also
+make sure proper mask of r8 for some variables. Update some of
+the documentation and formatting.
+
+M README
+M source_glc/glc_glint.F90
+M source_glc/glc_comp_mct.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M source_glc/glc_global_grid.F90
+M source_glc/glc_FinalMod.F90
+M source_glc/glc_time_management.F90
+M source_glimmer/glide.F90
+M source_glimmer/glint_initialise.F90
+M source_glimmer/glimmer_ncio.F90
+
+================================================================================
+Originator: tcraig
+Date: Fri Jan 16, 2009
+Model: glc
+Version: glc4_090116
+One-line summary: Upgrade from ccsm3 to ccsm4 coupling
+
+- Try not to touch any glimmer code
+- Delete unused code and remove use of unused code from used code as needed
+- Add bld and gglc.cpl7.template
+- Delete glc.F90 and add glc_comp_mct.F90 for migration from ccsm3 to ccsm4
+- Update the stdout, stderr, and nml unit numbers to be set from shr_file_getunit,
+ eliminate use of local get_unit in favor of shr_file_getunit in glc
+- Migrate all use of unit 6 to stdout in all source_glc code. includes write,
+ print, and flush statements. this was not done to glimmer.
+- Add a shr_sys_abort to source_glimmer/glimmer_log.F90 before the stop to
+ prevent ccsm4 hang from glimmer abort. (there are probably more calls to
+ stop that need to be fixed in the code).
+- Get rid of use and reference to ccsm3 coupling code
+- Update initialization of glc communicator from ccsm4 driver
+
+Other potential issues still not addressed
+- other unit numbers hardwired in glc/glimmer
+- use of stop, proper aborts
+- reference to gland20.input.nc file from ice.config input file
+- hardwired glc_nec = 10
+- hardwired T31 resolution
+- glimmer mostly writes to unit 6
+- restart flag captured in coupling interface but not used
+
+A bld
+A bld/gglc.cpl7.template
+D mpi/glc_gather_scatter.F90
+D mpi/glc_global_reductions.F90
+D mpi/glc_broadcast.F90
+D mpi/glc_boundary.F90
+D mpi/glc_timers.F90
+M mpi/glc_communicate.F90
+A source_glc/glc_comp_mct.F90
+D source_glc/glc_fileunits.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M source_glc/glc_global_grid.F90
+D source_glc/glc_domain.F90
+M source_glc/glc_exit_mod.F90
+M source_glc/glc_constants.F90
+M source_glc/glc_glint.F90
+D source_glc/glc_blocks.F90
+D source_glc/glc_distribution.F90
+D source_glc/glc.F90
+M source_glc/glc_FinalMod.F90
+M source_glc/glc_ErrorMod.F90
+M source_glc/glc_time_management.F90
+D source_glc/glc_coupled.F90
+D source_glc/glc_io_types.F90
+M source_glimmer/glimmer_log.F90
+
+================================================================================
+Originator: tcraig
+Date: Oct 17, 2009
+Model: glc
+Version: glc_081017
+One-line summary: Port to bluefire, update usability
+
+M input_templates/glc_in
+ update input file paths to bluefire inputdata area
+
+================================================================================
+Originator:
+Date:
+Model: glc
+Version: glc_080817
+One-line summary:
+Changes made:
+
+================================================================================
+
+Originator: jwolfe
+Date: Mon Nov 5 13:39:33 MST 2007
+Model: glc
+Version: glc_071105
+One-line summary: update to include all of Bill Lipscomb's new changes
+Changes made:
+
+? source_glc/glc_glint.F90
+? source_glc/glc_global_fields.F90
+? source_glc/glc_coupled.F90
+? source_glc/glc_global_grid.F90
+? source_glc/POP_files/grid.F90
+M source_glc/glc_domain_size.F90
+M source_glc/glc_InitMod.F90
+M source_glc/glc_RunMod.F90
+M source_glc/glc_domain.F90
+M source_glc/glc_io.F90
+M source_glc/glc_kinds_mod.F90
+M source_glc/glc_constants.F90
+M source_glc/glc.F90
+M source_glc/glc_FinalMod.F90
+M source_glc/glc_ErrorMod.F90
+M source_glc/glc_IOUnitsMod.F90
+M source_glc/glc_time_management.F90
+M source_glimmer/glint_timestep.F90
+M source_glimmer/glint_initialise.F90
+M source_glimmer/glint_example_clim.F90
+M source_glimmer/glint_main.F90
+M source_glimmer/glint_interp.F90
+M input_templates/glc_in
+
+================================================================================
diff --git a/components/cism/ChangeLog_template b/components/cism/ChangeLog_template
new file mode 100644
index 0000000000..42b0273337
--- /dev/null
+++ b/components/cism/ChangeLog_template
@@ -0,0 +1,23 @@
+================================================================================
+Originator:
+Date:
+Model:
+Version:
+One-line summary:
+
+Purpose of changes:
+
+Changes answers relative to previous tag:
+
+Bugs fixed (include bugzilla ID) (http://bugs.cgd.ucar.edu/):
+
+List all modified files, and describe the changes:
+
+Summary of testing:
+
+Externals used for testing:
+
+cism tag used for baseline comparisons:
+
+Any other externals that differed in baseline:
+
diff --git a/components/cism/README.parallelization b/components/cism/README.parallelization
new file mode 100644
index 0000000000..43a918ee67
--- /dev/null
+++ b/components/cism/README.parallelization
@@ -0,0 +1,22 @@
+------------------------------------------------------------------------
+Notes on parallelization of glc
+
+ Bill Sacks
+ Jan 18, 2013
+------------------------------------------------------------------------
+
+(See also glimmer-cism/libglint/README.parallelization.)
+
+The master task is responsible for the full global (i.e., land) grid;
+other tasks have 0-size grids.
+
+In general, grid-related variables are still allocated / initialized
+on other tasks, but with size 0.
+
+Some places where the assumption appears that only the master task has
+points are (this may not be a complete list):
+- glc_global_grid : read_horiz_grid
+- glc_comp_mct : glc_SetgsMap_mct
+- glc_comp_esmf : glc_DistGrid_esmf (similar to glc_comp_mct :
+ glc_SetgsMap_mct)
+
diff --git a/components/cism/SVN_EXTERNAL_DIRECTORIES b/components/cism/SVN_EXTERNAL_DIRECTORIES
new file mode 100644
index 0000000000..022ffa8d2a
--- /dev/null
+++ b/components/cism/SVN_EXTERNAL_DIRECTORIES
@@ -0,0 +1 @@
+glimmer-cism https://github.com/CESM-Development/cism/tags/move_glint_to_cpl_n02
diff --git a/components/cism/bld/README b/components/cism/bld/README
new file mode 100644
index 0000000000..118ef613f2
--- /dev/null
+++ b/components/cism/bld/README
@@ -0,0 +1,23 @@
+Scripts and datasets to create input text files to run the model with CESM
+
+ cism.buildexe ----------------- Script to build cism
+ (called by $CASE.build script)
+ cism.buildnml ----------------- Wrapper to build-namelist, doing some initial setup, etc.
+ (called by $CASEROOT/cesm_setup and $CASEROOT/preview_namelists)
+ cism.build_usernl -------------- Script to create user_nl_cism(_nnnn) files in $CASEROOT
+ (called by $CASEROOT/cesm_setup)
+ cism.template ------------- Script to copy necessary files from CODEROOT to the CASE directory
+ (called by $CASEROOT/cesm_setup)
+ build-namelist ----------------- Script to build the namelists needed by cism
+ (called cism.buildnml)
+ user_nl_cism ------------------- File in $CASEROOT into which users can put namelist modifications
+ (used by build-namelist)
+ trilinosOptions ---------------- Directory containing resolution-dependent trilinosOptions.xml files
+ (used when running with trilinos solver)
+
+XML namelist description files, in namelist_files:
+
+ namelist_definition_cism.xml ------ Definition of all CISM namelist items
+ (also used by clm to build CISM namelist)
+ namelist_defaults_cism.xml -------- Default values to use in CISM namelists
+ (also used by clm to build CISM namelist)
diff --git a/components/cism/bld/README.build-namelist b/components/cism/bld/README.build-namelist
new file mode 100644
index 0000000000..e8cfff212a
--- /dev/null
+++ b/components/cism/bld/README.build-namelist
@@ -0,0 +1,660 @@
+============================================================================
+Synopsis
+============================================================================
+
+SYNOPSIS
+ build-namelist [options]
+OPTIONS
+ -infile "filepath" Specify a file containing namelists to read values from.
+ -namelist "namelist" Specify namelist settings directly on the commandline by supplying
+ a string containing FORTRAN namelist syntax, e.g.,
+ -namelist "&cism_nml dt=1800 /"
+ -help [or -h] Print usage to STDOUT.
+ -silent [-s] Turns on silent mode - only fatal messages issued.
+ -verbose Turn on verbose echoing of informational messages.
+ -caseroot CASEROOT directory variable
+ -scriptsroot SCRIPTSROOT directory variable
+ -inst_string INST_STRING variable
+ -lnd_grid LND_GRID variable
+ -glc_grid GLC_GRID variable
+ -cism_phys CISM_PHYS variable
+
+The precedence for setting the values of namelist variables is (highest to
+lowest):
+ 1. namelist values set by specific command-line options, e.g., paramfile
+ 2. values set on the command-line using the -namelist option
+ 3. values read from the file specified by -infile
+ 4. values from the namelist defaults file or values specifically set in
+ build-namelist
+
+
+============================================================================
+Summary of build-namelist
+============================================================================
+
+build-namelist
+
+ - exists in $CCSMROOT/components/cism/bld (throughout this document,
+ $ALLCAPS denotes an xml variable while $nocaps denotes a perl variable)
+
+ - is called from $CASEBUILD/cism.buildnml
+
+ (cism.buildnml is now just a wrapper to build-namelist)
+
+  - allows the user to edit existing namelist variables or introduce new
+ variables if that is desired
+ (see "user_nl_cism" and "CISM Use Cases" sections below for details)
+
+ - depends on two files in $CCSMROOT/components/cism/bld/namelist_files
+
+ 1. namelist_defaults_cism.xml
+ 2. namelist_definition_cism.xml
+
+ (see "namelist_definition_cism.xml" and "namelist defaults.xml" sections
+ below for details)
+
+ - is invoked upon every build -AND- upon every call to cism.buildnml
+
+
+============================================================================
+user_nl_cism
+============================================================================
+
+ALL USER-SPECIFIED MODIFICATIONS TO THE CISM NAMELIST AND CISM.CONFIG FILE
+SHOULD OCCUR AS ENTRIES IN $CASEROOT/user_nl_cism. Simply append each
+variable entry to user_nl_cism prior to running build-namelist. Note that
+there is no distinction in user_nl_cism between variables that will appear
+in cism_in and those that will appear in cism.config: simply add a new
+variable setting in user_nl_cism, and it will be added to the appropriate
+place in cism_in or cism.config.
+
+For example, to set the value of cism_debug to .true. and basal_tract to the
+array (/1,2,3,4,5/), include the following in user_nl_cism:
+
+ cism_debug = .true.
+ basal_tract = 1 2 3 4 5
+
+After running build-namelist, the following will appear in cism_in:
+
+ &cism_params
+ ...
+ cism_debug = .true.
+ ...
+ /
+
+and the following will appear in cism.config:
+
+ [parameters]
+ basal_tract = 1 2 3 4 5
+ ...
+
+
+A new utility in $CASEROOT, preview_namelists, will enable you to preview the
+cism_in namelist and cism.config file in $CASEROOT/CaseDocs at any time
+
+
+============================================================================
+namelist_definition_cism.xml
+============================================================================
+
+The file namelist_definition_cism.xml is located in the directory
+$CCSMROOT/components/cism/bld/namelist_files/. It contains entries for all namelist
+variables that can be output by build-namelist.
+
+As mentioned in the "CISM Use Cases" section below, a modified copy of this
+file (with the same name) may be placed in the directory
+$CASEROOT/SourceMods/src.cism/. Otherwise the namelist definition file
+appears in build-namelist as follows:
+
+ $nl_definition_file = \
+ "$cfgdir/namelist_files/namelist_definition_cism.xml";
+
+Each namelist variable is defined in an element. The content of the
+element is the documentation of how the variable is used. Other aspects of
+the variable's definition are expressed as attributes of the
+element. Note that it is an XML requirement that the attribute values are
+enclosed in quotes. The attributes are:
+
+ 1. id
+ The variable's name. Although Fortran is case insensitive, the name
+ MUST BE LOWER CASE for the perl scripts.
+
+ 2. type
+ An abbreviation of the Fortran declaration for the variable. Valid
+ declarations are:
+
+ char*n
+ integer
+ logical
+ real
+
+ Any of these types may be followed by a comma separated list of
+ integers enclosed in parenthesis to indicate an array. The current
+ namelist validation code only distinguishes between string and
+ non-string types.
+
+ All namelist values are stored in exactly the format that is required
+ in a valid namelist, so if that value is a string then the quotes are
+ stored as part of the value.
+
+ 3. category
+ A category assigned for organizing the documentation.
+
+ 4. group
+ The name of the namelist (or group) that the variable is declared
+ in. Some groups will appear in cism_in; others (by convention, those
+ whose names begin with 'cism_config') will appear in cism.config.
+
+ 5. valid_values (optional)
+ This attribute is mainly useful for variables that have only a small
+ number of allowed values; an empty string denotes no restrictions,
+ as does omitting the valid_values attribute entirely.
+
+ 6. input_pathname (optional)
+ Only include this attribute to indicate that the variable contains the
+ pathname of an input dataset that resides in the CESM inputdata
+ directory tree. The recognized values are "abs" to indicate that an
+ absolute pathname is required or "rel:var_name" to indicate that the
+ pathname is relative and that the namelist variable "var_name" contains
+ the absolute root directory.
+
+The following is an example entry for the dt_option variable:
+
+
+ time-step units
+
+
+Any text that appears after the first > (after valid_values) and before the
+ string is used for documentation purposes only. In the example
+above, "time-step units" is ignored by build-namelist.
+
+
+============================================================================
+namelist_defaults_cism.xml
+============================================================================
+
+The file namelist_defaults_cism.xml is located in the directory
+$CCSMROOT/components/cism/bld/namelist_files/. It provides default values for
+variables contained in the input namelist definition file.
+
+The build-namelist script is passed the glc_grid, lnd_grid and cism_phys
+attributes as command-line arguments. These attributes, along with optional
+user-specified attributes, are used in build-namelist to find the best match
+when looking for default values of variables.
+
+In build-namelist the namelist defaults file appears as follows
+
+ $nl_defaults_file = "$cfgdir/namelist_files/namelist_defaults_cism.xml";
+
+The default namelist value for a given namelist variable is the one that
+matches the most attributes; if multiple values match the same number of
+attributes then the first value encountered will be chosen. For example,
+consider the namelist variable ewn. Its entry in the defaults file is
+
+ 151
+ 76
+ 301
+ 301
+
+The default value of ewn therefore depends on the value of glc_grid.
+
+Not all namelist items have defaults specified in
+namelist_defaults_cism.xml. Those that don't have a default value there have
+their default value set in build-namelist. This applies, for example, to
+namelist items whose default value is derived from other variables (e.g.,
+'runid' is taken from the case name, and so does not have a default in
+namelist_defaults_cism.xml).
+
+
+============================================================================
+build-namelist details
+============================================================================
+
+--- Overview of four main perl objects ---
+
+build-namelist has four perl objects that it uses
+
+ 1. $cfg
+ A configuration object obtained from the CISM config_cache.xml
+ file. This specifies the glc and land grid resolutions.
+
+ my $cfg = Build::Config->new('config_cache.xml');
+
+ 2. $definition
+ A namelist definition object which provides a method for verifying that
+ the output namelist variables are in the definition file and are output
+ in the correct namelist groups.
+
+ my $definition = Build::NamelistDefinition->new($nl_definition_file);
+
+ 3. $defaults
+ A namelist defaults object which provides default values for variables
+ contained in the namelist_definition_cism.xml file.
+
+ my $defaults = Build::NamelistDefaults->new($nl_defaults_file, $cfg);
+
+ Note that both $nl_defaults_file and $cfg are passed - this is why the
+ glc_grid, lnd_grid and cism_phys attributes do not need to be passed to
+ add_defaults() (see "Examples" subsection below)
+
+ 4. $nl
+ An empty namelist object which contains the model namelist values
+ (where the values are determined by the order of precedence outlined
+ in the "Synopsis" section above)
+
+ my $nl = Build::Namelist->new();
+
+--- Required $SCRIPTSROOT/../perl5lib perl files ---
+
+The root directory for the perl5 required utilities is
+
+  my $perl5lib_dir = "${SCRIPTSROOT}/../perl5lib";
+
+This directory contains all the required perl files:
+
+ 1. The Build::Config module provides utilities to access the configuration
+ information in the config_cache.xml file
+
+ $perl5lib_dir/Build/Config.pm
+
+ 2. The Build::NamelistDefinition module provides utilities to validate
+ that the output namelists are consistent with the namelist definition
+ file
+
+ $perl5lib_dir/Build/NamelistDefinition.pm
+
+ 3. The Build::NamelistDefaults module provides a utility to obtain default
+ values of namelist variables based on finding a best fit with the
+ attributes specified in the defaults file.
+
+ $perl5lib_dir/Build/NamelistDefaults.pm
+
+ 4. The Build::Namelist module provides utilities to parse input namelists,
+ to query and modify namelists, and to write output namelists.
+
+ $perl5lib_dir/Build/Namelist.pm
+
+--- Creation of $nl ---
+
+Additions to the namelist object, $nl, are made via calls to the
+build-namelist method add_default(), which adds a value for the specified
+variable to the specified namelist object. This method checks the
+definition file and adds the variable to the correct namelist group. The
+value can be provided by using the optional argument key 'val' in the
+calling list, otherwise a default value is obtained from the namelist
+defaults object. If no default value is found this method throws an
+exception unless the 'nofail' option is set to 1 (true).
+
+Additional optional keyword=>value pairs may be specified. If the keyword
+'val' is not present, then any other keyword=>value pairs that are specified
+will be used to match attributes in the defaults file.
+
+The variables already in the object have the higher precedence, so if the
+specified variable is already defined in the object it does not get
+overwritten.
+
+In some cases, a namelist variable only appears in the namelist if its value
+is given by the user in user_nl_cism. For these variables, the default value
+is given in the code rather than in namelist_defaults_cism.xml. This is
+achieved by NOT putting an add_default call for this variable in
+build-namelist.
+
+--- Examples ---
+
+ 1. Use the default value for namelist variable cism_debug
+
+ build-namelist:
+
+ add_default($nl, 'cism_debug');
+
+ namelist_defaults_cism.xml:
+
+ .false.
+
+ namelist_definitions_cism.xml:
+
+
+ Default: false
+
+
+ result in cism_in:
+
+ &cism_params
+ ...
+       cism_debug = .false.
+ ...
+ /
+
+
+ 2. Set the value for the namelist variable ewn, which depends on the value
+ of "glc_grid" in the config_cache.xml file. Note that the value of
+ "glc_grid" does not need to be explicitly passed.
+
+ build-namelist:
+
+ add_default($nl, 'ewn');
+
+ namelist_defaults_cism.xml:
+
+ 151
+ 76
+ 301
+ 301
+
+ namelist_definitions_cism.xml:
+
+
+
+
+ result in cism.config if glc_grid="gland5":
+
+ [grid]
+ ...
+ ewn = 301
+ ...
+
+ result in cism.config if glc_grid="gland10":
+
+ [grid]
+ ...
+ ewn = 151
+ ...
+
+
+ 3. Set the value for the namelist variable runid to $CASE
+
+ build-namelist:
+
+ add_default($nl, 'runid', 'val'=>"$CASE");
+
+ namelist_defaults_cism.xml:
+
+ The contents of namelist defaults does not matter, since a value is
+ specified in build-namelist.
+
+ namelist_definitions_cism.xml:
+
+
+ Simulation identifier (ie case name)
+
+
+ result in cism_in if $CASE="mycase":
+
+ &time_manager_nml
+ ...
+ runid = 'mycase'
+ ...
+ /
+
+
+ 4a. Add a default for variable $var if an appropriate value is found,
+ otherwise do not include $var in the namelist
+
+ build-namelist:
+
+ add_default($nl, $var, 'nofail'=>1)
+
+ 4b. Set the value for the namelist variable $var, but do not prepend it
+ with a directory prefix.
+
+ build-namelist:
+
+ add_default($nl, $var, 'noprepend'=>1)
+
+ 5. Only include the namelist variable 'calving_fraction' in cism.config if a
+ value is given by the user in user_nl_cism; otherwise, use default value
+ given in the code
+
+ (No references to calving_fraction in build-namelist)
+
+============================================================================
+Handling multiple instances
+============================================================================
+
+When NINST_GLC > 1, there are multiple instances of CISM (i.e., an
+ensemble). Each instance has its own namelist, and its own cism.config file.
+
+In this case, there is no user_nl_cism. Instead, there is one such file for
+each instance: user_nl_cism_0001, user_nl_cism_0002, etc. User modifications
+in user_nl_cism_0001 will be put in cism_in_0001 or cism.config_0001, and
+similarly for user_nl_cism_0002, etc. If there are modifications to cism_in
+or cism.config that you want to make for all instances, you must put these
+modifications in ALL of the user_nl_cism files (user_nl_cism_0001,
+user_nl_cism_0002, etc.).
+
+Note that one of the namelist items in cism_in is 'paramfile', which gives
+the name of the cism.config file. For simplicity, this namelist item is
+specified by a command-line option from the script calling build-namelist,
+and cannot be overridden by users. In the single instance case, cism_in
+specifies a paramfile of 'cism.config'. In the multi-instance case,
+cism_in_0001 specifies a paramfile of 'cism.config_0001', cism_in_0002
+specifies a paramfile of 'cism.config_0002', etc.
+
+
+============================================================================
+CISM Use Cases
+============================================================================
+
+Q: How do I add my own case-specific namelist variable changes?
+
+A: For each namelist variable, just add a line of the form
+
+ namelist_var = namelist_val
+
+ to $CASEROOT/user_nl_cism. As shown in the "user_nl_cism" section above,
+ one example is to set cism_debug to .true. and basal_tract to the array
+ (/1,2,3,4,5/). The file $CASEROOT/user_nl_cism would then look as follows
+
+ ...
+ cism_debug = .true.
+ basal_tract = 1 2 3 4 5
+
+ All cism namelist variables, as well as entries in cism.config, can be
+ changed in this manner.
+
+----------------------------------------------------------------------------
+
+Q: Rather than making the same changes to user_nl_cism over and over, can
+ I change the default values used by build-namelist?
+
+A: Yes, you can modify the namelist_defaults_cism.xml file in
+ $CCSMROOT/components/cism/bld/namelist_files to change the default values. For
+ example:
+
+ 1. If you want to change geothermal from -5.e-2 to -6.e-2, you
+ would change
+
+ -5.e-2
+
+ to
+
+ -6.e-2
+
+ This would result in the following new default setting in cism.config:
+
+ [parameters]
+ ...
+ geothermal = -6.e-2
+ ...
+
+ 2. If you want to change ntem from 1. to 2. for runs using the gland5
+ grid, without affecting the value for any other grid, you would change
+
+ 1.
+
+ to
+
+ 2.
+
+ This would result in the following new default setting in cism.config:
+
+ [time]
+ ...
+ ntem = 2.
+ ...
+
+ for any runs with GLC_GRID=gland5, while not changing the value for
+ other grids.
+
+----------------------------------------------------------------------------
+
+Q: How do I add new cism namelist variables for just my case?
+
+A: Place a modified copy of namelist_definition_cism.xml (that includes your
+ new variables) in the $CASEROOT/SourceMods/src.cism directory. You do
+ not need to modify build-namelist or the defaults file, just set the
+ appropriate values for the new variables in $CASEROOT/user_nl_cism file.
+
+ For example, to add a variable the_answer to the cism.config parameters
+ and set it equal to 42, you would take the following steps:
+
+ 1. Copy $CODEROOT/glc/cism/bld/namelist_definition_cism.xml to
+ $CASEROOT/SourceMods/src.cism
+
+ 2. Add the following (it can be added anywhere as long as it isn't in a
+ comment, but for consistency put it in the "group: cism.config:
+ parameters" block):
+
+
+ The answer to the question of life, the universe and everything
+
+
+ 3. Add the following to $CASEROOT/user_nl_cism
+
+ the_answer=42
+
+ Note that you will also need to include a SourceMod to read in this
+ new variable, otherwise you will get a runtime error!
+
+----------------------------------------------------------------------------
+
+Q: How do I add a new cism namelist variable to the cism code base?
+
+A: Follow the instructions above, but rather than editing a copy in
+ SourceMods/src.cism, edit namelist_definition_cism.xml directly in
+ $CODEROOT/glc/cism/bld/namelist_files. You may also want to update
+ namelist_defaults_cism.xml and build-namelist so your new value can be
+ set automatically. Continuing the example above, we would add
+
+ 42
+
+ to namelist_defaults_cism.xml and
+
+ add_default($nl, 'the_answer');
+
+ to build-namelist. Again, note that this will build a namelist with the
+ new variable, but you will need to update the source code to read it!
+
+----------------------------------------------------------------------------
+
+Q: How do I introduce a new namelist variable that has dependencies on other
+ namelist variables?
+
+A: You can pass values through the add_default() function. As above, suppose
+ we want to introduce a new variable 'the_answer'. Let's also introduce a
+   new variable named 'is_binary'. To have the_answer depend on
+   is_binary, we do the following.
+
+ 1. Add is_binary and the_answer to namelist_definitions_cism.xml (for
+ this example, assume is_binary is of type logical)
+
+ 2. Add this dependency to the namelist_defaults_cism.xml file
+
+ 42
+ 101010
+
+ 3. Add the following lines to build-namelist (note that is_binary must
+ be set before the_answer)
+
+ add_default($nl, 'is_binary', 'val'=>".false.");
+ my $is_binary = $nl->get_value('is_binary');
+ add_default($nl, 'the_answer', 'is_binary'=>"$is_binary");
+
+
+ Note that strings can not have spaces in the XML; to remove spaces
+ from a Perl variable use the following prior to sending it through
+ add_default as a dependency
+
+ $varname =~ s/ //g;
+
+----------------------------------------------------------------------------
+
+Q: How do I add an optional section in the cism.config file? There are some
+ CISM settings that depend on the mere presence / absence of a certain
+ section.
+
+A: An existing example of this is the [GTHF] section. If this section is
+ present in the config file, then certain code is enabled, controlled by
+ the variables in this section. The following changes were needed to
+ create this optional section; this is also what you'll have to do to
+ create a new optional section:
+
+ 1. Added a do_gthf variable in
+ namelist_files/namelist_definition_cism.xml, with
+ group="cism_config_control":
+
+
+ Determines whether the GTHF (geothermal heat flux) section is output to the cism.config file.
+ Default: false
+
+
+ Also added a corresponding default value in
+ namelist_files/namelist_defaults_cism.xml:
+
+ .false.
+
+ 2. Added a new section in namelist_files/namelist_definition_cism.xml
+ that lists all of the possible variables that might appear in that
+ section. See the section with the heading "group: cism.config: GTHF
+ (geothermal heat flux)".
+
+ Note that the variables in this section do NOT appear in
+ namelist_defaults_cism.xml. Thus, if a user sets do_gthf to
+ .true. without specifying the values of any of these variables, they
+ will take their values from the hard-coded defaults in the cism code.
+
+ 3. Added some code in build-namelist that controls whether to print the
+ [GTHF] section in the cism.config file:
+
+ # Some code in cism keys off of whether the [GTHF] section is present
+ # (even if it's empty), thus we only want to add this section if it's
+ # really desired by the user
+ add_default($nl, 'do_gthf');
+ if ($nl->get_value('do_gthf') eq '.true.') {
+ print $fh "\n[GTHF]\n";
+ $nl->write_cism_config($fh, "cism_config_gthf");
+ }
+ else {
+ confirm_empty("cism_config_gthf", "items in gthf section can only be set if do_gthf is set to .true.");
+ }
+
+
+ Note that, if defaults were desired (specified in
+ namelist_defaults_cism.xml), add_default lines could be added in the
+ 'if' block of the above conditional, before the call to
+ write_cism_config.
+
+
diff --git a/components/cism/bld/build-namelist b/components/cism/bld/build-namelist
new file mode 100755
index 0000000000..b09d1a04ab
--- /dev/null
+++ b/components/cism/bld/build-namelist
@@ -0,0 +1,1041 @@
+#!/usr/bin/env perl
+#-----------------------------------------------------------------------------------------------
+#
+# build-namelist
+#
+# This script builds the namelists for the CISM configuration of CESM1.
+#
+# build-namelist uses a config_cache.xml file that currently contains the glc grid information.
+# build-namelist reads this file to obtain information it needs to provide
+# default values that are consistent with the CISM library. For example, the grid resolution
+# is obtained from the cache file and used to determine appropriate defaults for namelist input
+# that is resolution dependent.
+#
+# The simplest use of build-namelist is to execute it from the build directory where configure
+# was run. By default it will use the config_cache.xml file that was written by configure to
+# determine the build time properties of the executable, and will write the files that contain
+# the output namelists in that same directory.
+#
+#
+# Date Contributor Modification
+# -------------------------------------------------------------------------------------------
+# 2012-01-30 Vertenstein Original version
+#--------------------------------------------------------------------------------------------
+use strict;
+use Cwd qw(getcwd abs_path);
+use English;
+use Getopt::Long;
+use IO::File;
+#-----------------------------------------------------------------------------------------------
+
+sub usage {
+ die < 0,
+ silent => 0,
+ caseroot => undef,
+ scriptsroot => undef,
+ inst_string => undef,
+ paramfile => undef,
+ lnd_grid => undef,
+ glc_grid => undef,
+ cism_phys => undef,
+ );
+
+GetOptions(
+ "h|help" => \$opts{'help'},
+ "infile=s" => \$opts{'infile'},
+ "namelist=s" => \$opts{'namelist'},
+ "s|silent" => \$opts{'silent'},
+ "v|verbose" => \$opts{'verbose'},
+ "caseroot=s" => \$opts{'caseroot'},
+ "scriptsroot=s" => \$opts{'scriptsroot'},
+ "inst_string=s" => \$opts{'inst_string'},
+ "paramfile=s" => \$opts{'paramfile'},
+ "lnd_grid=s" => \$opts{'lnd_grid'},
+ "glc_grid=s" => \$opts{'glc_grid'},
+ "cism_phys=s" => \$opts{'cism_phys'},
+) or usage();
+
+# Give usage message.
+usage() if $opts{'help'};
+
+# Check for unparsed arguments
+if (@ARGV) {
+ print "ERROR: unrecognized arguments: @ARGV\n";
+ usage();
+}
+
+# Define print levels:
+# 0 - only issue fatal error messages
+# 1 - only informs what files are created (default)
+# 2 - verbose
+my $print = 1;
+if ($opts{'silent'}) { $print = 0; }
+if ($opts{'verbose'}) { $print = 2; }
+my $eol = "\n";
+
+if ($print>=2) { print "Setting CISM configuration script directory to $cfgdir$eol"; }
+
+my $CASEROOT = $opts{'caseroot'};
+my $SCRIPTSROOT = $opts{'scriptsroot'};
+my $INST_STRING = $opts{'inst_string'};
+my $LND_GRID = $opts{'lnd_grid'};
+my $GLC_GRID = $opts{'glc_grid'};
+my $CISM_PHYS = $opts{'cism_phys'};
+
+#-----------------------------------------------------------------------------------------------
+# Build empty config_cache.xml file (needed below)
+
+my $bldconfdir = "$CASEROOT/Buildconf/datmconf";
+if ( $opts{'debug'} ) {
+ my $cmd = "mkdir -p $bldconfdir";
+ print "Execute: $cmd\n";
+ system( "$cmd" );
+ chdir( "$bldconfdir" );
+}
+
+# build config_cache.xml file (needed below)
+my $config_cache = "${CASEROOT}/Buildconf/cismconf/config_cache.xml";
+my $fh = new IO::File;
+$fh->open(">$config_cache") or die "** can't open file: $config_cache\n";
+print $fh <<"EOF";
+
+
+
+
+
+
+EOF
+$fh->close;
+if ($print>=2) { print "Wrote file $config_cache $eol"; }
+(-f "config_cache.xml") or die <<"EOF";
+** $ProgName - Cannot find configuration cache file: config_cache.xml\" **
+EOF
+
+#-----------------------------------------------------------------------------------------------
+# Make sure we can find required perl modules, definition, and defaults files.
+# Look for them under the directory that contains the configure script.
+
+# The root directory for the perl5 required utilities
+my $perl5lib_dir = "${SCRIPTSROOT}/../utils/perl5lib";
+
+# The root directory for the perl SetupTools.pm module
+my $SetupTools_dir = "${SCRIPTSROOT}/Tools";
+
+# The Build::Config module provides utilities to access the configuration information
+# in the config_cache.xml file (see below)
+(-f "$perl5lib_dir/Build/Config.pm") or die <<"EOF";
+** $ProgName - Cannot find perl module \"Build/Config.pm\" in directory \"$perl5lib_dir\" **
+EOF
+
+# The Build::NamelistDefinition module provides utilities to validate that the output
+# namelists are consistent with the namelist definition file
+(-f "$perl5lib_dir/Build/NamelistDefinition.pm") or die <<"EOF";
+** $ProgName - Cannot find perl module \"Build/NamelistDefinition.pm\" in directory \"$perl5lib_dir\" **
+EOF
+
+# The Build::NamelistDefaults module provides a utility to obtain default values of namelist
+# variables based on finding a best fit with the attributes specified in the defaults file.
+(-f "$perl5lib_dir/Build/NamelistDefaults.pm") or die <<"EOF";
+** $ProgName - Cannot find perl module \"Build/NamelistDefaults.pm\" in directory \"$perl5lib_dir\" **
+EOF
+
+# The Build::Namelist module provides utilities to parse input namelists, to query and modify
+# namelists, and to write output namelists.
+(-f "$perl5lib_dir/Build/Namelist.pm") or die <<"EOF";
+** $ProgName - Cannot find perl module \"Build/Namelist.pm\" in directory \"$perl5lib_dir\" **
+EOF
+
+# The namelist definition file contains entries for all namelist variables that
+# can be output by build-namelist. The version of the file that is associated with a
+# fixed CISM tag is $cfgdir/namelist_files/namelist_definition.xml. To aid developers
+# who make use of the SourceMods/src.cism directory - we allow the definition file
+# to come from that directory
+my $nl_definition_file;
+if (-f "${CASEROOT}/SourceMods/src.cism/namelist_definition_cism.xml") {
+ $nl_definition_file = "${CASEROOT}/SourceMods/src.cism/namelist_definition_cism.xml";
+}
+if (! defined $nl_definition_file) {
+ # default location of namelist definition file
+ $nl_definition_file = "$cfgdir/namelist_files/namelist_definition_cism.xml";
+ (-f "$nl_definition_file") or die <<"EOF";
+ ** $ProgName - ERROR: Cannot find namelist definition file \"$nl_definition_file\" **
+EOF
+}
+if ($print>=2) { print "Using namelist definition file $nl_definition_file$eol"; }
+
+# The namelist defaults file contains default values for all required namelist variables.
+my $nl_defaults_file = "$cfgdir/namelist_files/namelist_defaults_cism.xml";
+(-f "$nl_defaults_file") or die <<"EOF";
+** $ProgName - Cannot find namelist defaults file \"$nl_defaults_file\" **
+EOF
+if ($print>=2) { print "Using namelist defaults file $nl_defaults_file$eol"; }
+
+#-----------------------------------------------------------------------------------------------
+# Add $perl5lib_dir to the list of paths that Perl searches for modules
+unshift @INC, "$CASEROOT/Tools/", "$perl5lib_dir";
+require Build::Config;
+require Build::NamelistDefinition;
+require Build::NamelistDefaults;
+require Build::Namelist;
+require SetupTools;
+
+#-----------------------------------------------------------------------------------------------
+# Create a configuration object from the CISM config_cache.xml file- created by
+# cism.cpl7.template in $CASEROOT/Buildconf/cismconf
+my $cfg = Build::Config->new('config_cache.xml');
+
+# Create a namelist definition object. This object provides a method for verifying that the
+# output namelist variables are in the definition file, and are output in the correct
+# namelist groups.
+my $definition = Build::NamelistDefinition->new($nl_definition_file);
+
+# Create a namelist defaults object. This object provides default values for variables
+# contained in the input defaults file. The configuration object provides attribute
+# values that are relevant for the CISM library for which the namelist is being produced.
+my $defaults = Build::NamelistDefaults->new($nl_defaults_file, $cfg);
+
+# Create an empty namelist object. Add values to it in order of precedence.
+my $nl = Build::Namelist->new();
+
+#-----------------------------------------------------------------------------------------------
+# Process the user input in order of precedence.
+# At each point we'll only add new values to the namelist and not overwrite previously
+# specified values which have higher precedence.
+
+# Process command-line options
+my $val;
+my $group;
+my $var;
+
+# paramfile
+# Note special handling of paramfile: This namelist item, which appears in cism_in, cannot be
+# set by the user, but instead must be specified as a command-line option. This is because this
+# file (cism.config) is created by build-namelist, and then copied by the calling script into a
+# new location. Thus, the calling script needs to know the name of the paramfile; to keep things
+# simple, we have the calling script set this value and do not allow the user to override it.
+$var='paramfile';
+if (defined $opts{$var}) {
+ $val = $opts{$var};
+ $val = quote_string($val);
+ $group = $definition->get_group_name($var);
+ $nl->set_variable_value($group, $var, $val);
+} else {
+ die "$ProgName - ERROR: paramfile must be specified";
+}
+
+
+# Process the -namelist arg.
+if (defined $opts{'namelist'}) {
+ # Parse commandline namelist
+ my $nl_arg = Build::Namelist->new($opts{'namelist'});
+
+ # Validate input namelist -- trap exceptions
+ my $nl_arg_valid;
+ eval { $nl_arg_valid = $definition->validate($nl_arg); };
+ if ($@) {
+ die "$ProgName - ERROR: Invalid namelist variable in commandline arg '-namelist'.\n $@";
+ }
+
+ # Merge input values into namelist. Previously specified values have higher precedence
+ # and are not overwritten.
+ $nl->merge_nl($nl_arg_valid);
+}
+
+# Process the -infile arg.
+if (defined $opts{'infile'}) {
+ # Parse namelist input from a file
+ my $nl_infile = Build::Namelist->new($opts{'infile'});
+
+ # Validate input namelist -- trap exceptions
+ my $nl_infile_valid;
+ eval { $nl_infile_valid = $definition->validate($nl_infile); };
+ if ($@) {
+ die "$ProgName - ERROR: Invalid namelist variable in '-infile' $opts{'infile'}.\n $@";
+ }
+
+ # Merge input values into namelist. Previously specified values have higher precedence
+ # and are not overwritten.
+ $nl->merge_nl($nl_infile_valid);
+}
+
+#-----------------------------------------------------------------------------------------------
+# Determine namelist
+#-----------------------------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------------------------
+####################################
+# Required xml variables #
+####################################
+
+my %xmlvars = ();
+SetupTools::getxmlvars($CASEROOT, \%xmlvars);
+foreach my $attr (keys %xmlvars) {
+ $xmlvars{$attr} = SetupTools::expand_xml_var($xmlvars{$attr}, \%xmlvars);
+}
+
+my $CASE = $xmlvars{'CASE'};
+my $CALENDAR = $xmlvars{'CALENDAR'};
+my $EXEROOT = $xmlvars{'EXEROOT'};
+my $CODEROOT = $xmlvars{'CODEROOT'};
+my $SCRIPTSROOT = $xmlvars{'SCRIPTSROOT'};
+my $CASEROOT = $xmlvars{'CASEROOT'};
+my $DIN_LOC_ROOT = $xmlvars{'DIN_LOC_ROOT'};
+my $RUN_TYPE = $xmlvars{'RUN_TYPE'};
+my $RUN_STARTDATE = $xmlvars{'RUN_STARTDATE'};
+my $RUN_REFCASE = $xmlvars{'RUN_REFCASE'};
+my $RUN_REFDATE = $xmlvars{'RUN_REFDATE'};
+my $CONTINUE_RUN = $xmlvars{'CONTINUE_RUN'};
+my $NCPL_BASE_PERIOD = $xmlvars{'NCPL_BASE_PERIOD'};
+my $GLC_NCPL = $xmlvars{'GLC_NCPL'};
+my $GLC_TWO_WAY_COUPLING = $xmlvars{'GLC_TWO_WAY_COUPLING'};
+my $CISM_USE_TRILINOS = $xmlvars{'CISM_USE_TRILINOS'};
+my $CISM_OBSERVED_IC = $xmlvars{'CISM_OBSERVED_IC'};
+
+(-d $DIN_LOC_ROOT) or die <<"EOF";
+** $ProgName - CCSM inputdata root is not a directory: \"$DIN_LOC_ROOT\" **
+EOF
+if ($print>=2) { print "CESM inputdata root directory: $DIN_LOC_ROOT$eol"; }
+
+####################################
+# Preliminary consistency checks
+####################################
+
+check_grid($GLC_GRID, $CISM_PHYS);
+
+####################################
+# namelist group: time_manager_nml #
+####################################
+
+my $startdate = $RUN_STARTDATE;
+if ($RUN_TYPE eq 'branch') { $startdate = $RUN_REFDATE; }
+
+my $iyear0 = `echo $startdate | cut -c1-4 | sed -e 's/^0*//'`;
+$iyear0 =~ s/\n/ /g; # remove imbedded newline
+my $imonth0 = `echo $startdate | cut -c6-7 | sed -e 's/^0*//'`;
+$imonth0 =~ s/\n/ /g; # remove imbedded newline
+my $iday0 = `echo $startdate | cut -c9-10 | sed -e 's/^0*//'`;
+$iday0 =~ s/\n/ /g; # remove imbedded newline
+my $ihour0 = 0;
+
+my ($dt_option, $dt_count) = get_glc_dt($NCPL_BASE_PERIOD, $GLC_NCPL);
+
+add_default($nl, 'runid', 'val'=>"$CASE");
+add_default($nl, 'dt_option', 'val'=>$dt_option);
+add_default($nl, 'dt_count', 'val'=>$dt_count);
+add_default($nl, 'allow_leapyear', 'calendar'=>"$CALENDAR");
+add_default($nl, 'iyear0', 'val'=>$iyear0);
+add_default($nl, 'imonth0', 'val'=>$imonth0);
+add_default($nl, 'iday0' , 'val'=>$iday0);
+add_default($nl, 'ihour0' , 'val'=>$ihour0);
+add_default($nl, 'iminute0','val'=>0);
+add_default($nl, 'isecond0','val'=>0);
+add_default($nl, 'date_separator');
+add_default($nl, 'stop_option');
+
+####################################
+# namelist group: cism_params #
+####################################
+
+add_default($nl, 'paramfile', 'noprepend'=>'1' );
+add_default($nl, 'cism_debug');
+add_default($nl, 'ice_flux_routing');
+
+my $hotstart;
+if ($RUN_TYPE eq 'startup') {
+ add_default($nl, 'cisminputfile');
+ $hotstart = 0;
+}
+elsif ($CISM_OBSERVED_IC eq 'TRUE') {
+ if ($RUN_TYPE eq 'branch') {
+ die "$ProgName - ERROR: CISM_OBSERVED_IC=TRUE not allowed for branch runs - only for hybrid runs\n";
+ }
+
+ add_default($nl, 'cisminputfile');
+ $hotstart = 0;
+}
+elsif ($RUN_TYPE eq 'branch' || $RUN_TYPE eq 'hybrid') {
+ add_default($nl, 'cisminputfile',
+ 'val'=>"${RUN_REFCASE}.cism.r.${RUN_REFDATE}-00000.nc", 'noprepend'=>'1');
+ $hotstart = 1;
+}
+else {
+ die "$ProgName - ERROR: Unknown RUN_TYPE: $RUN_TYPE\n";
+}
+
+####################################
+# namelist group: cism_history #
+####################################
+
+add_default($nl, 'cesm_history_vars');
+add_default($nl, 'history_option');
+if (!($nl->get_value('history_option') =~ /coupler/)) {
+ add_default($nl, 'history_frequency');
+}
+
+####################################
+# namelist group: glc_override_nml #
+####################################
+
+add_default($nl, 'enable_frac_overrides');
+if ($nl->get_value('enable_frac_overrides') =~ $TRUE) {
+ add_default($nl, 'override_delay');
+ add_default($nl, 'decrease_frac');
+ add_default($nl, 'increase_frac');
+ add_default($nl, 'rearrange_freq');
+}
+
+#-----------------------------------------------------------------------------------------------
+# *** Write output namelist file (cism_in) and input dataset list (cism.input_data_list) ***
+#-----------------------------------------------------------------------------------------------
+# Set namelist groups to be written out
+
+my @groups = qw(cism_params cism_history time_manager_nml glc_override_nml);
+
+# Write out all groups to cism_in
+my $outfile = "./cism_in";
+$nl->write($outfile, 'groups'=>\@groups);
+if ($print>=2) { print "Writing cism glc component namelist to $outfile $eol"; }
+
+# Write input dataset list.
+check_input_files($nl, $DIN_LOC_ROOT, "../cism.input_data_list");
+
+####################################
+# create cism.config in run dir #
+####################################
+
+# Write cism.config
+my $fh = new IO::File;
+$fh->open(">cism.config") or die "** can't open file: cism.config\n";
+
+print $fh "[grid]\n";
+add_default($nl, 'ewn');
+add_default($nl, 'nsn');
+add_default($nl, 'upn');
+add_default($nl, 'dew');
+add_default($nl, 'dns');
+$nl->write_cism_config($fh, "cism_config_grid");
+
+print $fh "\n[GLAD climate]\n";
+add_default($nl, 'evolve_ice');
+add_default($nl, 'test_coupling');
+add_default($nl, 'zero_gcm_fluxes', 'glc_two_way_coupling'=>"$GLC_TWO_WAY_COUPLING");
+$nl->write_cism_config($fh, "cism_config_climate");
+
+print $fh "\n[projection]\n";
+add_default($nl, 'type');
+add_default($nl, 'centre_latitude');
+add_default($nl, 'centre_longitude');
+add_default($nl, 'false_easting');
+add_default($nl, 'false_northing');
+add_default($nl, 'standard_parallel');
+$nl->write_cism_config($fh, "cism_config_projection");
+
+print $fh "\n[options]\n";
+add_default($nl, 'dycore');
+add_default($nl, 'temperature');
+add_default($nl, 'temp_init');
+add_default($nl, 'flow_law');
+add_default($nl, 'basal_water');
+add_default($nl, 'basal_mass_balance');
+add_default($nl, 'gthf');
+add_default($nl, 'isostasy');
+add_default($nl, 'marine_margin');
+add_default($nl, 'slip_coeff');
+add_default($nl, 'evolution');
+add_default($nl, 'vertical_integration');
+add_default($nl, 'sigma');
+add_default($nl, 'hotstart', 'val'=>$hotstart);
+$nl->write_cism_config($fh, "cism_config_options");
+
+# The [sigma] section only applies if we're using user-defined sigma
+# levels -- if not, leave it out to avoid confusion
+if ($nl->get_value('sigma') == $SIGMA_IN_CONFIG_FILE) {
+ print $fh "\n[sigma]\n";
+ add_default($nl, 'sigma_levels');
+ $nl->write_cism_config($fh, "cism_config_sigma");
+}
+else {
+ confirm_empty("cism_config_sigma", "items in sigma section can only be set if sigma is set to $SIGMA_IN_CONFIG_FILE");
+}
+
+
+print $fh "\n[time]\n";
+add_default($nl, 'dt');
+add_default($nl, 'ntem');
+add_default($nl, 'dt_diag');
+add_default($nl, 'idiag');
+add_default($nl, 'jdiag');
+$nl->write_cism_config($fh, "cism_config_time");
+
+print $fh "\n[parameters]\n";
+add_default($nl, 'log_level');
+add_default($nl, 'ice_limit');
+add_default($nl, 'marine_limit');
+add_default($nl, 'geothermal');
+add_default($nl, 'flow_factor');
+add_default($nl, 'hydro_time');
+add_default($nl, 'basal_tract_const');
+$nl->write_cism_config($fh, "cism_config_parameters");
+
+# The ho_options section only applies if dycore is not 0
+if ($nl->get_value('dycore') != 0) {
+ print $fh "\n[ho_options]\n";
+ add_default($nl, 'which_ho_babc');
+ add_default($nl, 'which_ho_efvs');
+ add_default($nl, 'which_ho_resid');
+ add_default($nl, 'which_ho_disp');
+ add_default($nl, 'which_ho_sparse', 'dycore'=>$nl->get_value('dycore'), 'cism_use_trilinos'=>"$CISM_USE_TRILINOS");
+ add_default($nl, 'which_ho_nonlinear', 'dycore'=>$nl->get_value('dycore'));
+ $nl->write_cism_config($fh, "cism_config_ho_options");
+}
+else {
+ confirm_empty("cism_config_ho_options", "items in ho_options_section can only be set if dycore is not 0");
+}
+
+# The [GTHF] section only applies if the 'gthf' option is set to
+# GTHF_CALCULATE -- if not, we leave it out to avoid confusion
+if ($nl->get_value('gthf') == $GTHF_CALCULATE) {
+ print $fh "\n[GTHF]\n";
+ $nl->write_cism_config($fh, "cism_config_gthf");
+}
+else {
+ confirm_empty("cism_config_gthf", "items in gthf section can only be set if gthf is set to $GTHF_CALCULATE");
+}
+
+# The [isostasy] section only applies if the 'isostasy' option is
+# turned on -- if not, we leave it out to avoid confusion
+if ($nl->get_value('isostasy') == $ISOSTASY_ON) {
+ print $fh "\n[isostasy]\n";
+ $nl->write_cism_config($fh, "cism_config_isostasy");
+}
+else {
+ confirm_empty("cism_config_isostasy", "items in isostasy section can only be set if isostasy is set to $ISOSTASY_ON");
+}
+
+
+print $fh <<"EOF";
+
+########################################
+# I/O configuration follows
+########################################
+
+\[CF default]
+title: Glimmer-CISM simulation
+institution: Community Earth System Model
+EOF
+
+my $cisminputfile= $nl->get_value('cisminputfile');
+$cisminputfile =~ s/\'//g;
+if ($CONTINUE_RUN eq 'FALSE') {
+print $fh <<"EOF";
+
+[CF input]
+name: $cisminputfile
+EOF
+}
+$fh->close;
+
+check_consistency($nl, \%xmlvars);
+
+
+#-----------------------------------------------------------------------------------------------
+# END OF MAIN SCRIPT
+#===============================================================================================
+
+#===============================================================================================
+sub add_default {
+
+# Add a value for the specified variable to the specified namelist object. The variables
+# already in the object have the higher precedence, so if the specified variable is already
+# defined in the object then don't overwrite it, just return.
+#
+# This method checks the definition file and adds the variable to the correct
+# namelist group.
+#
+# The value can be provided by using the optional argument key 'val' in the
+# calling list. Otherwise a default value is obtained from the namelist
+# defaults object. If no default value is found this method throws an exception
+# unless the 'nofail' option is set true.
+#
+# Additional optional keyword=>value pairs may be specified. If the keyword 'val' is
+# not present, then any other keyword=>value pairs that are specified will be used to
+# match attributes in the defaults file.
+#
+# Example 1: Specify the default value $val for the namelist variable $var in namelist
+# object $nl:
+#
+# add_default($nl, $var, 'val'=>$val)
+#
+# Example 2: Add a default for variable $var if an appropriate value is found. Otherwise
+# don't add the variable
+#
+# add_default($nl, $var, 'nofail'=>1)
+#
+#
+# ***** N.B. ***** This routine assumes the following variables are in package main::
+# $definition -- the namelist definition object
+# $DIN_LOC_ROOT -- CCSM inputdata root directory
+# $ProgName -- the name of this script (used in error messages)
+
+ my $nl = shift; # namelist object
+ my $var = shift; # name of namelist variable
+ my %opts = @_; # options
+
+ my $val = undef;
+
+ # Query the definition to find which group the variable belongs to. Exit if not found.
+ my $group = $definition->get_group_name($var);
+ unless ($group) {
+ my $fname = $definition->get_file_name();
+ die "$ProgName - ERROR: variable \"$var\" not found in namelist definition file $fname.\n";
+ }
+
+ # check whether the variable has a value in the namelist object -- if so then return
+ $val = $nl->get_variable_value($group, $var);
+ if (defined $val) {return;}
+
+ # Look for a specified value in the options hash
+ if (defined $opts{'val'}) {
+ $val = $opts{'val'};
+ }
+ # or else get a value from namelist defaults object.
+ # Note that if the 'val' key isn't in the hash, then just pass anything else
+ # in %opts to the get_value method to be used as attributes that are matched
+ # when looking for default values.
+ else {
+ $val = get_default_value($var, \%opts);
+ }
+
+ # if no value is found then exit w/ error (unless 'nofail' option set)
+ unless (defined $val) {
+ unless ($opts{'nofail'}) {
+ print "$ProgName - ERROR: No default value found for $var\n".
+ "user defined attributes:\n";
+ foreach my $key (keys(%opts)) {
+ if ($key ne 'nofail' and $key ne 'val') {
+ print "key=$key val=$opts{$key}\n";
+ }
+ }
+ # die without an argument; the diagnostic details were already printed above
+ die;
+ }
+ else {
+ return;
+ }
+ }
+
+ # query the definition to find out if the variable is an input pathname
+ my $is_input_pathname = $definition->is_input_pathname($var);
+
+ # The default values for input pathnames are relative. If the namelist
+ # variable is defined to be an absolute pathname, then prepend
+ # the CCSM inputdata root directory.
+ # TODO: unless ignore_abs is passed as argument
+ if ($is_input_pathname eq 'abs') {
+ # 'noprepend' suppresses prepending $DIN_LOC_ROOT, for files not under the
+ # inputdata tree (e.g. paramfile or a refcase restart file -- see callers)
+ unless ($opts{'noprepend'}){
+ $val = set_abs_filepath($val, $DIN_LOC_ROOT);
+ }
+ }
+
+ # query the definition to find out if the variable takes a string value.
+ # The returned string length will be >0 if $var is a string, and 0 if not.
+ my $str_len = $definition->get_str_len($var);
+
+ # If the variable is a string, then add quotes if they're missing
+ if ($str_len > 0) {
+ $val = quote_string($val);
+ }
+
+ # set the value in the namelist
+ $nl->set_variable_value($group, $var, $val);
+}
+
+#-----------------------------------------------------------------------------------------------
+
+sub get_default_value {
+
+# Return a default value for the requested variable.
+# Return undef if no default found.
+#
+# ***** N.B. ***** This routine assumes the following variables are in package main::
+# $defaults -- the namelist defaults object
+# $uc_defaults -- the use CASE defaults object
+#
+# NOTE(review): $uc_defaults is listed above but is never referenced in this
+# routine; only $defaults is consulted. The mention may be left over from a
+# script this was copied from -- confirm and remove if so.
+
+ my $var_name = lc(shift); # name of namelist variable (CASE insensitive interface)
+ my $usr_att_ref = shift; # reference to hash containing user supplied attributes
+
+ # Check in the namelist defaults
+ return $defaults->get_value($var_name, $usr_att_ref);
+
+}
+
+#-----------------------------------------------------------------------------------------------
+
+sub confirm_empty {
+
+# Confirm that a namelist group is empty (i.e., has no defined
+# namelist items).
+# Die if it isn't empty
+#
+# Usage: confirm_empty(group, errmsg)
+#
+# - group: name of namelist group
+# - errmsg: error message to print if group is not empty
+#
+# ***** N.B. ***** This routine assumes the following variables are in package main::
+# $nl -- the output namelist object being checked
+# $ProgName -- the name of this script (used in error messages)
+
+ my $group = shift;
+ my $errmsg = shift;
+
+ # get_defined_vars_in_group returns the count and the names of variables set in the group
+ my ($numvars, $varnames) = $nl->get_defined_vars_in_group($group);
+ if ($numvars > 0) {
+ print "$ProgName: ERROR: $errmsg\n";
+ die "$ProgName: ERROR: This applies to: $varnames\n";
+ }
+}
+
+#-----------------------------------------------------------------------------------------------
+
+sub check_input_files {
+
+# For each variable in the namelist which is an input dataset, check to see if it
+# exists locally.
+#
+# NOTE(review): as currently written no local-existence check is performed;
+# when $inputdata_rootdir is defined, this routine only writes
+# "$var = $pathname" lines to $outfile (the input_data_list file consumed by
+# the CESM data-staging machinery). The header above appears to describe an
+# earlier behavior -- confirm and update.
+#
+# ***** N.B. ***** This routine assumes the following variables are in package main::
+# $definition -- the namelist definition object
+
+ my $nl = shift; # namelist object
+ my $inputdata_rootdir = shift; # if false prints test, else creates inputdata file
+ my $outfile = shift;
+ # NOTE(review): OUTFILE is a package-global bareword filehandle and the open
+ # result is not checked -- a failed open would make the prints below no-ops.
+ open(OUTFILE, ">$outfile") if defined $inputdata_rootdir;
+
+ # Look through all namelist groups
+ my @groups = $nl->get_group_names();
+ foreach my $group (@groups) {
+
+ # Look through all variables in each group
+ my @vars = $nl->get_variable_names($group);
+ foreach my $var (@vars) {
+
+ # Is the variable an input dataset?
+ my $input_pathname_type = $definition->is_input_pathname($var);
+
+ # If it is, check whether it exists locally and print status
+ if ($input_pathname_type) {
+ # Get pathname of input dataset
+ my $pathname = $nl->get_variable_value($group, $var);
+ # Need to strip the quotes
+ $pathname =~ s/[\'\"]//g;
+
+ # 'abs' pathnames are already absolute (DIN_LOC_ROOT was prepended earlier)
+ if ($input_pathname_type eq 'abs') {
+ if ($inputdata_rootdir) {
+ print OUTFILE "$var = $pathname\n";
+ }
+ }
+ elsif ($input_pathname_type =~ m/rel:(.+)/o) {
+ # The match provides the namelist variable that contains the
+ # root directory for a relative filename
+ my $rootdir_var = $1;
+ my $rootdir = $nl->get_variable_value($group, $rootdir_var);
+ $rootdir =~ s/[\'\"]//g;
+ if ($inputdata_rootdir) {
+ $pathname = "$rootdir/$pathname";
+ print OUTFILE "$var = $pathname\n";
+ }
+ }
+ }
+ }
+ }
+ close OUTFILE if defined $inputdata_rootdir;
+ return 0 if defined $inputdata_rootdir;
+}
+
+#-----------------------------------------------------------------------------------------------
+
+sub set_abs_filepath {
+
+# check whether the input filepath is an absolute path, and if it isn't then
+# prepend a root directory
+#
+# Usage: $abs_path = set_abs_filepath($filepath, $rootdir)
+#
+# Whitespace and surrounding quotes are stripped from both arguments before
+# the check, so the returned path is always unquoted.
+
+ my ($filepath, $rootdir) = @_;
+
+ # strip any leading/trailing whitespace
+ $filepath =~ s/^\s+//;
+ $filepath =~ s/\s+$//;
+ $rootdir =~ s/^\s+//;
+ $rootdir =~ s/\s+$//;
+
+ # strip any leading/trailing quotes
+ $filepath =~ s/^['"]+//;
+ $filepath =~ s/["']+$//;
+ $rootdir =~ s/^['"]+//;
+ $rootdir =~ s/["']+$//;
+
+ my $out = $filepath;
+ unless ( $filepath =~ /^\// ) { # unless $filepath starts with a /
+ $out = "$rootdir/$filepath"; # prepend the root directory
+ }
+ return $out;
+}
+
+#-------------------------------------------------------------------------------
+
+sub valid_option {
+
+# Usage: $match = valid_option($val, @expect)
+#
+# Case-insensitively compare $val (after trimming surrounding whitespace)
+# against each entry of @expect. Returns the canonical entry from @expect on
+# a match, or undef if $val matches none of them.
+#
+# NOTE(review): each expected option is interpolated into the regex unescaped,
+# so options containing regex metacharacters would need quotemeta -- confirm
+# none of the callers pass such options.
+
+ my ($val, @expect) = @_;
+ my ($expect);
+
+ $val =~ s/^\s+//;
+ $val =~ s/\s+$//;
+ foreach $expect (@expect) {
+ if ($val =~ /^$expect$/i) { return $expect; }
+ }
+ return undef;
+}
+
+#-------------------------------------------------------------------------------
+
+sub quote_string {
+
+# Trim leading/trailing whitespace from a string and wrap it in single quotes,
+# unless it already begins with a single or double quote (in which case it is
+# returned trimmed but otherwise unchanged).
+
+ my $str = shift;
+ $str =~ s/^\s+//;
+ $str =~ s/\s+$//;
+ unless ($str =~ /^['"]/) { #"'
+ $str = "\'$str\'";
+ }
+ return $str;
+}
+
+#-------------------------------------------------------------------------------
+
+sub get_glc_dt {
+
+# Usage: ($dt_option, $dt_count) = get_glc_dt($ncpl_base_period, $glc_ncpl)
+#
+# Given the number of coupling intervals for GLC and the coupling base period,
+# returns appropriate values of dt_option and dt_count for CISM's namelist. Note
+# that this gives the dt values in cism_in - NOT the values in cism.config.
+#
+# This assumes that we want one timestep per coupling interval.
+#
+# NOTE(review): only the 'hour' base period verifies that the coupling count
+# divides the period evenly; 'decade' assumes $glc_ncpl is a multiple of 10
+# (otherwise dt_count is fractional) -- confirm GLC_NCPL is validated upstream.
+
+ my $ncpl_base_period = shift;
+ my $glc_ncpl = shift;
+
+ my ($dt_option, $dt_count);
+
+ if ($ncpl_base_period eq 'hour') {
+ # express the timestep in seconds; requires glc_ncpl to divide 3600 evenly
+ if (3600 % $glc_ncpl != 0) { die "$ProgName: ERROR glc NCPL doesn't divide an hour evenly\n";}
+ $dt_option = 'seconds';
+ $dt_count = 3600 / $glc_ncpl;
+ } elsif ($ncpl_base_period eq 'day') {
+ $dt_option = 'steps_per_day';
+ $dt_count = $glc_ncpl;
+ } elsif ($ncpl_base_period eq 'year') {
+ $dt_option = 'steps_per_year';
+ $dt_count = $glc_ncpl;
+ } elsif ($ncpl_base_period eq 'decade') {
+ # glc_ncpl counts steps per decade; convert to steps per year
+ $dt_option = 'steps_per_year';
+ $dt_count = $glc_ncpl / 10;
+ } else {
+ die "$ProgName: ERROR invalid NCPL_BASE_PERIOD $ncpl_base_period\n";
+ }
+
+ return ($dt_option, $dt_count);
+}
+
+#-------------------------------------------------------------------------------
+
+sub check_grid {
+
+# Usage: check_grid($glc_grid, $cism_phys)
+#
+# Checks that the glc_grid is valid given the cism_phys setting
+#
+# NOTE(review): a $cism_phys value other than cism1/cism2 leaves %allowed_grids
+# empty, so the error below would print an empty allowed-grid list instead of
+# flagging the bad cism_phys value -- consider an explicit else-die.
+
+ my ($glc_grid, $cism_phys) = @_;
+
+ # we only care about the keys in allowed_grids, not the values; but we use a
+ # hash for easy lookup later
+ my %allowed_grids;
+
+ if ($cism_phys eq "cism1") {
+ %allowed_grids = (gland20 => 1,
+ gland10 => 1,
+ gland5 => 1,
+ gland5UM => 1,
+ );
+ }
+ elsif ($cism_phys eq "cism2") {
+ %allowed_grids = (gland20 => 1,
+ gland4 => 1,
+ );
+ }
+
+ # die with a list of the allowed grids if the requested grid is not among them
+ if (!exists($allowed_grids{$glc_grid})) {
+ print "ERROR: For cism_phys = $cism_phys, allowable grids are:\n";
+ print join(", ", sort keys %allowed_grids);
+ print "\n";
+ print "Current value of GLC_GRID is: $glc_grid\n";
+ die;
+ }
+}
+
+
+#-------------------------------------------------------------------------------
+
+sub check_consistency {
+
+# Usage: check_consistency($nl, \%xmlvars)
+# (note that xmlvars is a reference to a hash)
+#
+# Checks various options for consistency; dies if any errors are found
+#
+# Errors are accumulated across all checks so that every problem is reported
+# in a single pass before the script aborts.
+#
+# ***** N.B. ***** This routine assumes the following variables are in package main::
+# $ProgName -- the name of this script (used in error messages)
+
+ my ($nl, $xmlvars) = @_;
+ my $num_errors = 0;
+
+ # validate the cism.config 'dt' value (returns the number of errors found)
+ $num_errors += check_cism_dt($nl->get_value('dt'));
+
+# ----------------------------------------------------------------------
+# Requirements for evolve_ice = .false.
+# ----------------------------------------------------------------------
+
+ if ($nl->get_value('evolve_ice') == 0) {
+ if ($nl->get_value('zero_gcm_fluxes') == 0) {
+ print "$ProgName: ERROR: for evolve_ice = 0, you must also set zero_gcm_fluxes = 1\n";
+ print "(This is because evolve_ice = 0 implies that there will be no fluxes,\n";
+ print "and you must explicitly set zero_gcm_fluxes = 1 for the sake of logic\n";
+ print "that depends on whether these fluxes will be zero - particularly, the creation\n";
+ print "of icemask_coupled_fluxes used by CLM).\n";
+ $num_errors += 1;
+ }
+ }
+
+# ----------------------------------------------------------------------
+# Requirements for use of trilinos solver
+# ----------------------------------------------------------------------
+
+ if ($nl->get_value('which_ho_sparse') == 4) {
+ if ($xmlvars->{'CISM_USE_TRILINOS'} eq 'FALSE') {
+ print "$ProgName: ERROR: Use of the trilinos solver (which_ho_sparse=4) requires building with trilinos (CISM_USE_TRILINOS=TRUE)\n";
+ $num_errors += 1;
+ }
+
+ if ($nl->get_value('dycore') == 0) {
+ print "$ProgName: ERROR: Can't use trilinos solver (which_ho_sparse=4) with glide dycore (dycore=0)\n";
+ $num_errors += 1;
+ }
+ }
+
+# ----------------------------------------------------------------------
+# Requirements for running with more than one MPI task per ensemble member
+# ----------------------------------------------------------------------
+
+ # numeric comparison of NTASKS_GLC vs NINST_GLC: more than one task per instance?
+ if ($xmlvars->{'NTASKS_GLC'} > $xmlvars->{'NINST_GLC'}) {
+ if ($nl->get_value('dycore') == 0) {
+ print "$ProgName: ERROR: With dycore=0 (glide dycore), can only have 1 GLC task per instance\n";
+ print "NTASKS_GLC = " . $xmlvars->{'NTASKS_GLC'} . ", NINST_GLC = " . $xmlvars->{'NINST_GLC'} . "\n";
+ $num_errors += 1;
+ }
+
+ if ($nl->get_value('dycore') == 1 && $nl->get_value('which_ho_sparse') != 4) {
+ print "$ProgName: ERROR: To run with more than 1 GLC task per instance, with dycore=1, must use trilinos solver (which_ho_sparse=4)\n";
+ print "NTASKS_GLC = " . $xmlvars->{'NTASKS_GLC'} . ", NINST_GLC = " . $xmlvars->{'NINST_GLC'} . "\n";
+ print "which_ho_sparse = " . $nl->get_value('which_ho_sparse') . "\n";
+ $num_errors += 1;
+ }
+ }
+
+
+ # die only after all checks have run, so the user sees every error at once
+ die if ($num_errors > 0);
+}
+
+#-------------------------------------------------------------------------------
+
+sub check_cism_dt {
+
+# Usage: check_cism_dt($dt)
+#
+# Checks cism's dt value: i.e., the dt variable in the time section of cism.config.
+# Returns 0 if okay, > 0 if errors found (i.e., if dt is an inappropriate value). Also, if
+# any errors are found, an error message is printed.
+#
+# NOTE(review): the relative-difference test divides by $dt_hours, so dt == 0
+# would cause a division-by-zero error -- confirm dt is validated to be
+# positive before this routine is reached.
+
+ my $dt = shift;
+ my $num_errors = 0;
+
+
+# Ensure that dt translates into an integer number of hours
+
+ # convert dt (in years) to hours, assuming a 365-day (no-leap) year
+ my $dt_hours = $dt * 365. * 24.;
+ # round to nearest integer:
+ my $dt_hours_int = sprintf("%.0f", $dt_hours);
+
+ # make sure difference is basically 0, by comparing relative difference with a value near machine epsilon
+ if (abs($dt_hours - $dt_hours_int)/$dt_hours > 1.e-15) {
+ print "$ProgName: ERROR: dt (in years) must translate into an integer number of hours\n";
+ print "dt = $dt\n";
+ print "dt (hours) = $dt_hours\n";
+ $num_errors += 1;
+ }
+
+ return $num_errors;
+}
+
diff --git a/components/cism/bld/cism.buildlib b/components/cism/bld/cism.buildlib
new file mode 100755
index 0000000000..fa7a817bb8
--- /dev/null
+++ b/components/cism/bld/cism.buildlib
@@ -0,0 +1,128 @@
+#! /usr/bin/env perl
+use strict;
+
+# cism.buildlib: build the CISM (glc) component for a CESM case.
+#
+# Usage: cism.buildlib CASEROOT
+#
+# First generates the glimmer-cism Makefile via cmake (driven through the
+# case-level Makefile), builds the glimmer-cism library, then builds the
+# CESM-specific glc code into $LIBROOT/libglc.a.
+
+if ($#ARGV == -1) {
+ # BUGFIX: error messages previously said "cism.buildexe"; this script is cism.buildlib
+ die " ERROR cism.buildlib: must specify a caseroot input argument";
+}
+my ($CASEROOT) = @ARGV;
+chdir "${CASEROOT}";
+
+# Query case configuration from the case XML files.
+# NOTE(review): the backtick captures are used unchomped; this assumes
+# "./xmlquery ... -value" prints the bare value with no trailing newline.
+my $CASEBUILD = `./xmlquery CASEBUILD -value`;
+my $CCSMROOT = `./xmlquery CCSMROOT -value`;
+my $CASETOOLS = `./xmlquery CASETOOLS -value`;
+my $OBJROOT = `./xmlquery OBJROOT -value`;
+my $EXEROOT = `./xmlquery EXEROOT -value`;
+my $LIBROOT = `./xmlquery LIBROOT -value`;
+my $GMAKE_J = `./xmlquery GMAKE_J -value`;
+my $GMAKE = `./xmlquery GMAKE -value`;
+my $CISM_USE_TRILINOS = `./xmlquery CISM_USE_TRILINOS -value`;
+
+# directory in which glc is built
+my $glc_dir = "$EXEROOT/glc";
+
+# directory in which glc obj files are built
+my $glc_obj_dir = "$OBJROOT/glc/obj";
+
+# directory in which glimmer-cism library is created
+my $cism_libdir = "$glc_dir/lib";
+
+# directory in which we can find source mods
+my $sourcemod_dir = "$CASEROOT/SourceMods/src.cism";
+
+chdir "$glc_obj_dir";
+
+# ----------------------------------------------------------------------
+# Create Filepath
+# ----------------------------------------------------------------------
+# The following just gives the filepath for the cesm-specific code:
+# the glimmer-cism stuff is picked up by the cmake-based build
+
+# use a lexical filehandle (rather than a bareword) and check the open result
+open(my $filepath_fh, ">", "Filepath") or die "Could not open file Filepath to write";
+print $filepath_fh "$sourcemod_dir \n";
+print $filepath_fh "$CCSMROOT/components/cism/drivers/cpl \n";
+print $filepath_fh "$CCSMROOT/components/cism/source_glc \n";
+print $filepath_fh "$CCSMROOT/components/cism/mpi \n";
+close($filepath_fh);
+
+# ----------------------------------------------------------------------
+# Set options to cmake
+#
+# Note: Makefile variables should be given as: \\\$(VAR)
+# Perl will expand this to \$(VAR)
+# The extra preceding backslash is needed so that when cmake_opts is put on the command line,
+# the shell doesn't try to interpret the '$'.
+# ----------------------------------------------------------------------
+# Note that some other generic CMAKE options are set in the Makefile
+my $cmake_opts;
+$cmake_opts = "";
+$cmake_opts = "$cmake_opts -D CISM_COUPLED=ON";
+$cmake_opts = "$cmake_opts -D CISM_USE_MPI_WITH_SLAP=ON";
+
+# CISM_USE_GPTL_INSTRUMENTATION is unnecessary (and possibly harmful)
+# when built inside CESM; for CESM we instead use -DCCSMCOUPLED, which
+# also gives us timing instrumentation
+$cmake_opts = "$cmake_opts -D CISM_USE_GPTL_INSTRUMENTATION=OFF";
+$cmake_opts = "$cmake_opts -D CISM_BINARY_DIR=$glc_dir";
+$cmake_opts = "$cmake_opts -D CMAKE_Fortran_MODULE_DIRECTORY=$glc_obj_dir";
+$cmake_opts = "$cmake_opts -D CISM_NETCDF_DIR=\\\$(NETCDF_PATH)";
+$cmake_opts = "$cmake_opts -D CISM_MPI_INC_DIR=\\\$(INC_MPI)";
+$cmake_opts = "$cmake_opts -D CISM_SOURCEMOD_DIR=$sourcemod_dir/glimmer-cism";
+
+# Turn on MPI_MODE always. This works within CESM because we always
+# have an mpi library (possibly mpi-serial). And always turning on
+# MPI_MODE means that we can defer more decisions to
+# runtime. (Although this comes with a small performance cost when we
+# don't actually need mpi.)
+$cmake_opts = "$cmake_opts -D CISM_MPI_MODE=ON";
+$cmake_opts = "$cmake_opts -D CISM_SERIAL_MODE=OFF";
+if ("$CISM_USE_TRILINOS" eq 'TRUE') {
+ $cmake_opts = "$cmake_opts -D CISM_USE_TRILINOS=ON";
+ $cmake_opts = "$cmake_opts -D CISM_TRILINOS_DIR=\\\$(TRILINOS_PATH)";
+} else {
+ $cmake_opts = "$cmake_opts -D CISM_USE_TRILINOS=OFF";
+}
+
+# ----------------------------------------------------------------------
+# Set mkDepends to append libglimmercismfortran.a to the end of each
+# .o dependency line.
+#
+# Rationale: Some of the source files in the cesm-specific code depend
+# on files included in this library. Ideally, we would be able to
+# determine the actual dependencies, but that's not easy with the
+# current tools and the fact that we build the glimmer-cism code using
+# a different build system than the cesm-specific code. So for now, we
+# just rebuild all the cesm-specific code whenever anything in the
+# libglimmercismfortran.a library changes.
+#
+# WJS (3-6-13): I thought we would just need to include these options
+# in the call to make the complib target. But for some reason that I
+# can't determine, mkDepends is called when we make $glc_dir/Makefile,
+# so we also need to include these options there.
+# ----------------------------------------------------------------------
+my $mkdepends_opts = "-d $cism_libdir/libglimmercismfortran.a";
+
+# ----------------------------------------------------------------------
+# create the glimmer-cism makefile by running cmake (done via a rule
+# in the system-level makefile)
+# ----------------------------------------------------------------------
+# BUGFIX: declare $sysmod once; the subsequent builds below reuse the same
+# lexical instead of redeclaring it with "my" in the same scope.
+my $sysmod = "$GMAKE $glc_dir/Makefile MODEL=cism USER_CMAKE_OPTS=\"$cmake_opts\" USER_MKDEPENDS_OPTS=\"$mkdepends_opts\" GLC_DIR=$glc_dir -f $CASETOOLS/Makefile";
+system($sysmod) == 0 or die "ERROR cism.buildlib: $sysmod failed: $?\n";
+
+# ----------------------------------------------------------------------
+# create the glimmer-cism library (or libraries), using the makefile
+# created by cmake
+# ----------------------------------------------------------------------
+chdir "$glc_dir";
+
+$sysmod = "$GMAKE -j $GMAKE_J";
+system($sysmod) == 0 or die "ERROR cism.buildlib: $sysmod failed: $?\n";
+
+chdir "$glc_obj_dir";
+
+# ----------------------------------------------------------------------
+# create the cesm-specific portion of the glc library using cesm's makefile
+# ----------------------------------------------------------------------
+$sysmod = "$GMAKE complib -j $GMAKE_J MODEL=cism COMPLIB=$LIBROOT/libglc.a USER_MKDEPENDS_OPTS=\"$mkdepends_opts\" GLC_DIR=$glc_dir -f $CASETOOLS/Makefile";
+system($sysmod) == 0 or die "ERROR cism.buildlib: $sysmod failed: $?\n";
+
+exit 0;
diff --git a/components/cism/bld/cism.buildnml b/components/cism/bld/cism.buildnml
new file mode 100755
index 0000000000..debf0240bc
--- /dev/null
+++ b/components/cism/bld/cism.buildnml
@@ -0,0 +1,113 @@
+#! /usr/bin/env perl
+use strict;
+use Cwd;
+
+if ($#ARGV == -1) {
+ die " ERROR cism.buildexe: must specify a caseroot input argument";
+}
+my ($CASEROOT) = @ARGV;
+chdir "${CASEROOT}";
+
+my @dirs = ("$CASEROOT/Tools");
+unshift @INC, @dirs;
+require SetupTools;
+my $sysmod;
+
+my $CASEBUILD = `./xmlquery CASEBUILD -value`;
+my $GLC_GRID = `./xmlquery GLC_GRID -value`;
+my $CISM_PHYS = `./xmlquery CISM_PHYS -value`;
+my $CISM_USE_TRILINOS = `./xmlquery CISM_USE_TRILINOS -value`;
+my $CCSMROOT = `./xmlquery CCSMROOT -value`;
+my $LND_GRID = `./xmlquery LND_GRID -value`;
+my $NINST_GLC = `./xmlquery NINST_GLC -value`;
+my $RUNDIR = `./xmlquery RUNDIR -value`;
+my $SCRIPTSROOT = `./xmlquery SCRIPTSROOT -value`;
+my $UTILROOT = `./xmlquery UTILROOT -value`;
+
+if (! -d "$CASEBUILD/cismconf" ) {
+ $sysmod = "mkdir $CASEBUILD/cismconf";
+ system($sysmod) == 0 or die "ERROR cism.buildnml: $sysmod failed: $?\n";
+}
+chdir "$CASEBUILD/cismconf";
+
+my $inst_string;
+my $inst_counter = 1;
+while ($inst_counter <= $NINST_GLC) {
+
+ # -----------------------------------------------------
+ # determine instance string
+ # -----------------------------------------------------
+
+ $inst_string = "";
+ if ($NINST_GLC > 1) {
+ $inst_string = `printf _%04d $inst_counter`;
+
+ # If multi-instance case does not have restart file, use single-case restart
+ # for each instance
+ if ( (! -e "$RUNDIR/rpointer.glc${inst_string}") && (-e "$RUNDIR/rpointer.glc") ) {
+ $sysmod = "cp -v $RUNDIR/rpointer.glc $RUNDIR/rpointer.glc${inst_string}";
+ system($sysmod) == 0 or die "ERROR cism.buildnml: $sysmod failed: $?\n";
+ }
+ }
+
+ # -----------------------------------------------------
+ # create cismconf/cesm_namelist
+ # -----------------------------------------------------
+
+ SetupTools::create_namelist_infile("$CASEROOT",
+ "$CASEROOT/user_nl_cism${inst_string}",
+ "$CASEBUILD/cismconf/cesm_namelist");
+
+ # -----------------------------------------------------
+ # call build-namelist
+ # -----------------------------------------------------
+
+ if (-e "$CASEBUILD/cism.input_data_list") {
+ $sysmod = "rm $CASEBUILD/cism.input_data_list";
+ system($sysmod) == 0 or die "ERROR cism.buildnml: $sysmod failed: $?\n";
+ }
+
+ $sysmod = "$CCSMROOT/components/cism/bld/build-namelist";
+ $sysmod = "$sysmod -infile $CASEBUILD/cismconf/cesm_namelist";
+ $sysmod = "$sysmod -caseroot $CASEROOT";
+ $sysmod = "$sysmod -scriptsroot $SCRIPTSROOT";
+ $sysmod = "$sysmod -inst_string \"$inst_string\" ";
+ $sysmod = "$sysmod -paramfile cism.config${inst_string}";
+ $sysmod = "$sysmod -lnd_grid $LND_GRID -glc_grid $GLC_GRID";
+ $sysmod = "$sysmod -cism_phys $CISM_PHYS";
+ system($sysmod) == 0 or die "ERROR cism.buildnml: $sysmod failed: $?\n";
+
+ if (-d ${RUNDIR}) {
+ $sysmod = "cp $CASEBUILD/cismconf/cism_in ${RUNDIR}/cism_in${inst_string}";
+ system($sysmod) == 0 or die "ERROR cism.buildnml: $sysmod failed: $?\n";
+
+ $sysmod = "cp $CASEBUILD/cismconf/cism.config ${RUNDIR}/cism.config${inst_string}";
+ system($sysmod) == 0 or die "ERROR cism.buildnml: $sysmod failed: $?\n";
+ }
+
+ # -----------------------------------------------------
+ # increment instance counter
+ # -----------------------------------------------------
+
+ $inst_counter = $inst_counter + 1;
+}
+
+if ($CISM_USE_TRILINOS eq "TRUE") {
+ my $sourcemod_dir = "$CASEROOT/SourceMods/src.cism";
+ if (-e "${sourcemod_dir}/trilinosOptions.xml") {
+ $sysmod = "cp ${sourcemod_dir}/trilinosOptions.xml ${RUNDIR}";
+ system($sysmod) == 0 or die "ERROR cism.buildnml: $sysmod failed: $?\n";
+ } else {
+ my $trilinos_options_dir = "$CCSMROOT/components/cism/bld/trilinosOptions";
+ my $trilinos_file = "trilinosOptions_${GLC_GRID}.xml";
+ if (-e "${trilinos_options_dir}/${trilinos_file}") {
+ $sysmod = "cp ${trilinos_options_dir}/${trilinos_file} ${RUNDIR}/trilinosOptions.xml";
+ system($sysmod) == 0 or die "ERROR cism.buildnml: $sysmod failed: $?\n";
+ } else {
+ die "ERROR: no trilinosOptions file found in $trilinos_options_dir for GLC_GRID=$GLC_GRID \n";
+ }
+ }
+}
+
+exit (0);
+
diff --git a/components/cism/bld/cism.template b/components/cism/bld/cism.template
new file mode 100755
index 0000000000..21d49e7d82
--- /dev/null
+++ b/components/cism/bld/cism.template
@@ -0,0 +1,81 @@
+#! /usr/bin/env perl
+use strict;
+
+if ($#ARGV == -1) {
+ die " ERROR cism.buildn_usernl: must specify a caseroot input argument";
+}
+my ($CASEROOT) = @ARGV;
+my $sysmod;
+
+my $CCSMROOT = `./xmlquery CCSMROOT -value`;
+my $CASEBUILD = `./xmlquery CASEBUILD -value`;
+
+#-------------------------------------------------------------------------------
+# stage variable definition files and related scripts and templates to
+# $CASEBUILD/cismIOconf, so users can easily modify IO fields
+#-------------------------------------------------------------------------------
+
+my $IOCONF_DIR = "$CASEBUILD/cismIOconf";
+my $GLIMMER_CISM_ROOT = "$CCSMROOT/components/cism/glimmer-cism";
+if (! -d "$IOCONF_DIR") {
+ $sysmod = "mkdir $IOCONF_DIR";
+ system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+}
+$sysmod = "cp $CCSMROOT/components/cism/bld/cismIO/README.cismIO $IOCONF_DIR/";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+$sysmod = "cp $GLIMMER_CISM_ROOT/utils/build/generate_ncvars.py $IOCONF_DIR/";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+$sysmod = "cp $GLIMMER_CISM_ROOT/libglimmer/ncdf_template.F90.in $IOCONF_DIR/";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+$sysmod = "cp $GLIMMER_CISM_ROOT/libglide/glide_vars.def $IOCONF_DIR/";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+$sysmod = "cp $GLIMMER_CISM_ROOT/libglad/glad_vars.def $IOCONF_DIR/";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+$sysmod = "cp $GLIMMER_CISM_ROOT/libglad/glad_mbal_vars.def $IOCONF_DIR/";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+# NOTE(wjs, 2015-04-03) glint is no longer used by CESM. However, I'm keeping
+# the glint stuff here for now so that we can keep the glint default i/o files
+# up-to-date (since I use this mechanism to regenerate the default i/o files)
+
+$sysmod = "cp $GLIMMER_CISM_ROOT/libglint/glint_vars.def $IOCONF_DIR/";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+$sysmod = "cp $GLIMMER_CISM_ROOT/libglint/glint_mbal_vars.def $IOCONF_DIR/";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+#-------------------------------------------------------------------------------
+# create cism.buildIO.csh script to allow users to modify the IO fields
+#-------------------------------------------------------------------------------
+
+my $buildIO_script = "$IOCONF_DIR/cism.buildIO.csh";
+open(file,">$buildIO_script") or die "Could not open file $buildIO_script to write";
+
+my $output = <> $buildIO_script";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+$sysmod = "chmod u+x $buildIO_script";
+system($sysmod) == 0 or die "ERROR cism.build_usernl: $sysmod failed: $?\n";
+
+exit(0);
diff --git a/components/cism/bld/cismIO/README.cismIO b/components/cism/bld/cismIO/README.cismIO
new file mode 100644
index 0000000000..2dfccbd5bd
--- /dev/null
+++ b/components/cism/bld/cismIO/README.cismIO
@@ -0,0 +1,16 @@
+This directory and its scripts are intended to allow the user to change
+IO fields from the CISM code. The CISM IO files, *_io.F90, are auto-generated
+and typically difficult to modify. However, the corresponding variable
+definition files, *_vars.def, are easily modified and the IO files can be
+re-generated by running the cism.buildIO.csh script contained in this
+directory.
+
+Usage of this script requires that the user has defined an environment variable
+PYTHON pointing to a local version of python. After that, the user simply
+runs the enclosed cism.buildIO.csh script, which runs a python script on each
+variable definition file and stages the CISM IO files in the correct
+SourceMods/src.cism directory.
+
+WARNING: this script has hard-wired paths and is intended for use for a specific
+case. Please do not try to use it elsewhere.
+
diff --git a/components/cism/bld/cismIO/cism.buildIO.template.csh b/components/cism/bld/cismIO/cism.buildIO.template.csh
new file mode 100755
index 0000000000..0ca96420bc
--- /dev/null
+++ b/components/cism/bld/cismIO/cism.buildIO.template.csh
@@ -0,0 +1,52 @@
+# -----------------------------------------------------------------------------
+# NOTE: If you are viewing this script within the bld subdirectory of the cism
+# code directory, please note that this is not a complete script. Instead, it
+# is embedded in a script that is created by cism.cpl7.template (in the parent
+# directory). That is where some variables are defined (cism_confIOdir,
+# sourcemod_dir). This is done because cism.cpl7.template has access to the
+# CASEROOT and CASEBUILD environment variables, whereas this script (which is
+# meant to be run as a standalone script -- NOT part of the cesm build) does
+# not necessarily know the values of these variables.
+#
+# If you are viewing this script from within your CASE directory, then the
+# above note does not apply.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# generate new glimmer _io.F90 files
+# -----------------------------------------------------------------------------
+cd $cism_confIOdir
+
+# NOTE(wjs, 2015-04-03): glint is no longer used by CESM. However, I'm keeping
+# the glint stuff here for now so that we can keep the glint default i/o files
+# up-to-date (since I use this mechanism to regenerate the default i/o files)
+foreach file (glide glad glad_mbal glint glint_mbal)
+ set file_varsdef = ${file}_vars.def
+ set file_ioF90 = ${file}_io.F90
+ if (-f ${file_varsdef}) then
+ # ---------------------------------------------------------------------------
+ # create new _io.F90 file using the glimmer python script
+ # ---------------------------------------------------------------------------
+ $PYTHON generate_ncvars.py $file_varsdef ncdf_template.F90.in
+
+ if (-f ${file_ioF90}) then
+ # ---------------------------------------------------------------------------
+ # compare new _io.F90 file with current version in the objdir (if it exists)
+ # if different, copy the new one to the objdir
+ # ---------------------------------------------------------------------------
+ cp ${file_ioF90} ${sourcemod_dir}/${file_ioF90}
+ else
+ # ---------------------------------------------------------------------------
+ # if new _io.F90 file not created for some reason, exit
+ # ---------------------------------------------------------------------------
+ echo ERROR: glimmer python script failed to produce new file: ${file_ioF90}
+ exit 2
+ endif
+
+ else
+ echo ERROR: missing glimmer variable definition file: ${file_varsdef}
+ exit 2
+ endif
+end
+
+
diff --git a/components/cism/bld/namelist_files/namelist_defaults_cism.xml b/components/cism/bld/namelist_files/namelist_defaults_cism.xml
new file mode 100644
index 0000000000..e9c9416422
--- /dev/null
+++ b/components/cism/bld/namelist_files/namelist_defaults_cism.xml
@@ -0,0 +1,256 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+glc/cism/gland20.input_c150415.nc
+glc/cism/gland10.input_c150415.nc
+glc/cism/gland5.input_c150415.nc
+glc/cism/IceSheetData_UMontana/PresentDayGreenland/Greenland_5km_v1.1_SacksRev_c150415.nc
+
+
+glc/cism/gland20.input_c150415.nc
+
+
+glc/cism/Greenland/glissade/init/greenland_4km.glissade.10kyr.beta6.SSacab_c150415a.nc
+
+
+
+
+
+.false.
+
+ocn
+
+
+
+
+
+'acab artm thk usurf topg uvel vvel uflx vflx temp bmlt bwat'
+
+nyears
+
+1
+
+
+
+
+
+-
+
+never
+
+.false.
+
+
+
+
+
+.false.
+
+0
+
+0.0
+
+0.0
+
+0
+
+
+
+
+
+151
+76
+301
+301
+376
+
+281
+141
+561
+561
+701
+
+11
+
+10000.
+20000.
+5000.
+5000.
+4000.
+
+10000.
+20000.
+5000.
+5000.
+4000.
+
+
+
+
+
+
+
+
+0.00 0.15 0.30 0.45 0.60 0.75 0.83 0.90 0.95 0.98 1.00
+
+
+
+
+
+1
+
+.false.
+
+0
+1
+
+
+
+
+
+"STERE"
+
+90.0
+
+321.0
+
+800000.0
+
+3400000.0
+
+71.0
+
+
+
+
+
+0
+2
+
+1
+
+2
+
+2
+
+0
+
+1
+
+0
+
+0
+
+1
+
+0
+
+0
+3
+
+0
+
+0
+
+
+
+
+
+0.1
+0.1
+0.05
+0.05
+
+
+0.5
+
+0.0083333333333333333
+
+1
+
+
+1.
+
+
+38
+19
+76
+76
+95
+
+114
+57
+228
+228
+285
+
+
+
+
+
+6
+
+100.
+
+-200.
+
+-5.e-2
+
+3.
+1.
+
+1000.
+
+1.e-4
+
+
+
+
+
+
+
+
+5
+4
+4
+4
+4
+
+2
+
+3
+
+1
+
+3
+1
+4
+
+1
+0
+
+
diff --git a/components/cism/bld/namelist_files/namelist_definition_cism.xml b/components/cism/bld/namelist_files/namelist_definition_cism.xml
new file mode 100644
index 0000000000..033ed90a15
--- /dev/null
+++ b/components/cism/bld/namelist_files/namelist_definition_cism.xml
@@ -0,0 +1,1150 @@
+
+
+
+
+
+
+
+
+
+
+
+
+Input file
+Default: For startup runs or hybrid runs with CISM_OBSERVED_IC=TRUE, a resolution-dependent
+ initial conditions file (e.g., gland10.input.nc).
+ For branch/hybrid runs with CISM_OBSERVED_IC=FALSE, a restart file name
+ built based on RUN_REFCASE and RUN_REFDATE.
+
+
+
+
+
+
+
+Name of top-level configuration file for Glimmer Glacier model
+(Determined by scripts -- cannot be set by user)
+
+
+
+Determines whether extra diagnostics are printed in the cism log file
+Default: false
+
+
+
+Code describing how the solid ice runoff flux (i.e., calving) should
+be routed.
+ocn: all solid ice goes to the ocean component
+ice: all solid ice goes to the sea ice component
+Default: ocn
+
+
+
+
+
+
+
+Space-delimited list of variables output to history file
+Default: 'acab artm thk usurf topg uvel vvel uflx vflx temp bmlt bwat'
+
+
+
+How history frequency is specified
+nyears: Write history every N years
+coupler: Get history frequency from coupler (HIST_OPTION/HIST_N xml variables)
+ WARNING: SHOULD NOT BE USED IN PRODUCTION RUNS - frequency metadata not set properly
+Default: nyears
+
+
+
+History frequency
+e.g., if history_option=nyears, then 1 = annual, 2 = every two years, etc.
+Ignored for history_option = 'coupler'
+Default: 1
+
+
+
+
+
+
+
+Simulation identifier (ie case name)
+Default: case name set by create_newcase
+
+
+
+GLC time-step units
+This generally should not be changed
+Valid values: steps_per_year, steps_per_day, seconds, hours
+Default: set based on NCPL_BASE_PERIOD and GLC_NCPL in env_run.xml,
+so that there is one GLC time step per coupling period
+
+
+
+Time step, in units given by dt_option
+This generally should not be changed
+Default: set based on NCPL_BASE_PERIOD and GLC_NCPL in env_run.xml,
+so that there is one GLC time step per coupling period
+
+
+
+Whether leap years are enabled in the GLC time manager.
+CAUTION: Leap years don't work correctly with GLC time steps longer than a few months.
+Default: .false.
+
+
+
+Starting year number
+Default: comes from RUN_STARTDATE or RUN_REFDATE
+
+
+
+Starting month number
+Default: comes from RUN_STARTDATE or RUN_REFDATE
+
+
+
+Starting day number in month
+Default: comes from RUN_STARTDATE or RUN_REFDATE
+
+
+
+Starting hour of the day
+Default: 0
+
+
+
+Starting minute of the day
+Default: 0
+
+
+
+Starting second of the minute
+Default: 0
+
+
+
+Character to separate date values
+Default: '-'
+
+
+
+Stop option -- always let the coupler stop the model so use 'never'.
+Default: 'never'
+
+
+
+
+
+
+
+Whether to enable overrides of the glc fraction sent to the coupler.
+If this is false, the other settings in this namelist group are ignored.
+ONLY MEANT FOR TESTING - SHOULD NOT BE USED FOR SCIENCE RUNS.
+Default: .false.
+
+
+
+Time delay before beginning any overrides (days).
+Default: 0 (start overrides at beginning of run)
+
+
+
+Fractional decrease in glacier area, per day (should be positive).
+(days_elapsed * decrease_frac) determines the elevation threshold below which ice_covered is set to 0.
+When this factor reaches 1, all elevations below 3500 m are set to non-ice-covered.
+Default: 0 (no decrease)
+
+
+
+Fractional increase in glacier area, per day.
+(days_elapsed * increase_frac) determines the elevation threshold above which ice_covered is set to 1.
+When this factor reaches 1, all elevations >= 0 m are set to ice-covered.
+Default: 0 (no increase)
+
+
+
+Frequency (days) at which we rearrange elevation classes.
+Default: 0 (no flips ever)
+
+
+
+
+
+
+
+
+Number of nodes in x-direction
+Default: resolution-dependent
+
+
+
+Number of nodes in y-direction
+Default: resolution-dependent
+
+
+
+Number of nodes in z-direction
+Default: 11
+
+
+
+Node spacing in x-direction (m)
+Default: resolution-dependent
+
+
+
+Node spacing in y-direction (m)
+Default: resolution-dependent
+
+
+
+
+
+
+
+
+
+List of sigma levels, in ascending order, separated by spaces
+These run between 0.0 (at top surface) and 1.0 (at lower surface)
+Only relevant if sigma = 2
+Default: 0.00 0.15 0.30 0.45 0.60 0.75 0.83 0.90 0.95 0.98 1.00
+
+
+
+
+
+
+
+0: Do not let the ice sheet evolve (hold ice state fixed at initial condition)
+1: Let the ice sheet evolve
+Default: 1
+
+
+
+Ice time-step multiplier: allows asynchronous climate-ice coupling
+Default: 1
+
+
+
+If this is set to true, it sets the mass balance timestep to 1 day.
+This means the ice dynamics is called after one day of climate simulation.
+THIS IS ONLY FOR TESTING OF COUPLING PROCEDURES, NOT TO BE USED FOR SCIENCE.
+
+
+
+0: Send true fluxes to the GCM
+1: Zero out all fluxes sent to the GCM
+Default: Depends on GLC_TWO_WAY_COUPLING xml variable
+
+
+
+
+
+
+
+
+
+
+String specifying the map projection type
+Valid values: LAEA, AEA, LCC, STERE
+Default: STERE
+
+
+
+Central latitude (degrees north)
+Default: 90.0
+
+
+
+Central longitude (degrees east)
+Default: 321.0
+
+
+
+False easting (m)
+Default: 800000.0
+
+
+
+False northing (m)
+Default: 3400000.0
+
+
+
+Location of standard parallel(s) (degrees north)
+Up to two standard parallels may be specified (depending on the projection)
+Default: 71.0
+
+
+
+Scale factor; only relevant for the Stereographic projection
+Default: 0.0
+
+
+
+
+
+
+
+
+Which dycore to use
+0: glide dycore (SIA, serial only)
+1: NOT SUPPORTED: glam dycore (HO, FDM, serial or parallel)
+ Note that this option is not allowed within CESM, because it is buggy
+2: glissade dycore (HO, FEM, serial or parallel)
+Default: 0 for cism1, 2 for cism2
+
+
+
+Determines the temperature solution method
+0: isothermal: set column to surface air temperature
+1: prognostic temperature solution
+2: do NOTHING: hold temperatures steady at initial value
+3: prognostic enthalpy solution
+Default: 1
+
+
+
+Temperature initialization method
+0: Initialize temperature to 0 C
+1: Initialize temperature to surface air temperature
+2: Initialize temperature with a linear profile in each column
+Default: 2
+
+
+
+0: constant value, taken from default_flwa
+1: uniform value equal to the Paterson-Budd value at -10 deg C
+2: Paterson-Budd temperature-dependent relationship
+Default: 2
+
+
+
+Determines the treatment of basal water
+0: Set to zero everywhere
+1: Calculated from local water balance
+2: Compute the basal water flux, then find depth via calculation
+3: Set to constant everywhere (10m)
+4: Calculated from till water content, in the basal processes module
+Default: 0
+
+
+
+0: ignore marine margin
+1: set thickness to zero if floating
+2: lose a specified fraction of floating ice
+3: set thickness to zero if relaxed bedrock is below a given depth (marine_limit)
+4: set thickness to zero if current bedrock is below a given depth (marine_limit)
+5: Huybrechts grounding line scheme for Greenland initialization
+Default: 1
+
+
+
+Basal traction parameter
+0: no basal sliding
+1: constant basal traction coefficient
+2: constant coefficient where basal water is present, else no sliding
+3: constant coefficient where the basal temperature is at the pressure melting point, else no sliding
+4: coefficient is proportional to basal melt rate
+5: coefficient is a linear function of basal water depth
+Default: 0
+
+
+
+0: pseudo-diffusion
+1: ADI scheme [CANNOT BE USED: RESTARTS ARE NOT EXACT]
+2: diffusion
+3: remap thickness
+4: 1st order upwind
+5: no thickness evolution
+Default: 0 for cism1, 3 for cism2
+
+
+
+Method of integration used to obtain vertical velocity
+0: standard vertical integration
+1: vertical integration constrained to obey an upper kinematic boundary condition
+Default: 0
+
+
+
+0: relaxed topography is read from a separate variable
+1: first time slice of input topography is assumed to be relaxed
+2: first time slice of input topography is assumed to be in isostatic equilibrium with ice thickness
+Default: 0
+
+
+
+0: not in continuity equation
+1: in continuity equation
+Default: 1
+
+
+
+0: prescribed uniform geothermal heat flux
+1: read 2D geothermal flux field from input file (if present)
+2: calculate geothermal flux using 3d diffusion
+Default: 0
+
+
+
+0: no isostasy
+1: compute isostasy
+Default: 0
+
+
+
+How to determine sigma values
+0: compute standard Glimmer sigma coordinates
+1: sigma coordinates are given in external file [NOT ALLOWED WHEN RUNNING CISM IN CESM]
+2: read sigma coordinates from config file (from sigma_levels)
+3: evenly spaced levels, as required for glam dycore
+4: compute Pattyn sigma coordinates
+Default: 0
+
+
+
+0: no periodic EW boundary conditions
+1: periodic EW boundary conditions
+(This is a Glimmer serial option. The parallel code enforces periodic
+EW and NS boundary conditions by default.)
+Default: 0
+
+
+
+Hotstart (restart) the model if set to 1.
+This allows for exact restarts from previous initial conditions
+Default: 0 for startup or hybrid with CISM_OBSERVED_IC=TRUE, 1 for hybrid/branch with CISM_OBSERVED_IC=FALSE
+
+
+
+
+
+
+
+Ice sheet timestep (years)
+Must translate into an integer number of hours
+Default: Depends on resolution and physics option
+
+
+
+Multiplier of ice sheet timestep, dt
+(in theory, can be real-valued, but values less than 1 are not handled properly, so restricted to being an integer)
+Default: 1
+
+
+
+Subcycling for glissade
+Default: 1
+
+
+
+Profile period (number of time steps between profiles)
+Default: 100
+
+
+
+Diagnostic frequency (years)
+Set to 0 for no diagnostic output; set to dt for diagnostic output every time step
+Default: 1
+
+
+
+x coordinate of point for diagnostic output
+Default: resolution-dependent
+
+
+
+y coordinate of point for diagnostic output
+Default: resolution-dependent
+
+
+
+
+
+
+
+Set to a value between 0 (no messages) and 6 (all messages)
+Default: 6
+
+
+
+Thickness below which ice dynamics is ignored (m)
+Below this limit, ice is only accumulated
+Default: 100.
+
+
+
+All ice is assumed lost once water depths reach this value (for marine_margin=2 or 4) (m)
+Note that water depth is negative
+Default: -200.
+
+
+
+Fraction of ice lost to calving
+Default: (use hard-coded default)
+
+
+
+Constant geothermal heat flux (W m-2; sign convention is positive down)
+(May be overwritten by a spatially-varying field in input file [bheatflx])
+Default: -0.05
+
+
+
+The flow law is enhanced with this factor.
+The greater the value, the lower the viscosity and the faster the ice will flow.
+Default: 3.0 for cism1, 1.0 for cism2
+
+
+
+Glen's A to use in isothermal case
+Default: (use hard-coded default)
+
+
+
+Time scale for basal water to drain (yr-1)
+(Not relevant for basal_water=2)
+Default: 1000.
+
+
+
+(m yr-1 Pa-1)
+Default: 1.e-4
+
+
+
+(m yr-1 Pa-1)
+(Only used for slip_coeff = BTRC_LINEAR_BMLT)
+Default: (use hard-coded default)
+
+
+
+(Pa-1)
+(Only used for slip_coeff = BTRC_LINEAR_BMLT)
+Default: (use hard-coded default)
+
+
+
+5-element list of values
+(Only used for slip_coeff = BTRC_TANH_BWAT)
+Default: (use hard-coded default)
+
+
+
+Optional periodic offset for ismip-hom and similar tests
+May be needed to ensure continuous ice geometry at the edges of the
+global domain
+Default: 0.
+
+
+
+Optional periodic offset for ismip-hom and similar tests
+May be needed to ensure continuous ice geometry at the edges of the
+global domain
+Default: 0.
+
+
+
+
+
+
+
+
+
+Basal boundary condition for Payne/Price dynamical core
+0: constant value of 10 Pa/yr (useful for debugging)
+1: simple hard-coded pattern (useful for debugging)
+2: treat betasquared value as a till yield stress (in Pa) using Picard iteration
+3: linear (inverse) function of bwat
+4: very large value for betasquared to enforce no slip everywhere
+5: betasquared field passed in from .nc input file as part of standard i/o
+6: no slip everywhere (using Dirichlet BC rather than large betasquared)
+7: treat betasquared value as till yield stress (in Pa) using Newton-type iteration (in devel.)
+Default: 5
+
+
+
+How effective viscosity is computed for higher-order dynamical core
+0: constant value
+1: multiple of flow factor
+2: compute from effective strain rate
+Default: 2
+
+
+
+Method for computing residual in Payne/Price dynamical core
+0: maxval
+1: maxval ignoring basal velocity
+2: mean value
+3: L2 norm of system residual, Ax-b=resid
+Default: 3
+
+
+
+Method for computing the dissipation during the temperature calculation
+-1: no dissipation
+0: 0-order SIA approx.
+1: 1st order solution (e.g., Blatter-Pattyn)
+Default: 1
+
+
+
+Method for solving the sparse linear system that arises from the higher-order solver
+0: Biconjugate gradient, incomplete LU preconditioner
+1: GMRES, incomplete LU preconditioner
+2: Conjugate gradient, incomplete LU preconditioner
+3: Conjugate gradient, structured grid, parallel-enabled
+4: standalone interface to Trilinos
+Default: 3
+
+
+
+Method for solving the nonlinear iteration when solving the first-order momentum balance
+0: use the standard Picard iteration
+1: use Jacobian Free Newton Krylov (JFNK) method
+Default: 0
+
+
+
+How to compute the gradient at the ice margin in the glissade dycore.
+Not valid for other dycores.
+0: Use info from all neighbor cells, ice-covered or ice-free
+1: Use info from ice-covered and/or land cells, not ice-free ocean
+2: Use info from ice-covered cells only
+Default: (use hard-coded default)
+
+
+
+Flag that indicates which Stokes approximation to use with the glissade dycore.
+Not valid for other dycores.
+-1: Shallow-ice approximation, Glide-type calculation (uses glissade_velo_sia)
+0: Shallow-ice approximation, vertical-shear stresses only (uses glissade_velo_higher)
+1: Shallow-shelf approximation, horizontal-plane stresses only (uses glissade_velo_higher)
+2: Blatter-Pattyn approximation with both vertical-shear and horizontal-plane stresses (uses glissade_velo_higher)
+3: Vertically integrated 'L1L2' approximation with vertical-shear and horizontal-plane stresses (uses glissade_velo_higher)
+Default: (use hard-coded default)
+
+
+
+
+
+
+
+
+
+1: 1D calculations
+3: 3D calculations
+Only relevant if gthf = 2
+Default: 1
+
+
+
+Number of vertical layers
+Only relevant if gthf = 2
+Default: 20
+
+
+
+Initial surface temperature (degrees C)
+Only relevant if gthf = 2
+Default: (use hard-coded default)
+
+
+
+Depth below sea-level at which geothermal heat gradient is applied (m)
+Only relevant if gthf = 2
+Default: (use hard-coded default)
+
+
+
+Number of time steps for spinning up GTHF calculations
+Only relevant if gthf = 2
+Default: 0
+
+
+
+Density of lithosphere (kg m-3)
+Only relevant if gthf = 2
+Default: (use hard-coded default)
+
+
+
+Specific heat capacity of lithosphere (J kg-1 K-1)
+Only relevant if gthf = 2
+Default: (use hard-coded default)
+
+
+
+thermal conductivity of lithosphere (W m-1 K-1)
+Only relevant if gthf = 2
+Default: (use hard-coded default)
+
+
+
+
+
+
+
+
+
+
+0: local lithosphere, equilibrium bedrock depression is found using Archimedes' principle
+1: elastic lithosphere, flexural rigidity is taken into account
+Only relevant if isostasy = 1
+Default: 0
+
+
+
+0: fluid mantle, isostatic adjustment happens instantaneously
+1: relaxing mantle, mantle is approximated by a half-space
+Only relevant if isostasy = 1
+Default: 0
+
+
+
+Characteristic time constant of relaxing mantle (years)
+Only relevant if isostasy = 1
+Default: (use hard-coded default)
+
+
+
+Lithosphere update period (years)
+Only relevant if isostasy = 1
+Default: (use hard-coded default)
+
+
+
+Flexural rigidity of the lithosphere
+Only relevant if 'lithosphere' is set to 1
+Default: (use hard-coded default)
+
+
+
+
+
diff --git a/components/cism/bld/trilinosOptions/README b/components/cism/bld/trilinosOptions/README
new file mode 100644
index 0000000000..f18193b040
--- /dev/null
+++ b/components/cism/bld/trilinosOptions/README
@@ -0,0 +1,6 @@
+This directory contains resolution-dependent trilinosOptions xml
+files; one of these is copied to the CESM run directory by
+cism.buildnml, depending on the value of GLC_GRID. However, if
+there is a trilinosOptions.xml file in SourceMods/src.cism, that file
+is used instead.
+
diff --git a/components/cism/bld/trilinosOptions/trilinosOptions_gland10.xml b/components/cism/bld/trilinosOptions/trilinosOptions_gland10.xml
new file mode 100755
index 0000000000..9a1d2f3751
--- /dev/null
+++ b/components/cism/bld/trilinosOptions/trilinosOptions_gland10.xml
@@ -0,0 +1,107 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/components/cism/bld/trilinosOptions/trilinosOptions_gland20.xml b/components/cism/bld/trilinosOptions/trilinosOptions_gland20.xml
new file mode 100755
index 0000000000..9a1d2f3751
--- /dev/null
+++ b/components/cism/bld/trilinosOptions/trilinosOptions_gland20.xml
@@ -0,0 +1,107 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/components/cism/bld/trilinosOptions/trilinosOptions_gland4.xml b/components/cism/bld/trilinosOptions/trilinosOptions_gland4.xml
new file mode 100755
index 0000000000..9a1d2f3751
--- /dev/null
+++ b/components/cism/bld/trilinosOptions/trilinosOptions_gland4.xml
@@ -0,0 +1,107 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/components/cism/bld/trilinosOptions/trilinosOptions_gland5.xml b/components/cism/bld/trilinosOptions/trilinosOptions_gland5.xml
new file mode 100755
index 0000000000..9a1d2f3751
--- /dev/null
+++ b/components/cism/bld/trilinosOptions/trilinosOptions_gland5.xml
@@ -0,0 +1,107 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/components/cism/bld/trilinosOptions/trilinosOptions_gland5UM.xml b/components/cism/bld/trilinosOptions/trilinosOptions_gland5UM.xml
new file mode 100755
index 0000000000..9a1d2f3751
--- /dev/null
+++ b/components/cism/bld/trilinosOptions/trilinosOptions_gland5UM.xml
@@ -0,0 +1,107 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/components/cism/bld/user_nl_cism b/components/cism/bld/user_nl_cism
new file mode 100644
index 0000000000..99097f72e2
--- /dev/null
+++ b/components/cism/bld/user_nl_cism
@@ -0,0 +1,21 @@
+!-----------------------------------------------------------------------
+! Users should ONLY USE user_nl_cism to change namelist variables for
+! any of the namelists in cism_in and the namelist-like sections in
+! cism.config.
+!
+! Users should add ALL user specific namelist changes below using the
+! following syntax:
+!
+! namelist_var = new_namelist_value
+!
+! Note that there is no distinction between variables that will appear
+! in cism_in and those that will appear in cism.config: simply add a new
+! variable setting here, and it will be added to the appropriate place
+! in cism_in or cism.config.
+!
+! For example to change the value of evolution to 0, add the following
+! below:
+!
+! evolution = 0
+!-----------------------------------------------------------------------
+
diff --git a/components/cism/cimetest/testlist_cism.xml b/components/cism/cimetest/testlist_cism.xml
new file mode 100644
index 0000000000..93b29775a0
--- /dev/null
+++ b/components/cism/cimetest/testlist_cism.xml
@@ -0,0 +1,134 @@
+
+
+
+
+
+ yellowstone
+
+
+
+
+
+
+ yellowstone
+
+
+ yellowstone
+
+
+
+
+ yellowstone
+
+
+
+
+
+
+ yellowstone
+ yellowstone
+
+
+
+
+
+
+ yellowstone
+
+
+
+
+ yellowstone
+ yellowstone
+
+
+ yellowstone
+ yellowstone
+
+
+
+
+
+
+ yellowstone
+
+
+ yellowstone
+ yellowstone
+
+
+ yellowstone
+
+
+
+
+
+
+ yellowstone
+
+
+ yellowstone
+
+
+
+
+
+
+ yellowstone
+
+
+ yellowstone
+
+
+
+
+
+
+ edison
+ yellowstone
+
+
+ yellowstone
+
+
+
+
+
+
+ yellowstone
+
+
+ titan
+ yellowstone
+ yellowstone
+ yellowstone
+
+
+
+
+ hopper
+ yellowstone
+ yellowstone
+
+
+
+
+
+
+ hobart
+ yellowstone
+
+
+
+
+
+
+ yellowstone
+
+
+ hobart
+ yellowstone
+
+
+
+
diff --git a/components/cism/cimetest/testmods_dirs/cism/apply_to_multiinstance/README b/components/cism/cimetest/testmods_dirs/cism/apply_to_multiinstance/README
new file mode 100644
index 0000000000..c012faea8b
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/apply_to_multiinstance/README
@@ -0,0 +1,3 @@
+This testmods directory should not be used directly. Instead, it can be included
+by any testmods that wants to apply its user_nl changes to multi-instance as
+well as single-instance tests.
diff --git a/components/cism/cimetest/testmods_dirs/cism/apply_to_multiinstance/shell_commands b/components/cism/cimetest/testmods_dirs/cism/apply_to_multiinstance/shell_commands
new file mode 100644
index 0000000000..ad5adf8ae3
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/apply_to_multiinstance/shell_commands
@@ -0,0 +1,11 @@
+# Apply these testmods to multi-instance tests, too (assuming there are only two instances)
+#
+# Doing this copy rather than explicitly including user_nl_cism_0001, etc. is
+# preferable both to avoid duplication and also so that the FINAL version of
+# user_nl_cism is copied in the case that there is another testmods directory
+# that includes this one.
+#
+# Ideally, these copies would be done automatically when applying testmods in
+# create_newcase.
+cp user_nl_cism user_nl_cism_0001
+cp user_nl_cism user_nl_cism_0002
diff --git a/components/cism/cimetest/testmods_dirs/cism/oneway/README b/components/cism/cimetest/testmods_dirs/cism/oneway/README
new file mode 100644
index 0000000000..4a6807ea65
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/oneway/README
@@ -0,0 +1 @@
+This set of testmods turns off two-way coupling
diff --git a/components/cism/cimetest/testmods_dirs/cism/oneway/xmlchange_cmnds b/components/cism/cimetest/testmods_dirs/cism/oneway/xmlchange_cmnds
new file mode 100644
index 0000000000..404b22b006
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/oneway/xmlchange_cmnds
@@ -0,0 +1 @@
+./xmlchange GLC_TWO_WAY_COUPLING=FALSE
diff --git a/components/cism/cimetest/testmods_dirs/cism/override_glc_frac/include_user_mods b/components/cism/cimetest/testmods_dirs/cism/override_glc_frac/include_user_mods
new file mode 100644
index 0000000000..1c29cfcb00
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/override_glc_frac/include_user_mods
@@ -0,0 +1 @@
+../apply_to_multiinstance
diff --git a/components/cism/cimetest/testmods_dirs/cism/override_glc_frac/user_nl_cism b/components/cism/cimetest/testmods_dirs/cism/override_glc_frac/user_nl_cism
new file mode 100644
index 0000000000..c08fd0bbc0
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/override_glc_frac/user_nl_cism
@@ -0,0 +1,7 @@
+! Turn on all overrides of the glc fraction
+
+enable_frac_overrides = .true.
+override_delay = 1
+decrease_frac = 0.05
+increase_frac = 0.05
+rearrange_freq = 3
diff --git a/components/cism/cimetest/testmods_dirs/cism/test_coupling/include_user_mods b/components/cism/cimetest/testmods_dirs/cism/test_coupling/include_user_mods
new file mode 100644
index 0000000000..1c29cfcb00
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/test_coupling/include_user_mods
@@ -0,0 +1 @@
+../apply_to_multiinstance
diff --git a/components/cism/cimetest/testmods_dirs/cism/test_coupling/user_nl_cism b/components/cism/cimetest/testmods_dirs/cism/test_coupling/user_nl_cism
new file mode 100644
index 0000000000..1e65ec4b98
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/test_coupling/user_nl_cism
@@ -0,0 +1,7 @@
+! This option changes the ice sheet dynamics time step to 1 day rather than 1 year
+! Thus, the ice sheet dynamics can be exercised in a few-day run
+test_coupling = .true.
+
+! This is needed to give CISM history output in the (typically short)
+! tests that are done with this testmod directory
+history_option = 'coupler'
diff --git a/components/cism/cimetest/testmods_dirs/cism/trilinos/README b/components/cism/cimetest/testmods_dirs/cism/trilinos/README
new file mode 100644
index 0000000000..e8fe16fd42
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/trilinos/README
@@ -0,0 +1,5 @@
+This testmods directory tests the use of the trilinos solver.
+
+Note: We also want to allow the case where cism is built using
+CISM_USE_TRILINOS=TRUE, but trilinos isn't actually chosen at runtime. However,
+for now I don't feel it's worth actually having a test of that combination.
diff --git a/components/cism/cimetest/testmods_dirs/cism/trilinos/include_user_mods b/components/cism/cimetest/testmods_dirs/cism/trilinos/include_user_mods
new file mode 100644
index 0000000000..1c29cfcb00
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/trilinos/include_user_mods
@@ -0,0 +1 @@
+../apply_to_multiinstance
diff --git a/components/cism/cimetest/testmods_dirs/cism/trilinos/shell_commands b/components/cism/cimetest/testmods_dirs/cism/trilinos/shell_commands
new file mode 100755
index 0000000000..de22fb6073
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/trilinos/shell_commands
@@ -0,0 +1 @@
+./xmlchange CISM_USE_TRILINOS=TRUE
diff --git a/components/cism/cimetest/testmods_dirs/cism/trilinos/user_nl_cism b/components/cism/cimetest/testmods_dirs/cism/trilinos/user_nl_cism
new file mode 100644
index 0000000000..7b6d595280
--- /dev/null
+++ b/components/cism/cimetest/testmods_dirs/cism/trilinos/user_nl_cism
@@ -0,0 +1,2 @@
+which_ho_sparse = 4
+
diff --git a/components/cism/drivers/cpl/glc_comp_esmf.F90 b/components/cism/drivers/cpl/glc_comp_esmf.F90
new file mode 100644
index 0000000000..0e5523e13b
--- /dev/null
+++ b/components/cism/drivers/cpl/glc_comp_esmf.F90
@@ -0,0 +1,668 @@
+module glc_comp_esmf
+
+#ifdef ESMF_INTERFACE
+! !USES:
+
+ use shr_sys_mod
+ use shr_kind_mod, only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8
+ use shr_kind_mod, only: CS=>SHR_KIND_CS, CL=>SHR_KIND_CL
+ use shr_file_mod, only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel
+ use shr_file_mod, only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio
+ use shr_file_mod, only: shr_file_freeunit
+
+ use esmf
+ use esmfshr_mod
+
+ use seq_infodata_mod, only: seq_infodata_start_type_start, seq_infodata_start_type_cont
+ use seq_infodata_mod, only: seq_infodata_start_type_brnch
+ use seq_timemgr_mod
+
+ use glc_import_export
+ use glc_cpl_indices
+ use glc_constants, only: verbose, stdout, stderr, nml_in, radius
+ use glc_errormod, only: glc_success
+ use glc_InitMod, only: glc_initialize
+ use glc_RunMod, only: glc_run
+ use glc_FinalMod, only: glc_final
+ use glc_io, only: glc_io_write_restart
+ use glc_communicate, only: init_communicate, my_task, master_task
+ use glc_time_management, only: iyear,imonth,iday,ihour,iminute,isecond,runtype
+ use glc_fields, only: ice_sheet
+
+ implicit none
+ SAVE
+ private ! By default make data private
+
+ !--------------------------------------------------------------------------
+ ! Public interfaces
+ !--------------------------------------------------------------------------
+
+ public :: glc_register_esmf
+ public :: glc_init_esmf
+ public :: glc_run_esmf
+ public :: glc_final_esmf
+
+ !--------------------------------------------------------------------------
+ ! Private interfaces
+ !--------------------------------------------------------------------------
+
+ private :: glc_distgrid_esmf
+ private :: glc_domain_esmf
+
+ !--------------------------------------------------------------------------
+ ! Private module data interfaces
+ !--------------------------------------------------------------------------
+
+ !--- stdin input stuff ---
+ character(CS) :: str ! cpp defined model name
+
+ !--- other ---
+ integer(IN) :: errorcode ! glc error code
+
+ ! my_task_local and master_task_local are needed for some checks that are done before
+ ! init_communicate is called (although, it's possible that init_communicate could be
+ ! moved to earlier to prevent the need for these copies)
+ integer(IN) :: my_task_local ! my task in mpi communicator mpicom
+ integer(IN) :: master_task_local=0 ! task number of master task
+
+
+!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+CONTAINS
+!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+!====================================================================================
+
+ subroutine glc_register_esmf(comp, rc)
+ implicit none
+ type(ESMF_GridComp) :: comp
+ integer, intent(out) :: rc
+
+ rc = ESMF_SUCCESS
+ print *, "In glc register routine"
+ ! Register the callback routines.
+
+ call ESMF_GridCompSetEntryPoint(comp, ESMF_METHOD_INITIALIZE, &
+ glc_init_esmf, phase=1, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_GridCompSetEntryPoint(comp, ESMF_METHOD_RUN, &
+ glc_run_esmf, phase=1, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_GridCompSetEntryPoint(comp, ESMF_METHOD_FINALIZE, &
+ glc_final_esmf, phase=1, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ end subroutine glc_register_esmf
+
+!====================================================================================
+
+ subroutine glc_init_esmf(comp, import_state, export_state, EClock, rc)
+
+ use glc_ensemble , only : set_inst_vars, write_inst_vars, get_inst_name
+ use glc_files , only : set_filenames, ionml_filename
+ use glc_coupling_flags , only : has_ocn_coupling, has_ice_coupling
+ use glc_indexing_info , only : nx_tot, ny_tot, npts_tot
+
+ !-----------------------------------------------------------------------
+ ! !DESCRIPTION:
+ ! Initialize glc model
+ !
+ ! !INPUT/OUTPUT PARAMETERS:
+ implicit none
+ type(ESMF_GridComp) :: comp
+ type(ESMF_State) :: import_state
+ type(ESMF_State) :: export_state
+ type(ESMF_Clock) :: EClock
+ integer, intent(out) :: rc
+ !
+ ! !LOCAL VARIABLES:
+ type(ESMF_DistGrid) :: distgrid
+ type(ESMF_Array) :: dom, g2x, x2g
+ type(ESMF_VM) :: vm
+ integer(IN) :: ierr
+ integer(IN) :: i,j,n
+ integer(IN) :: shrlogunit, shrloglev
+ character(CL) :: starttype
+ real(R8), pointer :: fptr(:,:)
+ integer :: mpicom_loc, mpicom_vm
+ character(ESMF_MAXSTR) :: convCIM, purpComp
+ integer(IN) :: COMPID
+ character(CS) :: myModelName
+
+ !--- formats ---
+ character(*), parameter :: F00 = "('(glc_init_esmf) ',8a)"
+ character(*), parameter :: F01 = "('(glc_init_esmf) ',a,8i8)"
+ character(*), parameter :: F02 = "('(glc_init_esmf) ',a,4es13.6)"
+ character(*), parameter :: F03 = "('(glc_init_esmf) ',a,i8,a)"
+ character(*), parameter :: F90 = "('(glc_init_esmf) ',73('='))"
+ character(*), parameter :: F91 = "('(glc_init_esmf) ',73('-'))"
+ character(*), parameter :: subName = "(glc_init_esmf) "
+ !-----------------------------------------------------------------------
+
+ ! Determine attribute vector indices
+
+ call glc_cpl_indices_set()
+
+ rc = ESMF_SUCCESS
+
+ ! duplicate the mpi communicator from the current VM
+ call ESMF_VMGetCurrent(vm, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_VMGet(vm, mpiCommunicator=mpicom_vm, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call MPI_Comm_dup(mpicom_vm, mpicom_loc, rc)
+ if(rc /= 0) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ ! Get id of this task
+ call MPI_Comm_rank(mpicom_loc, my_task_local, ierr)
+
+ ! Initialize glc id
+
+ call ESMF_AttributeGet(export_state, name="ID", value=COMPID, rc=rc)
+ if (rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ !---------------------------------------------------------------------------
+ ! set variables that depend on ensemble index
+ !---------------------------------------------------------------------------
+
+ call set_inst_vars(COMPID)
+ call get_inst_name(myModelName)
+ call set_filenames()
+
+ !---------------------------------------------------------------------------
+ ! determine type of run
+ !---------------------------------------------------------------------------
+
+ call ESMF_AttributeGet(export_state, name="start_type", value=starttype, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ if ( trim(starttype) == trim(seq_infodata_start_type_start)) then
+ runtype = "initial"
+ else if (trim(starttype) == trim(seq_infodata_start_type_cont) ) then
+ runtype = "continue"
+ else if (trim(starttype) == trim(seq_infodata_start_type_brnch)) then
+ runtype = "branch"
+ else
+ write(*,*) 'glc_comp_esmf ERROR: unknown starttype'
+ call shr_sys_abort()
+ end if
+
+ !----------------------------------------------------------------------------
+ ! Initialize glc
+ !----------------------------------------------------------------------------
+
+ if (my_task_local == master_task_local) then
+ stdout = shr_file_getUnit()
+ call shr_file_setIO(ionml_filename,stdout)
+ else
+ stdout = 6
+ endif
+ stderr = stdout
+ nml_in = shr_file_getUnit()
+
+ call shr_file_getLogUnit (shrlogunit)
+ call shr_file_getLogLevel(shrloglev)
+ call shr_file_setLogUnit (stdout)
+
+ errorCode = glc_Success
+ if (verbose .and. my_task_local == master_task_local) then
+ write(stdout,F00) ' Starting'
+ write(stdout,*) subname, 'COMPID: ', COMPID
+ call write_inst_vars
+ call shr_sys_flush(stdout)
+ endif
+ call init_communicate(mpicom_loc)
+
+ call glc_initialize(errorCode)
+
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' GLC Initial Date ',iyear,imonth,iday,ihour,iminute,isecond
+ write(stdout,F01) ' Initialize Done', errorCode
+ call shr_sys_flush(stdout)
+ endif
+
+ !---------------------------------------------------------------------------
+ ! Initialize distgrids, domains, and arrays
+ !---------------------------------------------------------------------------
+
+ ! Initialize glc distgrid
+
+ distgrid = glc_distgrid_esmf(rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_AttributeSet(export_state, name="gsize", value=npts_tot, rc=rc)
+ if (rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ ! Initialize glc domain (needs glc initialization info)
+
+ dom = mct2esmf_init(distgrid, attname=seq_flds_dom_fields, name="domain", rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call glc_domain_esmf(dom, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+    ! Initialize input/output arrays
+
+ g2x = mct2esmf_init(distgrid, attname=seq_flds_g2x_fields, name="d2x", rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ x2g = mct2esmf_init(distgrid, attname=seq_flds_x2g_fields, name="x2d", rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_StateAdd(export_state, (/dom/), rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_StateAdd(export_state, (/g2x/), rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_StateAdd(import_state, (/x2g/), rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ !---------------------------------------------------------------------------
+ ! send initial state to driver
+ !---------------------------------------------------------------------------
+
+ call ESMF_ArrayGet(g2x, localDe=0, farrayPtr=fptr, rc=rc)
+ if (rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call glc_export(fptr)
+
+ call ESMF_AttributeSet(export_state, name="glc_present", value=.true., rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_AttributeSet(export_state, name="glclnd_present", value=.true., rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_AttributeSet(export_state, name="glcocn_present", &
+ value=has_ocn_coupling(), rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_AttributeSet(export_state, name="glcice_present", &
+ value=has_ice_coupling(), rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_AttributeSet(export_state, name="glc_prognostic", value=.true., rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_AttributeSet(export_state, name="glc_nx", value=nx_tot, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_AttributeSet(export_state, name="glc_ny", value=ny_tot, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+#ifdef USE_ESMF_METADATA
+ convCIM = "CIM"
+ purpComp = "Model Component Simulation Description"
+
+ call ESMF_AttributeAdd(comp, &
+ convention=convCIM, purpose=purpComp, rc=rc)
+
+ call ESMF_AttributeSet(comp, "ShortName", "GLC", &
+ convention=convCIM, purpose=purpComp, rc=rc)
+
+ call ESMF_AttributeSet(comp, "LongName", &
+ "TBD", &
+ convention=convCIM, purpose=purpComp, rc=rc)
+
+ call ESMF_AttributeSet(comp, "Description", &
+ "TBD", &
+
+ convention=convCIM, purpose=purpComp, rc=rc)
+ call ESMF_AttributeSet(comp, "ReleaseDate", "2010", &
+ convention=convCIM, purpose=purpComp, rc=rc)
+
+ call ESMF_AttributeSet(comp, "ModelType", "GlC", &
+ convention=convCIM, purpose=purpComp, rc=rc)
+
+ ! call ESMF_AttributeSet(comp, "Name", "someone", &
+ ! convention=convCIM, purpose=purpComp, rc=rc)
+ ! call ESMF_AttributeSet(comp, "EmailAddress", &
+ ! "someone@someplace", &
+ ! convention=convCIM, purpose=purpComp, rc=rc)
+ ! call ESMF_AttributeSet(comp, "ResponsiblePartyRole", "contact", &
+ ! convention=convCIM, purpose=purpComp, rc=rc)
+#endif
+
+ if (my_task == master_task) then
+ write(stdout,F91)
+ write(stdout,F00) trim(myModelName),': start of main integration loop'
+ write(stdout,F91)
+ end if
+
+ !----------------------------------------------------------------------------
+ ! Reset shr logging to original values
+ !----------------------------------------------------------------------------
+
+ call shr_file_setLogUnit (shrlogunit)
+ call shr_file_setLogLevel(shrloglev)
+
+ end subroutine glc_init_esmf
+
+!====================================================================================
+
+ subroutine glc_run_esmf(comp, import_state, export_state, EClock, rc)
+
+ !---------------------------------------------------------------------------
+ ! !DESCRIPTION:
+ ! Run GLC
+ !
+ ! !ARGUMENTS:
+ implicit none
+ type(ESMF_GridComp) :: comp
+ type(ESMF_State) :: import_state
+ type(ESMF_State) :: export_state
+ type(ESMF_Clock) :: EClock
+ integer, intent(out) :: rc
+ !
+ ! !LOCAL VARIABLES:
+ integer(IN) :: cesmYMD ! cesm model date
+ integer(IN) :: cesmTOD ! cesm model sec
+ integer(IN) :: glcYMD ! glc model date
+ integer(IN) :: glcTOD ! glc model sec
+ logical :: stop_alarm ! is it time to stop
+ logical :: rest_alarm ! is it time to write a restart
+ logical :: done ! time loop logical
+ integer(IN) :: shrlogunit, shrloglev
+ real(R8), pointer :: fptr(:,:)
+ type(ESMF_Array) :: x2g, g2x
+ character(*), parameter :: F00 = "('(glc_run_esmf) ',8a)"
+ character(*), parameter :: F01 = "('(glc_run_esmf) ',a,8i8)"
+ character(*), parameter :: F04 = "('(glc_run_esmf) ',2a,2i8,'s')"
+ character(*), parameter :: subName = "(glc_run_esmf) "
+ !---------------------------------------------------------------------------
+
+ ! Reset shr logging to my log file
+
+ rc = ESMF_SUCCESS
+
+ call shr_file_getLogUnit (shrlogunit)
+ call shr_file_getLogLevel(shrloglev)
+ call shr_file_setLogUnit (stdout)
+
+ ! Set internal time info
+
+ errorCode = glc_Success
+ call seq_timemgr_EClockGetData(EClock,curr_ymd=cesmYMD, curr_tod=cesmTOD)
+ stop_alarm = seq_timemgr_StopAlarmIsOn( EClock )
+
+ glcYMD = iyear*10000 + imonth*100 + iday
+ glcTOD = ihour*3600 + iminute*60 + isecond
+ done = .false.
+ if (glcYMD == cesmYMD .and. glcTOD == cesmTOD) done = .true.
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' Run Starting ',glcYMD,glcTOD
+ call shr_sys_flush(stdout)
+ endif
+
+ ! Unpack import state
+
+ call ESMF_StateGet(import_state, itemName="x2d", array=x2g, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_ArrayGet(x2g, localDe=0, farrayPtr=fptr, rc=rc)
+ if (rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call glc_import(fptr)
+
+ ! Run
+
+ do while (.not. done)
+ if (glcYMD > cesmYMD .or. (glcYMD == cesmYMD .and. glcTOD > cesmTOD)) then
+ write(stdout,*) subname,' ERROR overshot coupling time ',glcYMD,glcTOD,cesmYMD,cesmTOD
+ call shr_sys_abort('glc error overshot time')
+ endif
+
+ call glc_run(EClock)
+
+ glcYMD = iyear*10000 + imonth*100 + iday
+ glcTOD = ihour*3600 + iminute*60 + isecond
+ if (glcYMD == cesmYMD .and. glcTOD == cesmTOD) done = .true.
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' GLC Date ',glcYMD,glcTOD
+ endif
+ enddo
+
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' Run Done',glcYMD,glcTOD
+ call shr_sys_flush(stdout)
+ endif
+
+ ! Pack export state
+
+ call ESMF_StateGet(export_state, itemName="d2x", array=g2x, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call ESMF_ArrayGet(g2x, localDe=0, farrayPtr=fptr, rc=rc)
+ if (rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call glc_export(fptr)
+
+ ! Log output for model date
+
+ if (my_task == master_task) then
+ call seq_timemgr_EClockGetData(EClock,curr_ymd=cesmYMD, curr_tod=cesmTOD)
+ write(stdout,F01) ' CESM Date ', cesmYMD,cesmTOD
+ glcYMD = iyear*10000 + imonth*100 + iday
+ glcTOD = ihour*3600 + iminute*60 + isecond
+ write(stdout,F01) ' GLC Date ',glcYMD,glcTOD
+ call shr_sys_flush(stdout)
+ end if
+
+ ! If time to write restart, do so
+
+ rest_alarm = seq_timemgr_RestartAlarmIsOn( EClock )
+ if (rest_alarm) then
+ ! TODO loop over instances
+ call glc_io_write_restart(ice_sheet%instances(1), EClock)
+ endif
+
+ ! Reset shr logging to original values
+
+ call shr_file_setLogUnit (shrlogunit)
+ call shr_file_setLogLevel(shrloglev)
+ call shr_sys_flush(stdout)
+
+ end subroutine glc_run_esmf
+
+!====================================================================================
+
+ subroutine glc_final_esmf(comp, import_state, export_state, EClock, rc)
+
+ use glc_ensemble, only : get_inst_name
+
+ !------------------------------------------------------------------------------
+ ! !DESCRIPTION:
+ ! Finalize GLC
+ !
+ ! !ARGUMENTS:
+ !
+ implicit none
+ type(ESMF_GridComp) :: comp
+ type(ESMF_State) :: import_state
+ type(ESMF_State) :: export_state
+ type(ESMF_Clock) :: EClock
+ integer, intent(out) :: rc
+
+ integer(IN) :: shrlogunit, shrloglev
+ character(CS) :: myModelName
+
+ !--- formats ---
+ character(*), parameter :: F00 = "('(glc_final_mct) ',8a)"
+ character(*), parameter :: F01 = "('(glc_final_mct) ',a,8i8)"
+ character(*), parameter :: F91 = "('(glc_final_mct) ',73('-'))"
+ character(*), parameter :: subName = "(glc_final_mct) "
+ !---------------------------------------------------------------------------
+
+ ! Reset shr logging to my log file
+ call shr_file_getLogUnit (shrlogunit)
+ call shr_file_getLogLevel(shrloglev)
+ call shr_file_setLogUnit (stdout)
+
+ call get_inst_name(myModelName)
+
+ if (my_task == master_task) then
+ write(stdout,F91)
+ write(stdout,F00) trim(myModelName),': end of main integration loop'
+ write(stdout,F91)
+ end if
+
+ errorCode = glc_Success
+
+ call glc_final(errorCode)
+
+ ! Note that restart for final timestep was written in run phase.
+ rc = ESMF_SUCCESS
+
+ ! Destroy ESMF objects
+
+ call esmfshr_util_StateArrayDestroy(export_state,"d2x",rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call esmfshr_util_StateArrayDestroy(export_state,"domain",rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ call esmfshr_util_StateArrayDestroy(import_state,"x2d",rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' Done',errorCode
+ call shr_sys_flush(stdout)
+ endif
+
+ ! Reset shr logging to original values
+
+ call shr_file_setLogUnit (shrlogunit)
+ call shr_file_setLogLevel(shrloglev)
+ call shr_sys_flush(stdout)
+
+ end subroutine glc_final_esmf
+
+!=================================================================================
+
+ function glc_distgrid_esmf(rc)
+
+ ! Initialize global index space array
+
+ use glc_broadcast, only: broadcast_scalar
+ use glc_indexing_info, only : local_indices, global_indices, nx, ny, npts
+
+ !-------------------------------------------------------------------
+ ! Arguments
+ implicit none
+ integer, intent(out):: rc
+
+ ! Return:
+ type(ESMF_DistGrid) :: glc_DistGrid_esmf ! Resulting distributed grid
+
+ ! Local Variables
+ integer,allocatable :: gindex(:)
+ integer :: i, j, n
+ integer :: ier
+
+ !--- formats ---
+ character(*), parameter :: F02 = "('(glc_DistGrid_esmf) ',a,4es13.6)"
+ character(*), parameter :: subName = "(glc_DistGrid_esmf) "
+ !-------------------------------------------------------------------
+
+ allocate(gindex(npts))
+ do j = 1,ny
+ do i = 1,nx
+ n = local_indices(i,j)
+ gindex(n) = global_indices(i,j)
+ enddo
+ enddo
+
+ glc_DistGrid_esmf = mct2esmf_init(gindex, rc=rc)
+ if (rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ deallocate(gindex)
+
+ end function glc_DistGrid_esmf
+
+!=======================================================================
+
+ subroutine glc_domain_esmf( dom, rc )
+
+ !-------------------------------------------------------------------
+ use glc_indexing_info, only : nx, ny, local_indices
+ use glad_main, only : glad_get_lat_lon, glad_get_areas
+
+ implicit none
+ type(ESMF_Array), intent(inout) :: dom
+ integer, intent(out) :: rc
+
+ ! Local Variables
+ integer :: j,i,n
+ integer :: klon,klat,karea,kmask,kfrac ! domain fields
+ real(R8), pointer :: fptr(:,:)
+ real(r8), allocatable :: lats(:,:) ! latitude of each point (degrees)
+ real(r8), allocatable :: lons(:,:) ! longitude of each point (degrees)
+ real(r8), allocatable :: areas(:,:) ! area of each point (square meters)
+ !-------------------------------------------------------------------
+
+ ! Initialize domain type
+ ! lat/lon in degrees, area in radians^2
+ !
+ rc = ESMF_SUCCESS
+
+ call ESMF_ArrayGet(dom, localDe=0, farrayPtr=fptr, rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ ! Fill in correct values for domain components
+ klon = esmfshr_util_ArrayGetIndex(dom,'lon ',rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ klat = esmfshr_util_ArrayGetIndex(dom,'lat ',rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ karea = esmfshr_util_ArrayGetIndex(dom,'area',rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ kmask = esmfshr_util_ArrayGetIndex(dom,'mask',rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ kfrac = esmfshr_util_ArrayGetIndex(dom,'frac',rc=rc)
+ if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
+
+ ! Fill in correct values for domain components
+
+ allocate(lats(nx, ny))
+ allocate(lons(nx, ny))
+ allocate(areas(nx, ny))
+
+ ! TODO(wjs, 2015-04-02) The following may need a loop over instances
+ call glad_get_lat_lon(ice_sheet, instance_index = 1, &
+ lats = lats, lons = lons)
+ call glad_get_areas(ice_sheet, instance_index = 1, areas = areas)
+
+ fptr(:,:) = -9999.0_R8
+ fptr(kmask,:) = -0.0_R8
+ do j = 1,ny
+ do i = 1,nx
+ n = local_indices(i,j)
+ fptr(klon , n) = lons(i,j)
+ fptr(klat , n) = lats(i,j)
+
+ ! convert from m^2 to radians^2
+ fptr(karea, n) = areas(i,j)/(radius*radius)
+
+ ! For now, assume mask and frac are 1 everywhere. This may need to be changed
+ ! in the future.
+ fptr(kmask, n) = 1._r8
+ fptr(kfrac, n) = 1._r8
+ end do
+ end do
+
+ deallocate(lats)
+ deallocate(lons)
+ deallocate(areas)
+
+ end subroutine glc_domain_esmf
+
+#endif
+
+end module glc_comp_esmf
+
diff --git a/components/cism/drivers/cpl/glc_comp_mct.F90 b/components/cism/drivers/cpl/glc_comp_mct.F90
new file mode 100644
index 0000000000..2da1a3af15
--- /dev/null
+++ b/components/cism/drivers/cpl/glc_comp_mct.F90
@@ -0,0 +1,556 @@
+module glc_comp_mct
+
+ ! !uses:
+
+ use shr_sys_mod
+ use shr_kind_mod, only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, &
+ CS=>SHR_KIND_CS, CL=>SHR_KIND_CL
+ use shr_file_mod, only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel, &
+ shr_file_setlogunit, shr_file_setloglevel, shr_file_setio, &
+ shr_file_freeunit
+ use mct_mod
+ use esmf
+
+ use seq_cdata_mod , only: seq_cdata_getdata=>seq_cdata_setptrs, seq_cdata
+ use seq_infodata_mod
+ use seq_timemgr_mod
+
+ use glc_import_export
+ use glc_cpl_indices
+ use glc_constants, only: verbose, stdout, stderr, nml_in, radius
+ use glc_errormod, only: glc_success
+ use glc_InitMod, only: glc_initialize
+ use glc_RunMod, only: glc_run
+ use glc_FinalMod, only: glc_final
+ use glc_io, only: glc_io_write_restart
+ use glc_communicate, only: init_communicate, my_task, master_task
+ use glc_time_management, only: iyear,imonth,iday,ihour,iminute,isecond,runtype
+ use glc_fields, only: ice_sheet
+
+ ! Public types:
+ implicit none
+ save
+ private ! except
+
+ ! Public interfaces
+ public :: glc_init_mct
+ public :: glc_run_mct
+ public :: glc_final_mct
+
+ ! Private data interfaces
+
+ !--- stdin input stuff ---
+ character(CS) :: str ! cpp defined model name
+
+ !--- other ---
+ integer(IN) :: errorcode ! glc error code
+
+ ! my_task_local and master_task_local are needed for some checks that are done before
+ ! init_communicate is called (although, it's possible that init_communicate could be
+ ! moved to earlier to prevent the need for these copies)
+ integer(IN) :: my_task_local ! my task in mpi communicator mpicom
+ integer(IN),parameter :: master_task_local=0 ! task number of master task
+
+!===============================================================================
+CONTAINS
+!===============================================================================
+
+ subroutine glc_init_mct( EClock, cdata, x2g, g2x, NLFilename )
+
+ ! description:
+ ! initialize glc model
+
+ ! uses:
+
+ use glc_ensemble , only : set_inst_vars, write_inst_vars, get_inst_name
+ use glc_files , only : set_filenames, ionml_filename
+ use glc_coupling_flags , only : has_ocn_coupling, has_ice_coupling
+ use glc_indexing_info , only : nx_tot, ny_tot, npts
+
+ ! input/output parameters:
+
+ type(ESMF_Clock) , intent(inout) :: EClock
+ type(seq_cdata) , intent(inout) :: cdata
+ type(mct_aVect) , intent(inout) :: x2g, g2x
+ character(len=*), optional , intent(in) :: NLFilename ! Namelist filename
+
+ !--- local variables ---
+ integer(IN) :: ierr ! error code
+ integer(IN) :: i,j,n
+ integer(IN) :: COMPID
+ integer(IN) :: mpicom
+ type(mct_gsMap), pointer :: gsMap
+ type(mct_gGrid), pointer :: dom
+ type(seq_infodata_type), pointer :: infodata ! Input init object
+ integer(IN) :: shrlogunit, shrloglev
+ character(CL) :: starttype
+ character(CS) :: myModelName
+
+ !--- formats ---
+ character(*), parameter :: F00 = "('(glc_init_mct) ',8a)"
+ character(*), parameter :: F01 = "('(glc_init_mct) ',a,8i8)"
+ character(*), parameter :: F02 = "('(glc_init_mct) ',a,4es13.6)"
+ character(*), parameter :: F91 = "('(glc_init_mct) ',73('-'))"
+ character(*), parameter :: subName = "(glc_init_mct) "
+ !-------------------------------------------------------------------------------
+
+ !----------------------------------------------------------------------------
+ ! Determine attribute vector indices
+ !----------------------------------------------------------------------------
+
+ call glc_cpl_indices_set()
+
+ !----------------------------------------------------------------------------
+ ! Set cdata pointers
+ !----------------------------------------------------------------------------
+
+ call seq_cdata_getdata(cdata, &
+ id=compid, mpicom=mpicom, gsMap=gsMap, dom=dom, infodata=infodata)
+
+ call mpi_comm_rank(mpicom, my_task_local, ierr)
+
+ !---------------------------------------------------------------------------
+ ! set variables that depend on ensemble index
+ !---------------------------------------------------------------------------
+
+ call set_inst_vars(COMPID)
+ call get_inst_name(myModelName)
+ call set_filenames()
+
+ !---------------------------------------------------------------------------
+ ! use infodata to determine type of run
+ !---------------------------------------------------------------------------
+
+ call seq_infodata_GetData( infodata, &
+ start_type=starttype)
+
+ if ( trim(starttype) == trim(seq_infodata_start_type_start)) then
+ runtype = "initial"
+ else if (trim(starttype) == trim(seq_infodata_start_type_cont) ) then
+ runtype = "continue"
+ else if (trim(starttype) == trim(seq_infodata_start_type_brnch)) then
+ runtype = "branch"
+ else
+ write(*,*) 'glc_comp_mct ERROR: unknown starttype'
+ call shr_sys_abort()
+ end if
+
+ !----------------------------------------------------------------------------
+ ! Reset shr logging to my log file
+ !----------------------------------------------------------------------------
+ !--- open log file ---
+ if (my_task_local == master_task_local) then
+ stdout = shr_file_getUnit()
+ call shr_file_setIO(ionml_filename,stdout)
+ else
+ stdout = 6
+ endif
+ stderr = stdout
+ nml_in = shr_file_getUnit()
+
+ call shr_file_getLogUnit (shrlogunit)
+ call shr_file_getLogLevel(shrloglev)
+ call shr_file_setLogUnit (stdout)
+
+ errorCode = glc_Success
+ if (verbose .and. my_task_local == master_task_local) then
+ write(stdout,F00) ' Starting'
+ write(stdout,*) subname, 'COMPID: ', COMPID
+ call write_inst_vars
+ call shr_sys_flush(stdout)
+ endif
+ call init_communicate(mpicom)
+ call glc_initialize(errorCode)
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' GLC Initial Date ',iyear,imonth,iday,ihour,iminute,isecond
+ write(stdout,F01) ' Initialize Done', errorCode
+ call shr_sys_flush(stdout)
+ endif
+
+ ! Initialize MCT gsmap
+
+ call glc_SetgsMap_mct(mpicom, COMPID, gsMap)
+
+ ! Initialize MCT domain
+
+ call glc_domain_mct(gsMap,dom)
+
+ ! Set flags in infodata
+
+ call seq_infodata_PutData(infodata, glc_present=.true., &
+ glclnd_present = .true., &
+ glcocn_present=has_ocn_coupling(), &
+ glcice_present=has_ice_coupling(), &
+ glc_prognostic = .true., glc_nx=nx_tot, glc_ny=ny_tot)
+
+ ! Initialize MCT attribute vectors
+
+ call mct_aVect_init(g2x, rList=seq_flds_g2x_fields, lsize=npts)
+ call mct_aVect_zero(g2x)
+
+ call mct_aVect_init(x2g, rList=seq_flds_x2g_fields, lsize=npts)
+ call mct_aVect_zero(x2g)
+
+ ! Create initial glc export state
+
+ call glc_export(g2x%rattr)
+
+ if (my_task == master_task) then
+ write(stdout,F91)
+ write(stdout,F00) trim(myModelName),': start of main integration loop'
+ write(stdout,F91)
+ end if
+
+ !----------------------------------------------------------------------------
+ ! Reset shr logging to original values
+ !----------------------------------------------------------------------------
+ call shr_file_setLogUnit (shrlogunit)
+ call shr_file_setLogLevel(shrloglev)
+ call shr_sys_flush(stdout)
+
+end subroutine glc_init_mct
+
+!===============================================================================
+!BOP ===========================================================================
+!
+! !IROUTINE: glc_run_mct
+!
+! !DESCRIPTION:
+! run method for glc model
+!
+! !REVISION HISTORY:
+!
+! !INTERFACE: ------------------------------------------------------------------
+
+subroutine glc_run_mct( EClock, cdata, x2g, g2x)
+
+ implicit none
+
+! !INPUT/OUTPUT PARAMETERS:
+
+ type(ESMF_Clock) ,intent(inout) :: EClock
+ type(seq_cdata) ,intent(inout) :: cdata
+ type(mct_aVect) ,intent(inout) :: x2g ! driver -> glc
+ type(mct_aVect) ,intent(inout) :: g2x ! glc -> driver
+
+!EOP
+ !--- local ---
+ integer(IN) :: cesmYMD ! cesm model date
+ integer(IN) :: cesmTOD ! cesm model sec
+ integer(IN) :: glcYMD ! glc model date
+ integer(IN) :: glcTOD ! glc model sec
+ integer(IN) :: n ! index
+ integer(IN) :: nf ! fields loop index
+ integer(IN) :: ki ! index of ifrac
+ real(R8) :: lat ! latitude
+ real(R8) :: lon ! longitude
+ integer(IN) :: shrlogunit, shrloglev
+ logical :: stop_alarm ! is it time to stop
+ logical :: rest_alarm ! is it time to write a restart
+ logical :: done ! time loop logical
+ integer :: num
+ character(len= 2) :: cnum
+ character(len=64) :: name
+
+ character(*), parameter :: F00 = "('(glc_run_mct) ',8a)"
+ character(*), parameter :: F01 = "('(glc_run_mct) ',a,8i8)"
+ character(*), parameter :: F04 = "('(glc_run_mct) ',2a,2i8,'s')"
+ character(*), parameter :: subName = "(glc_run_mct) "
+!-------------------------------------------------------------------------------
+
+ !----------------------------------------------------------------------------
+ ! Reset shr logging to my log file
+ !----------------------------------------------------------------------------
+ call shr_file_getLogUnit (shrlogunit)
+ call shr_file_getLogLevel(shrloglev)
+ call shr_file_setLogUnit (stdout)
+
+ ! Set internal time info
+
+ errorCode = glc_Success
+ call seq_timemgr_EClockGetData(EClock,curr_ymd=cesmYMD, curr_tod=cesmTOD)
+ stop_alarm = seq_timemgr_StopAlarmIsOn( EClock )
+
+ glcYMD = iyear*10000 + imonth*100 + iday
+ glcTOD = ihour*3600 + iminute*60 + isecond
+ done = .false.
+ if (glcYMD == cesmYMD .and. glcTOD == cesmTOD) done = .true.
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' Run Starting ',glcYMD,glcTOD
+ call shr_sys_flush(stdout)
+ endif
+
+ ! Unpack
+
+ call glc_import(x2g%rattr)
+
+ ! Run
+
+ do while (.not. done)
+ if (glcYMD > cesmYMD .or. (glcYMD == cesmYMD .and. glcTOD > cesmTOD)) then
+ write(stdout,*) subname,' ERROR overshot coupling time ',glcYMD,glcTOD,cesmYMD,cesmTOD
+ call shr_sys_abort('glc error overshot time')
+ endif
+
+ call glc_run(EClock)
+
+ glcYMD = iyear*10000 + imonth*100 + iday
+ glcTOD = ihour*3600 + iminute*60 + isecond
+ if (glcYMD == cesmYMD .and. glcTOD == cesmTOD) done = .true.
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' GLC Date ',glcYMD,glcTOD
+ endif
+ enddo
+
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' Run Done',glcYMD,glcTOD
+ call shr_sys_flush(stdout)
+ endif
+
+ ! Pack
+
+ call glc_export(g2x%rattr)
+
+ ! Log output for model date
+
+ if (my_task == master_task) then
+ call seq_timemgr_EClockGetData(EClock,curr_ymd=cesmYMD, curr_tod=cesmTOD)
+ write(stdout,F01) ' CESM Date ', cesmYMD,cesmTOD
+ glcYMD = iyear*10000 + imonth*100 + iday
+ glcTOD = ihour*3600 + iminute*60 + isecond
+ write(stdout,F01) ' GLC Date ',glcYMD,glcTOD
+ call shr_sys_flush(stdout)
+ end if
+
+ ! If time to write restart, do so
+
+ rest_alarm = seq_timemgr_RestartAlarmIsOn( EClock )
+ if (rest_alarm) then
+ ! TODO loop over instances
+ call glc_io_write_restart(ice_sheet%instances(1), EClock)
+ endif
+
+ ! Reset shr logging to original values
+
+ call shr_file_setLogUnit (shrlogunit)
+ call shr_file_setLogLevel(shrloglev)
+ call shr_sys_flush(stdout)
+
+end subroutine glc_run_mct
+
+!===============================================================================
+!BOP ===========================================================================
+!
+! !IROUTINE: glc_final_mct
+!
+! !DESCRIPTION:
+! finalize method for glc model
+!
+! !REVISION HISTORY:
+!
+! !INTERFACE: ------------------------------------------------------------------
+!
+subroutine glc_final_mct( EClock, cdata, x2d, d2x)
+
+! !USES:
+
+ use glc_ensemble, only : get_inst_name
+
+! !INPUT/OUTPUT PARAMETERS:
+
+ type(ESMF_Clock) ,intent(inout) :: EClock
+ type(seq_cdata) ,intent(inout) :: cdata
+ type(mct_aVect) ,intent(inout) :: x2d
+ type(mct_aVect) ,intent(inout) :: d2x
+
+!EOP
+
+ integer(IN) :: shrlogunit, shrloglev
+ character(CS) :: myModelName
+
+
+ !--- formats ---
+ character(*), parameter :: F00 = "('(glc_final_mct) ',8a)"
+ character(*), parameter :: F01 = "('(glc_final_mct) ',a,8i8)"
+ character(*), parameter :: F91 = "('(glc_final_mct) ',73('-'))"
+ character(*), parameter :: subName = "(glc_final_mct) "
+
+!-------------------------------------------------------------------------------
+!
+!-------------------------------------------------------------------------------
+
+ ! Reset shr logging to my log file
+ call shr_file_getLogUnit (shrlogunit)
+ call shr_file_getLogLevel(shrloglev)
+ call shr_file_setLogUnit (stdout)
+
+ call get_inst_name(myModelName)
+
+ if (my_task == master_task) then
+ write(stdout,F91)
+ write(stdout,F00) trim(myModelName),': end of main integration loop'
+ write(stdout,F91)
+ end if
+
+ errorCode = glc_Success
+
+ call glc_final(errorCode)
+
+ if (verbose .and. my_task == master_task) then
+ write(stdout,F01) ' Done',errorCode
+ call shr_sys_flush(stdout)
+ endif
+
+ ! Reset shr logging to original values
+
+ call shr_file_setLogUnit (shrlogunit)
+ call shr_file_setLogLevel(shrloglev)
+ call shr_sys_flush(stdout)
+
+end subroutine glc_final_mct
+
+!=================================================================================
+
+subroutine glc_setgsmap_mct( mpicom_g, GLCID, gsMap_g )
+
+ ! Initialize MCT global seg map
+
+ use glc_indexing_info, only : local_indices, global_indices, nx, ny, npts
+
+ integer , intent(in) :: mpicom_g
+ integer , intent(in) :: GLCID
+ type(mct_gsMap), intent(out) :: gsMap_g
+
+ ! Local Variables
+
+ integer,allocatable :: gindex(:)
+ integer :: i, j, n
+ integer :: ier
+
+ !--- formats ---
+ character(*), parameter :: F02 = "('(glc_SetgsMap_mct) ',a,4es13.6)"
+ character(*), parameter :: subName = "(glc_SetgsMap_mct) "
+ !-------------------------------------------------------------------
+
+ allocate(gindex(npts))
+
+ do j = 1,ny
+ do i = 1,nx
+ n = local_indices(i,j)
+ gindex(n) = global_indices(i,j)
+ enddo
+ enddo
+
+ call mct_gsMap_init( gsMap_g, gindex, mpicom_g, GLCID )
+
+ deallocate(gindex)
+
+end subroutine glc_SetgsMap_mct
+
+!===============================================================================
+
+ subroutine glc_domain_mct( gsMap_g, dom_g )
+
+ use glc_indexing_info, only : npts, nx, ny, local_indices
+ use glad_main, only : glad_get_lat_lon, glad_get_areas
+
+ !-------------------------------------------------------------------
+ type(mct_gsMap), intent(inout) :: gsMap_g
+ type(mct_ggrid), intent(out) :: dom_g
+
+ ! Local Variables
+
+ integer :: i,j,n ! index
+ real(r8), pointer :: data(:) ! temporary
+ integer , pointer :: idata(:) ! temporary
+ real(r8), allocatable :: lats(:,:) ! latitude of each point (degrees)
+ real(r8), allocatable :: lons(:,:) ! longitude of each point (degrees)
+ real(r8), allocatable :: areas(:,:) ! area of each point (square meters)
+ character(*), parameter :: subName = "(glc_domain_mct) "
+ !-------------------------------------------------------------------
+
+ ! Initialize mct domain type
+
+ call mct_gGrid_init( GGrid=dom_g, CoordChars=trim(seq_flds_dom_coord), &
+ OtherChars=trim(seq_flds_dom_other), lsize=npts )
+
+ ! Initialize attribute vector with special value
+
+ allocate(data(npts))
+ dom_g%data%rAttr(:,:) = -9999.0_R8
+ dom_g%data%iAttr(:,:) = -9999
+ data(:) = 0.0_R8
+ call mct_gGrid_importRAttr(dom_g,"mask" ,data,npts)
+ call mct_gGrid_importRAttr(dom_g,"frac" ,data,npts)
+
+ ! Determine global gridpoint number attribute, GlobGridNum, which is set automatically by MCT
+
+ call mct_gsMap_orderedPoints(gsMap_g, my_task, idata)
+ call mct_gGrid_importIAttr(dom_g,'GlobGridNum',idata,npts)
+
+ ! Fill in correct values for domain components
+ ! lat/lon in degrees, area in radians^2, real-valued mask and frac
+
+ allocate(lats(nx, ny))
+ allocate(lons(nx, ny))
+ allocate(areas(nx, ny))
+
+ ! TODO(wjs, 2015-04-02) The following may need a loop over instances
+ call glad_get_lat_lon(ice_sheet, instance_index = 1, &
+ lats = lats, lons = lons)
+ call glad_get_areas(ice_sheet, instance_index = 1, areas = areas)
+
+ do j = 1,ny
+ do i = 1,nx
+ n = local_indices(i,j)
+ data(n) = lons(i,j)
+ end do
+ end do
+ call mct_gGrid_importRattr(dom_g,"lon",data,npts)
+
+ do j = 1,ny
+ do i = 1,nx
+ n = local_indices(i,j)
+ data(n) = lats(i,j)
+ end do
+ end do
+ call mct_gGrid_importRattr(dom_g,"lat",data,npts)
+
+ do j = 1,ny
+ do i = 1,nx
+ n = local_indices(i,j)
+ ! convert from m^2 to radians^2
+ data(n) = areas(i,j)/(radius*radius)
+ end do
+ end do
+ call mct_gGrid_importRattr(dom_g,"area",data,npts)
+
+ ! For now, assume mask and frac are 1 everywhere. This may need to be changed in the
+ ! future.
+ data(:) = 1._r8
+ call mct_gGrid_importRattr(dom_g,"mask",data,npts)
+ call mct_gGrid_importRattr(dom_g,"frac",data,npts)
+
+ deallocate(data)
+ deallocate(idata)
+ deallocate(lats)
+ deallocate(lons)
+ deallocate(areas)
+
+ if (verbose .and. my_task==master_task) then
+ i = mct_aVect_nIattr(dom_g%data)
+ do n = 1,i
+ write(stdout,*) subname,' dom_g ',n,minval(dom_g%data%iAttr(n,:)),maxval(dom_g%data%iAttr(n,:))
+ enddo
+ i = mct_aVect_nRattr(dom_g%data)
+ do n = 1,i
+ write(stdout,*) subname,' dom_g ',n,minval(dom_g%data%rAttr(n,:)),maxval(dom_g%data%rAttr(n,:))
+ enddo
+ call shr_sys_flush(stdout)
+ endif
+
+ end subroutine glc_domain_mct
+
+!===============================================================================
+
+end module glc_comp_mct
diff --git a/components/cism/drivers/cpl/glc_coupling_flags.F90 b/components/cism/drivers/cpl/glc_coupling_flags.F90
new file mode 100644
index 0000000000..d95675eaa3
--- /dev/null
+++ b/components/cism/drivers/cpl/glc_coupling_flags.F90
@@ -0,0 +1,94 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!BOP
+!
+! !MODULE: glc_coupling_flags - determine coupling flags
+!
+module glc_coupling_flags
+
+! !DESCRIPTION:
+!
+! This module determines various coupling flags
+!
+! !REVISION HISTORY:
+! Author: Bill Sacks
+
+! !USES:
+
+ use glc_kinds_mod
+ use glc_constants, only: stdout
+ use glc_exit_mod
+
+ implicit none
+ private
+ save
+
+! !PUBLIC MEMBER FUNCTIONS:
+
+ public :: has_ocn_coupling
+ public :: has_ice_coupling
+
+!EOP
+
+!***********************************************************************
+
+contains
+
+!***********************************************************************
+!BOP
+! !IROUTINE: has_ocn_coupling
+! !INTERFACE:
+
+ logical function has_ocn_coupling()
+
+! !DESCRIPTION:
+! Returns true if glc has coupling to ocn
+
+! !USES:
+
+ use glc_route_ice_runoff, only: ice_needs_ocean_coupling
+
+!EOP
+!-----------------------------------------------------------------------
+
+! Local variables
+
+ logical :: liq_to_ocean
+ logical :: ice_to_ocean
+
+!-----------------------------------------------------------------------
+
+ ! For now, liquid runoff is always sent to the ocean
+ liq_to_ocean = .true.
+
+ ice_to_ocean = ice_needs_ocean_coupling()
+
+ has_ocn_coupling = (liq_to_ocean .or. ice_to_ocean)
+
+ end function has_ocn_coupling
+
+!***********************************************************************
+
+!***********************************************************************
+!BOP
+! !IROUTINE: has_ice_coupling
+! !INTERFACE:
+
+ logical function has_ice_coupling()
+
+! !DESCRIPTION:
+! Returns true if glc has coupling to ice
+
+! !USES:
+
+ use glc_route_ice_runoff, only: ice_needs_sea_ice_coupling
+
+!EOP
+!-----------------------------------------------------------------------
+
+ has_ice_coupling = ice_needs_sea_ice_coupling()
+
+ end function has_ice_coupling
+
+!***********************************************************************
+
+end module glc_coupling_flags
diff --git a/components/cism/drivers/cpl/glc_cpl_indices.F90 b/components/cism/drivers/cpl/glc_cpl_indices.F90
new file mode 100644
index 0000000000..fd829dc903
--- /dev/null
+++ b/components/cism/drivers/cpl/glc_cpl_indices.F90
@@ -0,0 +1,70 @@
+module glc_cpl_indices
+
+ use seq_flds_mod
+ use mct_mod
+ use glc_constants, only : glc_smb
+ use shr_sys_mod , only : shr_sys_abort
+
+ implicit none
+
+ SAVE
+ public
+
+ ! drv -> glc
+
+ integer, public :: index_x2g_Sl_tsrf = 0
+ integer, public :: index_x2g_Flgl_qice = 0
+
+ ! glc -> drv
+
+ integer, public :: index_g2x_Fogg_rofi = 0 ! frozen runoff -> ocn
+ integer, public :: index_g2x_Figg_rofi = 0 ! frozen runoff -> ice
+ integer, public :: index_g2x_Fogg_rofl = 0 ! liquid runoff -> ocn
+ integer, public :: index_g2x_Sg_ice_covered = 0
+ integer, public :: index_g2x_Sg_topo = 0
+ integer, public :: index_g2x_Flgg_hflx = 0
+ integer, public :: index_g2x_Sg_icemask = 0
+ integer, public :: index_g2x_Sg_icemask_coupled_fluxes = 0
+
+contains
+
+ subroutine glc_cpl_indices_set( )
+
+ !-------------------------------------------------------------
+ type(mct_aVect) :: g2x ! temporary
+ type(mct_aVect) :: x2g ! temporary
+ !-------------------------------------------------------------
+
+ ! create temporary attribute vectors
+
+ call mct_aVect_init(x2g, rList=seq_flds_x2g_fields, lsize=1)
+ call mct_aVect_init(g2x, rList=seq_flds_g2x_fields, lsize=1)
+
+ ! glc -> drv
+
+ index_g2x_Fogg_rofi = mct_avect_indexra(g2x,'Fogg_rofi')
+ index_g2x_Figg_rofi = mct_avect_indexra(g2x,'Figg_rofi')
+ index_g2x_Fogg_rofl = mct_avect_indexra(g2x,'Fogg_rofl')
+ index_g2x_Sg_ice_covered = mct_avect_indexra(g2x,'Sg_ice_covered')
+ index_g2x_Sg_topo = mct_avect_indexra(g2x,'Sg_topo')
+ index_g2x_Flgg_hflx = mct_avect_indexra(g2x,'Flgg_hflx')
+ index_g2x_Sg_icemask = mct_avect_indexra(g2x,'Sg_icemask')
+ index_g2x_Sg_icemask_coupled_fluxes = mct_avect_indexra(g2x,'Sg_icemask_coupled_fluxes')
+
+ ! drv -> glc
+ index_x2g_Sl_tsrf = mct_avect_indexra(x2g,'Sl_tsrf')
+ index_x2g_Flgl_qice = mct_avect_indexra(x2g,'Flgl_qice')
+
+ call mct_aVect_clean(x2g)
+ call mct_aVect_clean(g2x)
+
+ ! Set glc_smb
+ ! true => get surface mass balance from CLM via coupler (in multiple elev classes)
+ ! false => use PDD scheme in GLIMMER
+ ! For now, we always use true
+
+ glc_smb = .true.
+
+ end subroutine glc_cpl_indices_set
+
+end module glc_cpl_indices
diff --git a/components/cism/drivers/cpl/glc_import_export.F90 b/components/cism/drivers/cpl/glc_import_export.F90
new file mode 100644
index 0000000000..494976f887
--- /dev/null
+++ b/components/cism/drivers/cpl/glc_import_export.F90
@@ -0,0 +1,117 @@
+module glc_import_export
+
+ use shr_sys_mod
+ use shr_kind_mod, only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8
+ use shr_kind_mod, only: CS=>SHR_KIND_CS, CL=>SHR_KIND_CL
+ use glc_constants, only: verbose, stdout, stderr, tkfrz
+ use glc_communicate, only: my_task, master_task
+ use glc_cpl_indices
+
+ implicit none
+ save
+ public
+
+ ! Public interfaces
+ public :: glc_import
+
+!=================================================================================
+contains
+!=================================================================================
+
+ subroutine glc_import(x2g)
+
+ !-------------------------------------------------------------------
+ use glc_indexing_info, only : nx, ny, local_indices
+ use glc_fields, only: tsfc, qsmb
+
+ real(r8) , intent(in) :: x2g(:,:)
+
+ integer(IN) :: i,j,n
+ character(*), parameter :: subName = "(glc_import) "
+ !-------------------------------------------------------------------
+
+ do j = 1, ny
+ do i = 1, nx
+ n = local_indices(i,j)
+ tsfc(i,j) = x2g(index_x2g_Sl_tsrf, n) - tkfrz
+ qsmb(i,j) = x2g(index_x2g_Flgl_qice, n)
+ enddo
+ enddo
+
+ !Jer hack fix:
+ !For some land points where CLM sees ocean, and all ocean points, CLM doesn't provide a temperature,
+ !and so the incoming temperature is 0.d0. This gets dropped to -273.15, in the above code. So,
+ !manually reverse this, below, to set to 0C.
+ where (tsfc < -250.d0) tsfc=0.d0
+
+ end subroutine glc_import
+
+!=================================================================================
+
+ subroutine glc_export(g2x)
+
+ !-------------------------------------------------------------------
+ use glc_indexing_info, only : nx, ny, local_indices
+ use glc_fields , only: ice_covered, topo, rofi, rofl, hflx, &
+ ice_sheet_grid_mask, icemask_coupled_fluxes ! to coupler
+ use glc_route_ice_runoff, only: route_ice_runoff
+ use glc_override_frac , only: frac_overrides_enabled, do_frac_overrides
+
+ real(r8) ,intent(inout) :: g2x(:,:)
+
+ ! if doing frac overrides, these are the modified versions sent to the coupler;
+ ! otherwise they point to the real fields
+ real(r8), pointer :: ice_covered_to_cpl(:,:)
+ real(r8), pointer :: topo_to_cpl(:,:)
+ logical :: fields_to_cpl_allocated ! whether we allocated the above fields
+
+ integer(IN) :: i,j,n
+ character(*), parameter :: subName = "(glc_export) "
+ !-------------------------------------------------------------------
+
+ ! If overrides of glc fraction are enabled (for testing purposes), then apply
+ ! these overrides, otherwise use the real version of ice_covered and topo
+ if (frac_overrides_enabled()) then
+ allocate(ice_covered_to_cpl(lbound(ice_covered,1):ubound(ice_covered,1), &
+ lbound(ice_covered,2):ubound(ice_covered,2)))
+ allocate(topo_to_cpl(lbound(topo,1):ubound(topo,1), &
+ lbound(topo,2):ubound(topo,2)))
+
+ ice_covered_to_cpl = ice_covered
+ topo_to_cpl = topo
+ call do_frac_overrides(ice_covered_to_cpl, topo_to_cpl, ice_sheet_grid_mask)
+ fields_to_cpl_allocated = .true.
+ else
+ ice_covered_to_cpl => ice_covered
+ topo_to_cpl => topo
+ fields_to_cpl_allocated = .false.
+ end if
+
+ do j = 1, ny
+ do i = 1, nx
+ n = local_indices(i,j)
+
+ call route_ice_runoff(rofi(i,j), &
+ rofi_to_ocn=g2x(index_g2x_Fogg_rofi, n), &
+ rofi_to_ice=g2x(index_g2x_Figg_rofi, n))
+
+ g2x(index_g2x_Fogg_rofl, n) = rofl(i,j)
+
+ g2x(index_g2x_Sg_ice_covered, n) = ice_covered_to_cpl(i,j)
+ g2x(index_g2x_Sg_topo, n) = topo_to_cpl(i,j)
+ g2x(index_g2x_Flgg_hflx, n) = hflx(i,j)
+
+ g2x(index_g2x_Sg_icemask, n) = ice_sheet_grid_mask(i,j)
+ g2x(index_g2x_Sg_icemask_coupled_fluxes, n) = icemask_coupled_fluxes(i,j)
+
+ enddo
+ enddo
+
+ if (fields_to_cpl_allocated) then
+ deallocate(ice_covered_to_cpl)
+ deallocate(topo_to_cpl)
+ end if
+
+ end subroutine glc_export
+
+end module glc_import_export
diff --git a/components/cism/glimmer-cism/AUTHORS b/components/cism/glimmer-cism/AUTHORS
new file mode 100644
index 0000000000..7735c2b62b
--- /dev/null
+++ b/components/cism/glimmer-cism/AUTHORS
@@ -0,0 +1,31 @@
+The following authors (listed alphabetically) have contributed to this version of Glimmer-CISM.
+Affiliations shown with an asterisk (*) are no longer current.
+
+Erin Barker Los Alamos National Laboratory (*)
+Tim Bocek University of Montana, Missoula (*)
+Josh Campbell University of Montana, Missoula
+Katherine J. Evans Oak Ridge National Laboratory
+Jeremy Fyke Los Alamos National Laboratory
+Glen Granzow University of Montana, Missoula
+Magnus Hagdorn School of GeoSciences, University of Edinburgh
+Brian Hand University of Montana, Missoula (*)
+Felix Hebeler         University of Zurich (*)
+Matthew Hoffman Los Alamos National Laboratory
+Jesse Johnson University of Montana, Missoula
+Irina Kalashnikova Sandia National Laboratories
+Jean-Francois Lemieux New York University (*)
+William Lipscomb Los Alamos National Laboratory
+Daniel Martin Lawrence Berkeley National Laboratory
+Jeffrey A. Nichols Oak Ridge National Laboratory
+Ryan Nong Sandia National Laboratories (*)
+Matthew R. Norman Oak Ridge National Laboratory
+Tony Payne University of Bristol
+Stephen Price Los Alamos National Laboratory
+Doug Ranken Los Alamos National Laboratory
+Ian Rutt Dept. of Geography, Swansea University
+William Sacks National Center for Atmospheric Research
+Andrew Salinger Sandia National Laboratories
+James B. White III Oak Ridge National Laboratory (*)
+Jon Wolfe National Center for Atmospheric Research (*)
+Patrick Worley Oak Ridge National Laboratory
+Timothy Wylie University of Montana, Missoula (*)
diff --git a/components/cism/glimmer-cism/CMakeLists.txt b/components/cism/glimmer-cism/CMakeLists.txt
new file mode 100644
index 0000000000..dc6b915c00
--- /dev/null
+++ b/components/cism/glimmer-cism/CMakeLists.txt
@@ -0,0 +1,479 @@
+# CMAKE File for CISM building against an installed Trilinos
+
+cmake_minimum_required(VERSION 2.8.4)
+
+OPTION (CISM_BUILD_CISM_DRIVER "Toggle to build cism_driver, on by default" ON)
+
+OPTION (CISM_USE_TRILINOS "Toggle to use Trilinos: defaults to OFF" OFF)
+OPTION (CISM_MPI_MODE "Toggle to Configure with MPI: defaults to ON" ON)
+OPTION (CISM_SERIAL_MODE "Toggle to Configure in Serial mode: defaults to OFF " OFF)
+
+OPTION (CISM_USE_MPI_WITH_SLAP "Toggle to use mpi when using SLAP solver, only relevant if CISM_SERIAL_MODE=ON: defaults to OFF" OFF)
+OPTION (CISM_BUILD_SIMPLE_GLIDE "Toggle to build simple_glide, OFF by default" OFF)
+OPTION (CISM_ENABLE_BISICLES "Toggle to build a BISICLES-capable cism_driver, off by default" OFF)
+
+OPTION (CISM_BUILD_EXTRA_EXECUTABLES "Toggle to build other executables, off by default" OFF)
+OPTION (CISM_USE_GPTL_INSTRUMENTATION "Toggle to use GPTL instrumentation, on by default " ON)
+OPTION (CISM_COUPLED "Toggle to build CISM for use with CESM, off by default" OFF)
+OPTION (CISM_USE_DEFAULT_IO "Toggle to use default i/o files rather than running python script, off by default" OFF)
+# OPTION (CISM_USE_CISM_FRONT_END "Toggle to use cism_driver or cism_cesm_interface with cism_front_end, off by default" OFF)
+
+# WJS (1-3-13): We could avoid CISM_GNU by using something like CMAKE_Fortran_COMPILER_ID or CMAKE_COMPILER_IS_GNUCC,
+# but it's not clear to me if those work consistently
+OPTION (CISM_GNU "Toggle to set compilation flags needed for the gnu compiler, off by default" OFF)
+OPTION (CISM_STATIC_LINKING "Toggle to set static linking for executables, off by default" OFF)
+OPTION (CISM_FORCE_FORTRAN_LINKER "Toggle to force using a fortran linker for building executables, off by default" OFF)
+OPTION (CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES "Toggle to explicitly include the CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES on the link line, on by default" ON)
+
+MESSAGE("CISM_USE_TRILINOS = ${CISM_USE_TRILINOS}")
+
+# override CISM_SERIAL_MODE setting, if it conflicts with CISM_MPI_MODE setting:
+IF (CISM_MPI_MODE)
+ SET(CISM_SERIAL_MODE "OFF")
+ SET(CISM_BUILD_EXTRA_EXECUTABLES "OFF")
+ENDIF()
+
+IF (NOT CISM_MPI_MODE AND NOT CISM_SERIAL_MODE)
+ SET(CISM_MPI_MODE "ON")
+ENDIF()
+
+IF (CISM_BUILD_CISM_DRIVER)
+ SET(CISM_USE_CISM_FRONT_END "ON")
+ENDIF()
+
+# set (or override) other options, if CISM_COUPLED is ON:
+IF (CISM_COUPLED)
+ SET(CISM_NO_EXECUTABLE "ON")
+ SET(CISM_BUILD_SIMPLE_GLIDE "OFF")
+ SET(CISM_ENABLE_BISICLES "OFF")
+ SET(CISM_USE_CISM_FRONT_END "OFF")
+ SET(CISM_USE_DEFAULT_IO "ON")
+ ADD_DEFINITIONS(-DCCSMCOUPLED)
+ENDIF()
+
+
+IF (NOT DEFINED CISM_BINARY_DIR)
+ SET(CISM_BINARY_DIR ${CMAKE_BINARY_DIR})
+ENDIF()
+
+MESSAGE("Building in: ${CISM_BINARY_DIR}")
+
+IF (CISM_USE_TRILINOS)
+ OPTION(CISM_TRILINOS_DIR "Required path to installed Trilinos")
+ OPTION(CISM_NETCDF_DIR "Required path to installed Netcdf")
+
+
+ IF (CISM_USE_GPTL_INSTRUMENTATION)
+ IF (DEFINED CISM_TRILINOS_GPTL_DIR)
+ SET(CISM_TRILINOS_DIR ${CISM_TRILINOS_GPTL_DIR})
+ ENDIF()
+ ENDIF()
+
+
+ # Error check up front
+ IF (NOT DEFINED CISM_TRILINOS_DIR)
+ MESSAGE(FATAL_ERROR "\nCISM Error: cmake must define CISM_TRILINOS_DIR:
+ (-D CISM_TRILINOS_DIR=)!")
+ ENDIF()
+
+ # Get Trilinos as one entity
+ SET(CMAKE_PREFIX_PATH ${CISM_TRILINOS_DIR} ${CMAKE_PREFIX_PATH})
+ FIND_PACKAGE(Trilinos REQUIRED)
+
+ IF (${Trilinos_VERSION} VERSION_LESS 10.8.0)
+ MESSAGE(FATAL_ERROR "Trilinos version 10.8 or newer required!")
+ ENDIF()
+
+ MESSAGE("\nFound Trilinos! Here are the details: ")
+ MESSAGE(" Trilinos_DIR = ${Trilinos_DIR}")
+ MESSAGE(" Trilinos_VERSION = ${Trilinos_VERSION}")
+ MESSAGE(" Trilinos_PACKAGE_LIST = ${Trilinos_PACKAGE_LIST}")
+ MESSAGE(" Trilinos_LIBRARIES = ${Trilinos_LIBRARIES}")
+ MESSAGE(" Trilinos_INCLUDE_DIRS = ${Trilinos_INCLUDE_DIRS}")
+ MESSAGE(" Trilinos_LIBRARY_DIRS = ${Trilinos_LIBRARY_DIRS}")
+ MESSAGE(" Trilinos_TPL_LIST = ${Trilinos_TPL_LIST}")
+ MESSAGE(" Trilinos_TPL_INCLUDE_DIRS = ${Trilinos_TPL_INCLUDE_DIRS}")
+ MESSAGE(" Trilinos_TPL_LIBRARIES = ${Trilinos_TPL_LIBRARIES}")
+ MESSAGE(" Trilinos_TPL_LIBRARY_DIRS = ${Trilinos_TPL_LIBRARY_DIRS}")
+ MESSAGE(" Trilinos_BUILD_SHARED_LIBS = ${Trilinos_BUILD_SHARED_LIBS}")
+ MESSAGE(" Trilinos_CXX_COMPILER_FLAGS = ${Trilinos_CXX_COMPILER_FLAGS}")
+ MESSAGE(" Trilinos_Fortran_COMPILER_FLAGS = ${Trilinos_Fortran_COMPILER_FLAGS}")
+ MESSAGE("End of Trilinos details\n")
+
+ # Get libraries for link line from Trilinos build information
+ set(CISM_TRILINOS_LIBS ${Trilinos_LIBRARIES} ${Trilinos_TPL_LIBRARIES} ${Trilinos_EXTRA_LD_FLAGS})
+
+ IF (NOT DEFINED CMAKE_CXX_COMPILER AND NOT DEFINED ENV{CXX})
+ SET(CMAKE_CXX_COMPILER ${Trilinos_CXX_COMPILER})
+ ENDIF()
+ IF (NOT DEFINED CMAKE_C_COMPILER AND NOT DEFINED ENV{CC})
+ SET(CMAKE_C_COMPILER ${Trilinos_C_COMPILER})
+ ENDIF()
+ IF (NOT DEFINED CMAKE_Fortran_COMPILER AND NOT DEFINED ENV{FC})
+ SET(CMAKE_Fortran_COMPILER ${Trilinos_Fortran_COMPILER})
+ ENDIF()
+ENDIF()
+
+ENABLE_LANGUAGE(Fortran)
+
+IF (CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES)
+ # WJS (6-3-14) Until now, these Fortran_IMPLICIT_LINK_LIBRARIES were always
+ # included. However, explicitly appending these implicit link libraries breaks
+ # the build on yellowstone, for some reason. It could be because the trilinos
+ # build is old. I suspect these implicit link libraries are needed when you're
+ # linking with a C++ linker, which is not the case for yellowstone-intel. In
+ # any case, I'm providing an option to exclude these from the build, to allow
+ # the yellowstone-intel build to work.
+ #
+ # Also, note that, although these are added to the variable CISM_TRILINOS_LIBS,
+ # this variable is used even when building without trilinos. I am merely
+ # maintaining the old behavior in this respect.
+ LIST(APPEND CISM_TRILINOS_LIBS ${CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES})
+
+ #message("")
+ #message(" CMake detected the following libraries for linking Fortran with C++ compiler:")
+ #message(" ${CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES} ")
+ENDIF()
+
+# Only include C++ support if it's really needed, to avoid problems
+# caused by broken C++ compilers.
+#IK, 8/3/13: added simple_felix option
+IF (CISM_USE_TRILINOS OR CISM_ENABLE_BISICLES)
+ ENABLE_LANGUAGE(CXX)
+ENDIF()
+
+MESSAGE(">> CISM_NETCDF_DIR set to : ${CISM_NETCDF_DIR}")
+
+IF (NOT DEFINED CISM_NETCDF_DIR)
+ MESSAGE(FATAL_ERROR "\nCISM Error: cmake must define CISM_NETCDF_DIR:
+ (-D CISM_NETCDF_DIR=)!")
+ENDIF()
+
+FIND_PATH(CISM_NETCDFF_FOUND libnetcdff.a ${CISM_NETCDF_DIR}/lib)
+#MESSAGE(" CISM_NETCDFF_FOUND = ${CISM_NETCDFF_FOUND}")
+
+IF (${CISM_NETCDFF_FOUND} STREQUAL ${CISM_NETCDF_DIR}/lib )
+  SET(CISM_NETCDF_LIBS "netcdff;netcdf" CACHE STRING "Netcdf Library Name(s)")
+ELSE()
+  SET(CISM_NETCDF_LIBS "netcdf" CACHE STRING "Netcdf Library Name(s)")
+ENDIF()
+MESSAGE(">> CISM_NETCDF_LIBS Library(s) set to : ${CISM_NETCDF_LIBS}")
+
+IF (DEFINED CISM_MPI_BASE_DIR)
+ IF (NOT DEFINED CISM_MPI_LIB_DIR)
+ SET(CISM_MPI_LIB_DIR ${CISM_MPI_BASE_DIR}/lib)
+ ENDIF()
+ IF (NOT DEFINED CISM_MPI_INC_DIR)
+ SET(CISM_MPI_INC_DIR ${CISM_MPI_BASE_DIR}/include)
+ ENDIF()
+ENDIF()
+MESSAGE(">> CISM_MPI_LIB_DIR set to : ${CISM_MPI_LIB_DIR}")
+MESSAGE(">> CISM_MPI_INC_DIR set to : ${CISM_MPI_INC_DIR}")
+
+
+IF (NOT DEFINED CMAKE_Fortran_MODULE_DIRECTORY)
+ SET(CMAKE_Fortran_MODULE_DIRECTORY ${CISM_BINARY_DIR}/fortran_mod_files)
+ENDIF()
+INCLUDE_DIRECTORIES(${CMAKE_Fortran_MODULE_DIRECTORY})
+
+
+# Note that C++ is excluded here -- we only include C++ support if
+# it's really needed (see ENABLE_LANGUAGE(CXX) command above)
+PROJECT(CISM Fortran C)
+
+
+IF (NOT CISM_USE_DEFAULT_IO)
+ # Auto-generate the *_io.F90 files in the build directory:
+ MESSAGE(">> Calling utils/build/autogenerate-in-build-dir")
+ EXECUTE_PROCESS(COMMAND ${CISM_SOURCE_DIR}/utils/build/autogenerate-in-build-dir
+ ${CISM_SOURCE_DIR}
+ WORKING_DIRECTORY ${CISM_BINARY_DIR}
+ OUTPUT_FILE ${CISM_BINARY_DIR}/autogenerate.log)
+
+ MESSAGE(">> see ${CISM_BINARY_DIR}/autogenerate.log")
+
+ IF (CISM_USE_CISM_FRONT_END)
+ # Auto-generate the *_io.F90 files in the build directory:
+ MESSAGE(">> Calling utils/build/autogenerate-in-build-dir")
+ EXECUTE_PROCESS(COMMAND ${CISM_SOURCE_DIR}/utils/build/autogen-for-glint-and-glad-in-build-dir
+ ${CISM_SOURCE_DIR}
+ WORKING_DIRECTORY ${CISM_BINARY_DIR}
+ OUTPUT_FILE ${CISM_BINARY_DIR}/autogen-for-glint-and-glad.log)
+
+ MESSAGE(">> see ${CISM_BINARY_DIR}/autogen-for-glint-and-glad.log")
+ ENDIF()
+
+ELSE()
+ # Simply copy the default io files into the build directory
+ MESSAGE(">> Calling utils/build/autocopy-io-to-build-dir")
+ EXECUTE_PROCESS(COMMAND ${CISM_SOURCE_DIR}/utils/build/autocopy-io-to-build-dir
+ ${CISM_SOURCE_DIR}
+ WORKING_DIRECTORY ${CISM_BINARY_DIR}
+ OUTPUT_FILE ${CISM_BINARY_DIR}/autocopy-io.log)
+
+ MESSAGE(">> see ${CISM_BINARY_DIR}/autocopy-io.log")
+ENDIF()
+
+# Copy a few needed files to the build directory:
+MESSAGE(">> Calling utils/build/autocopy-to-build-dir")
+EXECUTE_PROCESS(COMMAND ${CISM_SOURCE_DIR}/utils/build/autocopy-to-build-dir
+ ${CISM_SOURCE_DIR}
+ WORKING_DIRECTORY ${CISM_BINARY_DIR}
+ OUTPUT_FILE ${CISM_BINARY_DIR}/autocopy.log)
+
+MESSAGE(">> see ${CISM_BINARY_DIR}/autocopy.log")
+INCLUDE_DIRECTORIES(${CISM_BINARY_DIR}/fortran_autocopy_includes)
+
+# End of setup and error checking
+# NOTE: PROJECT command checks for compilers, so this statement
+# is moved AFTER setting CMAKE_CXX_COMPILER from Trilinos
+
+## Use CMAKE_CXX_FLAGS CMAKE_Fortran_FLAGS to override Trilinos flags
+## USe CISM_CXX_FLAGS CISM_Fortran_FLAGS to append to Trilinos flags
+
+IF (NOT CMAKE_CXX_FLAGS)
+ SET(CMAKE_CXX_FLAGS ${Trilinos_CXX_COMPILER_FLAGS} )
+ENDIF()
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CISM_CXX_FLAGS}")
+
+IF (NOT CMAKE_Fortran_FLAGS)
+ SET(CMAKE_Fortran_FLAGS ${Trilinos_Fortran_COMPILER_FLAGS} )
+ENDIF()
+SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${CISM_Fortran_FLAGS}")
+
+IF (CISM_USE_TRILINOS)
+ ## CISM requires Trilinos to include the Piro build
+ MESSAGE("-- Looking for Piro within Trilinos:")
+ MESSAGE("Trilinos pkg list: " ${Trilinos_PACKAGE_LIST})
+ LIST(FIND Trilinos_PACKAGE_LIST Piro Piro_List_ID)
+ IF (Piro_List_ID GREATER -1)
+ MESSAGE("-- Looking for Piro: -- found! Continuing.")
+ ELSE()
+ MESSAGE(FATAL_ERROR "-- Looking for Piro: -- not found! Rebuild Trilinos with Piro package.")
+ ENDIF()
+
+ ADD_DEFINITIONS(-DCISM_MPI -DTRILINOS)
+ ADD_DEFINITIONS(-DGLIMMER_MPI)
+ MESSAGE(">> Trilinos build: adding -DTRILINOS to compile lines")
+
+ include_directories(${CISM_NETCDF_DIR}/include)
+ link_directories(${CISM_NETCDF_DIR}/lib)
+
+ELSE()
+
+ IF (${CISM_MPI_MODE})
+ ADD_DEFINITIONS(-DCISM_MPI)
+ ADD_DEFINITIONS(-DGLIMMER_MPI)
+ ENDIF()
+ include_directories(${CISM_NETCDF_DIR}/include ${CISM_MPI_INC_DIR})
+
+ link_directories(${CISM_NETCDF_DIR}/lib ${CISM_MPI_LIB_DIR} ${CISM_SCI_LIB_DIR}
+ ${CISM_HDF5_LIB_DIR} )
+ENDIF()
+
+IF (CISM_ENABLE_BISICLES)
+ MESSAGE("\nBISICLES build configured.\n")
+
+ SET(CISM_DYCORE_DIR ${CISM_SOURCE_DIR}/libdycore)
+ SET(CISM_TO_DYCORE ${CISM_DYCORE_DIR}/glimmer_to_dycore.F90)
+ SET(CISM_BISICLES_DIR ${CISM_DYCORE_DIR}/BISICLES)
+
+ INCLUDE_DIRECTORIES(${CISM_DYCORE_DIR})
+ LINK_DIRECTORIES(${CISM_DYCORE_DIR} ${BISICLES_INTERFACE_DIR} )
+ ADD_DEFINITIONS(-DCISM_HAS_BISICLES)
+ENDIF()
+
+
+IF ( CISM_USE_GPTL_INSTRUMENTATION AND
+ (NOT CISM_ENABLE_BISICLES) AND
+ (NOT CISM_SERIAL_MODE))
+ OPTION(CISM_GPTL_DIR "Optional path to installed gptl library")
+ IF (CISM_GPTL_DIR)
+    message(">> GPTL Library requested, installed in ${CISM_GPTL_DIR}")
+ ADD_DEFINITIONS(-DCESMTIMERS)
+ include_directories(${CISM_GPTL_DIR})
+ link_directories(${CISM_GPTL_DIR})
+ SET(CISM_GPTL_LIB gptl)
+ ELSE()
+    message(">> GPTL Library not requested: can set CISM_GPTL_DIR to enable")
+ ENDIF()
+ENDIF()
+
+#####Build All Fortran Sources #####
+
+#IF (CISM_USE_CISM_FRONT_END)
+ file(GLOB GLINT_SOURCES libglint/*.F90)
+ file(GLOB GLAD_SOURCES libglad/*.F90)
+ file(GLOB GLISSADE_SOURCES libglissade/*.F90)
+#ENDIF()
+
+
+file(GLOB FORTRANSOURCES
+ libglimmer-solve/SLAP/*.f
+ libglimmer-solve/*.F90
+ libglimmer/*.F90 libglimmer/writestats.c
+ libglide/*.F90
+ cism_driver/eismint_forcing.F90
+ cism_driver/testsfg.F90
+ ${GLINT_SOURCES}
+ ${GLAD_SOURCES}
+ ${GLISSADE_SOURCES}
+ ${CISM_TO_DYCORE})
+
+LIST(REMOVE_ITEM FORTRANSOURCES
+ ${CISM_SOURCE_DIR}/libglimmer-solve/SLAP/dlapqc.f )
+
+
+IF (CISM_USE_TRILINOS OR CISM_MPI_MODE)
+ LIST(REMOVE_ITEM FORTRANSOURCES
+ ${CISM_SOURCE_DIR}/libglimmer/parallel_slap.F90)
+
+ELSE(${CISM_SERIAL_MODE})
+ LIST(REMOVE_ITEM FORTRANSOURCES
+ ${CISM_SOURCE_DIR}/libglimmer/parallel_mpi.F90)
+
+ IF (CISM_USE_MPI_WITH_SLAP)
+ ADD_DEFINITIONS(-D_USE_MPI_WITH_SLAP)
+ ELSE()
+ LIST(REMOVE_ITEM FORTRANSOURCES
+ ${CISM_SOURCE_DIR}/libglimmer/mpi_mod.F90)
+ ENDIF()
+ENDIF()
+
+# Get autogenerated source files, and add them to the FORTRANSOURCES list:
+# changed to always do this:
+#IF (CISM_COUPLED)
+ FILE(GLOB FORTRAN_AUTOGEN_SOURCES ${CISM_BINARY_DIR}/fortran_autogen_srcs/*.F90)
+ message("Autogenerated CISM sources: ${FORTRAN_AUTOGEN_SOURCES}")
+
+ SET(FORTRANSOURCES ${FORTRANSOURCES} ${FORTRAN_AUTOGEN_SOURCES})
+
+# Remove old versions of autogenerated F90 files that may be sitting
+# around in the source tree from an old cmake-based build or an
+# autotools-based build. (Now the cmake-based build doesn't place
+# these in the source tree.)
+# Unlike SOURCEMODFILES, we just hard-code the files to remove for
+# simplicity.
+# This can be removed once we switch to consistently using this new
+# cmake-based build.
+ LIST(REMOVE_ITEM FORTRANSOURCES
+ ${CISM_SOURCE_DIR}/libglide/glide_io.F90
+ ${CISM_SOURCE_DIR}/libglide/glide_lithot_io.F90
+ ${CISM_SOURCE_DIR}/libglint/glint_io.F90
+ ${CISM_SOURCE_DIR}/libglint/glint_mbal_io.F90
+ ${CISM_SOURCE_DIR}/libglimmer/glimmer_vers.F90 )
+#ENDIF()
+
+
+### (For CESM) Remove source files with names already in CISM_SOURCEMOD_DIR
+OPTION(CISM_SOURCEMOD_DIR
+ "Path to SourceMod directory of F90 files to replace Glimmer files")
+
+#MESSAGE("Fortran Source Files: ${FORTRANSOURCES}")
+
+# Note that the following glob does NOT contain .cpp files, because
+# those are built in a separate library - so for now, you can't put
+# .cpp files in your sourceMod directory.
+FILE(GLOB SOURCEMODFILES
+ ${CISM_SOURCEMOD_DIR}/*.F90
+ ${CISM_SOURCEMOD_DIR}/*.F
+ ${CISM_SOURCEMOD_DIR}/*.f90
+ ${CISM_SOURCEMOD_DIR}/*.f
+ ${CISM_SOURCEMOD_DIR}/*.c)
+
+# MESSAGE("glimmer_sourcemod_dir: " ${CISM_SOURCEMOD_DIR})
+# MESSAGE("Fortran Mod Files: ${SOURCEMODFILES}")
+
+FOREACH( MODFILE ${SOURCEMODFILES})
+ STRING(FIND ${MODFILE} / index REVERSE)
+ MATH(EXPR index ${index}+1)
+ STRING(SUBSTRING ${MODFILE} ${index} -1 filename)
+ FOREACH( SOURCEFILE ${FORTRANSOURCES})
+    STRING(REGEX MATCH ${filename} match_found ${SOURCEFILE})
+
+ IF(match_found)
+ MESSAGE("--SourceMod: removing ${SOURCEFILE} in favor of ${MODFILE}")
+ LIST(REMOVE_ITEM FORTRANSOURCES ${SOURCEFILE})
+ ENDIF()
+ ENDFOREACH()
+ENDFOREACH()
+###
+
+# WJS (1-3-13): Ideally, rather than checking CISM_GNU, we would instead check for whether -fno-range-check works,
+# and if so, include that flag (see
+# http://stackoverflow.com/questions/3134660/how-to-apply-different-compiler-options-for-different-compilers-in-cmake).
+# But it doesn't look like there is that capability for fortran compilers yet.
+IF (CISM_GNU)
+ # Allow explicit NaN values in gfortran compiler
+ # Note that this won't work if the user has put nan_mod.F90 in their
+ # SourceMods directory, since it assumes a particular path
+ SET_PROPERTY(SOURCE ${CISM_SOURCE_DIR}/libglimmer/nan_mod.F90
+ APPEND PROPERTY COMPILE_FLAGS -fno-range-check)
+ENDIF()
+
+#IF (CISM_COUPLED)
+ # enable removal of the autogenerated source files, when 'make clean' is done:
+ # commented out for now, since it may create more problems than it solves when building
+ # SET_DIRECTORY_PROPERTIES(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${FORTRAN_AUTOGEN_SOURCES}")
+#ENDIF()
+
+
+SET(CMAKE_INCLUDE_PATH ${CISM_BINARY_DIR}/include)
+
+
+#message("Fortransource ${FORTRANSOURCES}")
+
+# include-dirs: Binary: all the .mod files; SOURCE: config.inc;
+# libglide: glide_mask.inc
+
+include_directories (${CISM_BINARY_DIR} ${CISM_SOURCE_DIR}
+ ${CISM_SOURCE_DIR}/libglide)
+
+
+SET(LIBRARY_OUTPUT_PATH ${CISM_BINARY_DIR}/lib)
+
+### Determine which fortran source files are fixed-form,
+### which may require special compilation flags
+FOREACH (SOURCEFILE ${FORTRANSOURCES} ${SOURCEMODFILES})
+ STRING(REGEX MATCH "\\.[fF]$" match_found ${SOURCEFILE})
+ IF(match_found)
+ LIST(APPEND FIXEDSOURCES ${SOURCEFILE})
+ ENDIF()
+ENDFOREACH()
+SET_SOURCE_FILES_PROPERTIES(${FIXEDSOURCES} PROPERTIES Fortran_FORMAT FIXED)
+
+add_library(glimmercismfortran ${FORTRANSOURCES} ${SOURCEMODFILES})
+
+
+#####Build C++ Sources #####
+
+IF (CISM_USE_TRILINOS)
+ add_subdirectory(libglimmer-trilinos)
+ENDIF()
+
+#####Build simple_glide executable (unless turned off) #####
+OPTION(CISM_NO_EXECUTABLE "Set to ON to just build libraries (default:OFF)" OFF)
+
+
+#IF (CISM_BUILD_SIMPLE_GLIDE AND (NOT CISM_NO_EXECUTABLE))
+# add_subdirectory(example-drivers/simple_glide/src)
+#ENDIF()
+
+IF (CISM_USE_CISM_FRONT_END)
+ add_subdirectory(cism_driver)
+ENDIF()
+
+
+IF (CISM_ENABLE_BISICLES)
+ add_subdirectory(libdycore)
+
+ include_directories (${CISM_DYCORE_DIR}
+ ${CISM_BISICLES_DIR})
+
+ message("glimmer src dir: ${CISM_SOURCE_DIR}")
+ message("glimmer dycore dir: ${CISM_DYCORE_DIR}")
+ get_property(inc_dirs DIRECTORY PROPERTY INCLUDE_DIRECTORIES)
+ message("inc_dirs = ${inc_dirs}")
+ENDIF()
+
+
diff --git a/components/cism/glimmer-cism/COPYING b/components/cism/glimmer-cism/COPYING
new file mode 100644
index 0000000000..42d4133cd5
--- /dev/null
+++ b/components/cism/glimmer-cism/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/components/cism/glimmer-cism/COPYING.LESSER b/components/cism/glimmer-cism/COPYING.LESSER
new file mode 100644
index 0000000000..6600f1c98d
--- /dev/null
+++ b/components/cism/glimmer-cism/COPYING.LESSER
@@ -0,0 +1,165 @@
+GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+  Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/components/cism/glimmer-cism/ChangeLog b/components/cism/glimmer-cism/ChangeLog
new file mode 100644
index 0000000000..1747042922
--- /dev/null
+++ b/components/cism/glimmer-cism/ChangeLog
@@ -0,0 +1,29 @@
+CISM Changelog
+
+Changes in CISM Version 2.0
+===========================
+
+CISM version 2.0, which originated from Glimmer-CISM and Glimmer, has a number of major changes and additions relative to these previous codes, including:
+
+* addition of robust, parallel, 2D and 3D, higher-order accurate approximations to the Stokes momentum balance (Blatter-Pattyn, L1L2, and SSA, all available within the "Glissade" dynamical core)
+* addition of 3D, parallel mass and temperature transport
+* addition of software interfaces to modern C++ based solver libraries (e.g., Trilinos)
+* replacement of the Autotools build system with Cmake build system
+* addition of new test cases for higher-order models, including several with analytical solutions
+* re-ordering of the time step to be fully consistent with explicit forward Euler scheme
+* addition of a new high level "cism_driver", which replaces and reproduces functionality of several old drivers and allows for more flexible integration of additional and/or external dycores
+* re-arrangement of the directory structure
+* modifications to the Glint coupling software to support coupling with CESM and other climate models that compute surface mass balance external to the ice sheet model
+* new and updated documentation
+
+More information including full documentation of the code can be found at:
+http://oceans11.lanl.gov/cism/
+
+CISM version 2.0 will be hosted at the CISM Github organization:
+https://github.com/cism/cism
+
+The original Glimmer-CISM and Glimmer codes can be found at the Glimmer-CISM Github organization:
+https://github.com/glimmer-cism
+
+updated 10/21/2014
+
diff --git a/components/cism/glimmer-cism/FUNDING b/components/cism/glimmer-cism/FUNDING
new file mode 100644
index 0000000000..bb77c9fcd9
--- /dev/null
+++ b/components/cism/glimmer-cism/FUNDING
@@ -0,0 +1,9 @@
+
+The development of CISM has been supported by the following U.K. and U.S. funding agencies:
+
+Agency, Country Program
+---------------- ------------
+National Environmental Research Council, U.K. Centre for Polar and Ocean Modelling
+National Science Foundation, U.S. Office of Polar Programs
+Department of Energy, U.S. Biological and Environmental Research
+Department of Energy, U.S. Advanced Scientific Computing Research
diff --git a/components/cism/glimmer-cism/LICENSE b/components/cism/glimmer-cism/LICENSE
new file mode 100644
index 0000000000..4ddaf191d6
--- /dev/null
+++ b/components/cism/glimmer-cism/LICENSE
@@ -0,0 +1,268 @@
+The GNU General Public License (GPL)
+
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to share
+and change it. By contrast, the GNU General Public License is intended to
+guarantee your freedom to share and change free software--to make sure the
+software is free for all its users. This General Public License applies to
+most of the Free Software Foundation's software and to any other program whose
+authors commit to using it. (Some other Free Software Foundation software is
+covered by the GNU Library General Public License instead.) You can apply it
+to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom to
+distribute copies of free software (and charge for this service if you wish),
+that you receive source code or can get it if you want it, that you can change
+the software or use pieces of it in new free programs; and that you know you
+can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you distribute
+copies of the software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or for
+a fee, you must give the recipients all the rights that you have. You must
+make sure that they, too, receive or can get the source code. And you must
+show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2)
+offer you this license which gives you legal permission to copy, distribute
+and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that
+everyone understands that there is no warranty for this free software. If the
+software is modified by someone else and passed on, we want its recipients to
+know that what they have is not the original, so that any problems introduced
+by others will not reflect on the original authors' reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that redistributors of a free program will
+individually obtain patent licenses, in effect making the program
+proprietary. To prevent this, we have made it clear that any patent must be
+licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice
+ placed by the copyright holder saying it may be distributed under the terms
+ of this General Public License. The "Program", below, refers to any such
+ program or work, and a "work based on the Program" means either the Program
+ or any derivative work under copyright law: that is to say, a work
+ containing the Program or a portion of it, either verbatim or with
+ modifications and/or translated into another language. (Hereinafter,
+ translation is included without limitation in the term "modification".)
+ Each licensee is addressed as "you".
+
+ Activities other than copying, distribution and modification are not covered
+ by this License; they are outside its scope. The act of running the Program is
+ not restricted, and the output from the Program is covered only if its
+ contents constitute a work based on the Program (independent of having been
+ made by running the Program). Whether that is true depends on what the Program
+ does.
+
+1. You may copy and distribute verbatim copies of the Program's source code as
+ you receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice and
+ disclaimer of warranty; keep intact all the notices that refer to this
+ License and to the absence of any warranty; and give any other recipients
+ of the Program a copy of this License along with the Program.
+
+ You may charge a fee for the physical act of transferring a copy, and you may
+ at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it,
+ thus forming a work based on the Program, and copy and distribute such
+ modifications or work under the terms of Section 1 above, provided that you
+ also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in whole or
+ in part contains or is derived from the Program or any part thereof, to be
+ licensed as a whole at no charge to all third parties under the terms of
+ this License.
+
+ c) If the modified program normally reads commands interactively when run,
+ you must cause it, when started running for such interactive use in the
+ most ordinary way, to print or display an announcement including an
+ appropriate copyright notice and a notice that there is no warranty (or
+ else, saying that you provide a warranty) and that users may redistribute
+ the program under these conditions, and telling the user how to view a copy
+ of this License. (Exception: if the Program itself is interactive but does
+ not normally print such an announcement, your work based on the Program is
+ not required to print an announcement.)
+
+ These requirements apply to the modified work as a whole. If identifiable
+ sections of that work are not derived from the Program, and can be
+ reasonably considered independent and separate works in themselves, then
+ this License, and its terms, do not apply to those sections when you
+ distribute them as separate works. But when you distribute the same
+ sections as part of a whole which is a work based on the Program, the
+ distribution of the whole must be on the terms of this License, whose
+ permissions for other licensees extend to the entire whole, and thus to
+ each and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or contest your
+ rights to work written entirely by you; rather, the intent is to exercise
+ the right to control the distribution of derivative or collective works
+ based on the Program.
+
+ In addition, mere aggregation of another work not based on the Program with
+ the Program (or with a work based on the Program) on a volume of a storage
+ or distribution medium does not bring the other work under the scope of
+ this License.
+
+3. You may copy and distribute the Program (or a work based on it, under
+ Section 2) in object code or executable form under the terms of Sections 1
+ and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable source
+ code, which must be distributed under the terms of Sections 1 and 2 above
+ on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three years, to
+ give any third party, for a charge no more than your cost of physically
+ performing source distribution, a complete machine-readable copy of the
+ corresponding source code, to be distributed under the terms of Sections 1
+ and 2 above on a medium customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer to
+ distribute corresponding source code. (This alternative is allowed only for
+ noncommercial distribution and only if you received the program in object
+ code or executable form with such an offer, in accord with Subsection b
+ above.)
+
+ The source code for a work means the preferred form of the work for making
+ modifications to it. For an executable work, complete source code means all
+ the source code for all modules it contains, plus any associated interface
+ definition files, plus the scripts used to control compilation and
+ installation of the executable. However, as a special exception, the source
+ code distributed need not include anything that is normally distributed (in
+ either source or binary form) with the major components (compiler, kernel,
+ and so on) of the operating system on which the executable runs, unless
+ that component itself accompanies the executable.
+
+ If distribution of executable or object code is made by offering access to
+ copy from a designated place, then offering equivalent access to copy the
+ source code from the same place counts as distribution of the source code,
+ even though third parties are not compelled to copy the source along with
+ the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as
+ expressly provided under this License. Any attempt otherwise to copy,
+ modify, sublicense or distribute the Program is void, and will
+ automatically terminate your rights under this License. However, parties
+ who have received copies, or rights, from you under this License will not
+ have their licenses terminated so long as such parties remain in full
+ compliance.
+
+5. You are not required to accept this License, since you have not signed
+ it. However, nothing else grants you permission to modify or distribute the
+ Program or its derivative works. These actions are prohibited by law if you
+ do not accept this License. Therefore, by modifying or distributing the
+ Program (or any work based on the Program), you indicate your acceptance of
+ this License to do so, and all its terms and conditions for copying,
+ distributing or modifying the Program or works based on it.
+
+6. Each time you redistribute the Program (or any work based on the Program),
+ the recipient automatically receives a license from the original licensor
+ to copy, distribute or modify the Program subject to these terms and
+ conditions. You may not impose any further restrictions on the recipients'
+ exercise of the rights granted herein. You are not responsible for
+ enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot distribute so
+ as to satisfy simultaneously your obligations under this License and any
+ other pertinent obligations, then as a consequence you may not distribute
+ the Program at all. For example, if a patent license would not permit
+ royalty-free redistribution of the Program by all those who receive copies
+ directly or indirectly through you, then the only way you could satisfy
+ both it and this License would be to refrain entirely from distribution of
+ the Program.
+
+ If any portion of this section is held invalid or unenforceable under any
+ particular circumstance, the balance of the section is intended to apply
+ and the section as a whole is intended to apply in other circumstances.
+
+ It is not the purpose of this section to induce you to infringe any patents
+ or other property right claims or to contest validity of any such claims;
+ this section has the sole purpose of protecting the integrity of the free
+ software distribution system, which is implemented by public license
+ practices. Many people have made generous contributions to the wide range
+ of software distributed through that system in reliance on consistent
+ application of that system; it is up to the author/donor to decide if he or
+ she is willing to distribute software through any other system and a
+ licensee cannot impose that choice.
+
+ This section is intended to make thoroughly clear what is believed to be a
+ consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain
+ countries either by patents or by copyrighted interfaces, the original
+ copyright holder who places the Program under this License may add an
+ explicit geographical distribution limitation excluding those countries, so
+ that distribution is permitted only in or among countries not thus
+ excluded. In such case, this License incorporates the limitation as if
+ written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of the
+ General Public License from time to time. Such new versions will be similar
+ in spirit to the present version, but may differ in detail to address new
+ problems or concerns.
+
+ Each version is given a distinguishing version number. If the Program
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and conditions
+ either of that version or of any later version published by the Free
+ Software Foundation. If the Program does not specify a version number of
+ this License, you may choose any version ever published by the Free
+ Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs
+ whose distribution conditions are different, write to the author to ask
+ for permission. For software which is copyrighted by the Free Software
+ Foundation, write to the Free Software Foundation; we sometimes make
+ exceptions for this. Our decision will be guided by the two goals of
+ preserving the free status of all derivatives of our free software and of
+ promoting the sharing and reuse of software generally.
+
+NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
+ THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+ OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+ TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+ PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+ REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL
+ ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+ INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES
+ ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT
+ LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES
+ SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE
+ WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
diff --git a/components/cism/glimmer-cism/NEWS b/components/cism/glimmer-cism/NEWS
new file mode 100644
index 0000000000..8ad9ef8850
--- /dev/null
+++ b/components/cism/glimmer-cism/NEWS
@@ -0,0 +1,35 @@
+CISM NEWS
+
+CISM Version 2.0
+================
+
+CISM2.0, which originated from Glimmer-CISM and Glimmer, has a number of major changes
+relative to these previous codes:
+
+* addition of robust, parallel, 3D, 1st-order accurate approximation to the Stokes momentum
+ balance ("Glissade" dynamical core)
+* addition of 3D, parallel mass and temperature transport
+* addition of software interfaces to modern C++ based solver libraries (e.g., Trilinos)
+* replacement of the Autotools build system with Cmake build system
+* addition of new test cases for higher-order models
+* re-ordering of the time step to be fully consistent with explicit forward Euler scheme
+* addition of a new high level "cism_driver", which replaces and reproduces functionality of several
+ old drivers and allows for more flexible integration of additional and/or external dycores
+* re-arrangement of the directory structure
+* modifications to the GLINT coupling software to support coupling with CESM and other climate models
+ that compute surface mass balance external to the ice sheet model
+* new and updated documentation
+
+More information including full documentation of the code can be found at:
+http://oceans11.lanl.gov/cism/index.html
+
+The original Glimmer-CISM and Glimmer codes can be found at the Glimmer-CISM Github organization:
+https://github.com/glimmer-cism
+
+updated 8/27/2014
+
+
+
+
+
+
diff --git a/components/cism/glimmer-cism/README b/components/cism/glimmer-cism/README
new file mode 100644
index 0000000000..6bfb03b9f8
--- /dev/null
+++ b/components/cism/glimmer-cism/README
@@ -0,0 +1,58 @@
+
+CISM README file:
+===================
+
+CISM is a land ice model designed to be used as part of an earth-system model
+or as a stand-alone model. Full documentation can be found at:
+http://oceans11.lanl.gov/cism/documentation.html
+
+Licensing:
+=========
+
+CISM is issued under the Lesser GNU General Public Licence (see LICENSE in the main directory).
+
+Note that this licence DOES NOT APPLY to the SLAP linear algebra library, used by the serial
+code, which is included here in its own directory (./libglimmer-solve/SLAP), and is in the public
+domain.
+
+
+Mailing list:
+=============
+
+There are two mailing lists, one for general users and one for developers:
+
+1. The general user mailing list can be signed up for by sending an email to:
+ cism-users+subscribe@googlegroups.com
+
+2. The developers mailing list can be signed up for by sending an email to:
+ cism-devel+subscribe@googlegroups.com
+
+Note that because these lists are managed by GoogleGroups, they will *always* attempt to associate
+you with a Google email address. To ensure that the email you would like to use is associated with the
+list, please make sure you entirely log out of any Google services before attempting to sign up for the
+mailing list. The sign-up process will require you to authenticate the email address you wish to use by
+taking you to a website where you will be prompted to enter that information.
+
+
+Discussion Board:
+=================
+
+A discussion board for getting help with running CISM, either in stand-alone mode or as part of CESM, can
+be found at: http://bb.cgd.ucar.edu/forums/ice-sheet-modeling-cism
+
+
+Bug Reporting:
+==============
+
+Please report unresolved problems using the bug reporting facility at the CISM Github website
+(under "Issues"): https://github.com/cism/cism/issues
+
+
+Building / Installing CISM:
+===========================
+
+For detailed instructions on how to install and build CISM, please see Chapter 2 of the users
+guide (available at: http://oceans11.lanl.gov/cism/documentation.html)
+
+
+last updated: 10/21/2014
diff --git a/components/cism/glimmer-cism/builds/README b/components/cism/glimmer-cism/builds/README
new file mode 100644
index 0000000000..4ecbc4f4f6
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/README
@@ -0,0 +1,18 @@
+The seacism/builds directory is intended to contain cmake builds of CISM for the
+most common platform/compiler-suite combinations. Each subdirectory of this
+directory should contain a README file and a configure script file whose name
+is <platform>-<compiler>-cmake, for example, hopper-pgi-cmake. The README
+file should contain instructions to do the build, provide information on what
+compiler suite will be used, and list what executables will be built, e.g.
+simple_glide and simple_bisicles. It should also list any dependencies on
+other packages. These dependencies should be handled in the configure script.
+
+The difference between this and using the cmake-scripts directory is that these
+builds should be even more out-of-the box, so that a user can cd to the
+appropriate directory, read the brief README file, and be able to quickly
+generate the CISM build. The cmake scripts in the build directories should
+handle loading the appropriate modules as part of simplifying the build
+process, and should be kept current as much as possible. A significant part
+of that is making sure the installed packages that the build relies on are
+current. As an example, the packages that need to be maintained for the
+hopper-pgi build are Trilinos and BISICLES (which also includes Chombo).
diff --git a/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-build-and-test.csh b/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-build-and-test.csh
new file mode 100644
index 0000000000..d24d993aa4
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-build-and-test.csh
@@ -0,0 +1,154 @@
+#!/bin/csh
+
+
+
+# Master build script for mac laptops. Last updated 2/28/2013 by SFP.
+# This is a hacked version of Kate's original script for use on Hopper.
+# For now, only supports parallel build with Trilinos using gnu and cmake.
+# Only a subset of the small, standard tests are run, on both 1 and 4 procs.
+
+# (1) execute from the builds/blizzard-gnu subdirectory of CISM
+
+#add logic at the top to decide which versions to build
+
+# PARALLEL BUILD WITH CMAKE
+
+# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order"
+
+# 5/7/2014 DMR -- added performance tests:
+
+## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted
+## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that,
+## you can go to lines #193-194, 197-198, 201-202, and uncomment them.
+setenv PERF_TEST 0
+
+@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests) || ($5 == run-perf-tests))
+
+if ($run_perf_tests) then
+ setenv PERF_TEST 1
+endif
+
+@ skip_build_set = (($1 == skip-build) || ($2 == skip-build) || ($3 == skip-build) || ($4 == skip-build) || ($5 == skip-build))
+
+@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy) || ($5 == no-copy))
+
+@ skip_tests_set = (($1 == skip-tests) || ($2 == skip-tests) || ($3 == skip-tests) || ($4 == skip-tests) || ($5 == skip-tests))
+
+#**!move this and source it to your .bashrc (wherever your higher-order directory is located)
+#setenv TEST_DIR /lustre/atlas/scratch/$USER/cli062/higher-order
+
+if (! -d $TEST_DIR) mkdir -p $TEST_DIR
+
+setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv
+#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite
+
+setenv build_problem 0
+
+set COMPILER_NAME = gnu
+set PLATFORM_NAME = blizzard
+
+# set PLATFORM_NAME = $1
+# set COMPILER_NAME = $2
+
+set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-cmake'
+set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out'
+set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out'
+#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job'
+#set CISM_RUN_SCRIPT = 'hopjob'
+set CISM_RUN_SCRIPT = 'ijob_linux'
+set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash'
+#set CISM_VV_SCRIPT = 'rhea_VV.bash'
+
+echo
+echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh'
+echo
+#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test'
+echo
+echo "Call with no-copy to prevent copying of the reg_test and livv defaults."
+echo "Call with run-perf-tests to run the performance tests."
+echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)."
+
+
+echo
+echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).'
+echo
+
+
+#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR'
+#echo 'Examples (place in .cshrc or .bashrc):'
+#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"'
+#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"'
+echo
+echo 'Setting TEST_DIR to the location: '
+echo 'TEST_DIR =' $TEST_DIR
+echo 'TEST_DIR must also be set in your .bashrc file.'
+
+# PARALLEL BUILD WITH CMAKE
+
+
+if ($skip_build_set == 0) then
+
+echo
+echo "Configuring and building in directory: " $PWD
+echo
+
+echo 'Configuring '$COMPILER_NAME' cmake build...'
+source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT
+echo 'Making parallel '$COMPILER_NAME'...'
+make -j 8 >& $CMAKE_BUILD_OUT
+
+#if ( -e example-drivers/simple_glide/src/simple_glide ) then
+# echo 'Copying '$COMPILER_NAME' parallel simple_glide_'$COMPILER_NAME' to test directory'
+# cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_$COMPILER_NAME
+#else
+# echo "cmake '$COMPILER_NAME' build failed, no executable"
+# @ build_problem = 1
+#endif
+
+if ( -e cism_driver/cism_driver ) then
+ echo 'Copying '$COMPILER_NAME' parallel cism_driver_'$COMPILER_NAME' to test directory'
+ cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+endif # skip_build_set
+
+if ($build_problem == 1) then
+ echo "No job submitted -- cmake build failed."
+else # execute tests:
+
+ # Make copy of test suite in $TEST_DIR:
+if (! ($no_copy_set)) then
+ echo "Copying default reg_test and LIVV to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz
+ tar xfz reg_test_default.tgz
+ popd > /dev/null
+
+ if ($PERF_TEST) then
+ echo "Copying default perf_test to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz
+ tar xfz perf_test_default.tgz
+ popd > /dev/null
+ endif
+
+ cp -rf ../../tests/higher-order/livv $TEST_DIR
+endif
+
+if ($skip_tests_set) then
+ echo "Skipping tests."
+ exit
+endif
+
+csh $TEST_DIR/livv/run_livv_default_tests.csh $TEST_DIR $CISM_RUN_SCRIPT $PERF_TEST $CISM_VV_SCRIPT
+echo "Back in build-and-test script, exiting."
+exit
+
+
diff --git a/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-cmake b/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-cmake
new file mode 100755
index 0000000000..a581c86dc6
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-cmake
@@ -0,0 +1,69 @@
+# cmake configuration script that works on the Linux box in Matt's office (blueskies) with GCC
+# Others will need to modify the Netcdf path.
+# This config script is setup to perform a parallel build with Trilinos.
+#
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+# After this executes, do:
+# make -j 8
+#
+
+#echo
+#echo Run this script by typing: source linux-gnu-cism-cmake
+#echo
+#echo Set CISM_TRILINOS_DIR to your Trilinos installation directory.
+#echo
+
+# remove old build data:
+rm -f ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_GNU=ON \
+\
+ -D CISM_TRILINOS_DIR=/opt/trilinos-11.4.1_GNU4.8.3 \
+ -D CISM_HDF5_LIB_DIR=/opt/hdf5_gcc4.8.3 \
+ -D CISM_NETCDF_DIR=/opt/netcdf4.3.2_gcc4.8.3 \
+\
+ -D CMAKE_Fortran_FLAGS="-g -O2 -ffree-line-length-none -fPIC -fno-range-check" \
+\
+ -D CMAKE_CXX_COMPILER=mpicxx \
+ -D CMAKE_C_COMPILER=mpicc \
+ -D CMAKE_Fortran_COMPILER=mpif90 \
+\
+ -D CISM_EXTRA_LIBS:STRING="-lblas -lcurl" \
+\
+ -D CISM_MPI_INC_DIR=/opt/mpi3.1.2_gnu4.8.3/include \
+ -D CISM_MPI_LIB_DIR=/opt/mpi3.1.2_gnu4.8.3/lib \
+\
+ -D CMAKE_VERBOSE_MAKEFILE=OFF \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ..
+
+# Note: last argument above ".." is path to top seacism directory
+
+# -D CISM_NETCDF_LIBS="netcdff" \
diff --git a/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-cmake-debug b/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-cmake-debug
new file mode 100755
index 0000000000..528011b303
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/blizzard-gnu/blizzard-gnu-cmake-debug
@@ -0,0 +1,83 @@
+# Requires (command line or in .bashrc):
+# module load cmake
+#
+# module unload cmake python netcdf hdf5
+# module swap PrgEnv-pgi PrgEnv-gnu
+# module load netcdf-hdf5parallel/4.2.0 cmake/2.8.6 python
+#
+# cmake configuration script that works on jaguar with GCC
+# This script needs to be run from a subdirectory (e.g. build-gnu)
+# of the main seacism repository (reflected in the several
+# instances of # ".." below).
+#
+# After this executes, do:
+# make -j 8
+# cp example-drivers/simple_glide/src/sgcmake .
+
+# remove old build data:
+rm -f ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_GNU=ON \
+\
+ -D CISM_TRILINOS_DIR=/opt/trilinos-11.4.1_GNU4.8.3 \
+ -D CISM_HDF5_LIB_DIR=/opt/hdf5_gcc4.8.3 \
+ -D CISM_NETCDF_DIR=/opt/netcdf4.3.2_gcc4.8.3 \
+\
+ -D CMAKE_Fortran_FLAGS="-g -fbounds-check -fcheck-array-temporaries -ffree-line-length-none" \
+\
+ -D CMAKE_CXX_COMPILER=mpicxx \
+ -D CMAKE_C_COMPILER=mpicc \
+ -D CMAKE_Fortran_COMPILER=mpif90 \
+\
+ -D CISM_EXTRA_LIBS:STRING="-lblas -lcurl" \
+\
+ -D CISM_MPI_INC_DIR=/opt/mpi3.1.2_gnu4.8.3/include \
+ -D CISM_MPI_LIB_DIR=/opt/mpi3.1.2_gnu4.8.3/lib \
+\
+ -D CMAKE_VERBOSE_MAKEFILE=OFF \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ..
+
+# Note: last argument above ".." is path to top seacism directory
+
+# Prg Env that worked in titan 1/17/2013
+#Currently Loaded Modulefiles:
+# 1) modules/3.2.6.6 22) audit/1.0.0-1.0401.34509.4.34.gem
+# 2) xtpe-network-gemini 23) rca/1.0.0-2.0401.34092.9.59.gem
+# 3) xtpe-interlagos 24) krca/1.0.0-2.0401.33562.3.95.gem
+# 4) eswrap/1.0.15 25) dvs/0.9.0-1.0401.1327.13.34.gem
+# 5) lustredu/1.2 26) csa/3.0.0-1_2.0401.33458.3.110.gem
+# 6) DefApps 27) job/1.5.5-0.1_2.0401.34507.6.2.gem
+# 7) altd/1.0 28) xpmem/0.1-2.0401.32557.3.12.gem
+# 8) torque/4.1.4 29) gni-headers/2.1-1.0401.5618.16.1.gem
+# 9) moab/7.1.3 30) dmapp/3.2.1-1.0401.5585.5.2.gem
+# 10) cray-mpich2/5.5.5 31) pmi/4.0.0-1.0000.9282.69.4.gem
+# 11) subversion/1.6.17 32) ugni/4.0-1.0401.5617.15.1.gem
+# 12) atp/1.5.2 33) udreg/2.3.2-1.0401.5828.5.1.gem
+# 13) xe-sysroot/4.1.20 34) xt-libsci/11.1.01
+# 14) switch/1.0-1.0401.34518.4.34.gem 35) gcc/4.7.2
+# 15) shared-root/1.0-1.0401.34936.4.9.gem 36) xt-asyncpe/5.16
+# 16) pdsh/2.2-1.0401.34516.3.1.gem 37) PrgEnv-gnu/4.1.20
+# 17) nodehealth/3.0-1.0401.35104.16.2.gem 38) cmake/2.8.6
+# 18) lbcd/2.1-1.0401.34512.5.1.gem 39) python/2.7.2
+# 19) hosts/1.0-1.0401.34511.5.34.gem 40) hdf5-parallel/1.8.8
+# 20) configuration/1.0-1.0401.34510.3.3.gem 41) netcdf-hdf5parallel/4.2.0
+# 21) ccm/2.2.0-1.0401.34937.13.25
+
+
diff --git a/components/cism/glimmer-cism/builds/edison-intel/PKG_CONFIG_PATH_fix.csh b/components/cism/glimmer-cism/builds/edison-intel/PKG_CONFIG_PATH_fix.csh
new file mode 100644
index 0000000000..aeff66e806
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/edison-intel/PKG_CONFIG_PATH_fix.csh
@@ -0,0 +1,2 @@
+setenv PKG_CONFIG_PATH /opt/cray/rca/1.0.0-2.0501.48090.7.46.ari/lib64/pkgconfig:/opt/cray/alps/5.1.1-2.0501.8471.1.1.ari/lib64/pkgconfig:/opt/cray/csa/3.0.0-1_2.0501.47112.1.91.ari/lib64/pkgconfig:/opt/cray/xpmem/0.1-2.0501.48424.3.3.ari/lib64/pkgconfig:/opt/cray/gni-headers/3.0-1.0501.8317.12.1.ari/lib64/pkgconfig:/opt/cray/dmapp/7.0.1-1.0501.8315.8.4.ari/lib64/pkgconfig:/opt/cray/pmi/5.0.3-1.0000.9981.128.2.ari/lib64/pkgconfig:/opt/cray/ugni/5.0-1.0501.8253.10.22.ari/lib64/pkgconfig:/opt/cray/udreg/2.3.2-1.0501.7914.1.13.ari/lib64/pkgconfig:/opt/cray/iobuf/2.0.5/lib/pkgconfig:/opt/cray/switch/1.0-1.0501.47124.1.93.ari/lib64/pkgconfig:/opt/cray/atp/1.7.2/lib/pkgconfig:/opt/cray/libsci/12.2.0/INTEL/130/x86_64/lib/pkgconfig
+
diff --git a/components/cism/glimmer-cism/builds/edison-intel/edison-bisicles-intel-cmake b/components/cism/glimmer-cism/builds/edison-intel/edison-bisicles-intel-cmake
new file mode 100644
index 0000000000..075ca316b0
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/edison-intel/edison-bisicles-intel-cmake
@@ -0,0 +1,119 @@
+# run this script by typing: source edison-bisicles-intel-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on edison using the INTEL compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to ./../../../BISICLES/CISM-interface/interface )
+
+# This script should be run from the builds/edison-intel subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source edison-bisicles-intel-cmake
+echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload cray-hdf5-parallel
+module unload cray-netcdf-hdf5parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load modules
+module load cmake
+module load PrgEnv-intel
+module load cray-hdf5-parallel
+module load cray-netcdf-hdf5parallel
+module load python
+module load cray-shmem
+module load cray-mpich
+module unload darshan
+
+# (hopefully) temporary fix for PKG_CONFIG_PATH problem
+source PKG_CONFIG_PATH_fix.csh
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \
+ -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos/edison-intel-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \
+\
+ -D CISM_NETCDF_DIR=$NETCDF_DIR \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CISM_MPI_BASE_DIR=$CRAY_MPICH2_DIR \
+ -D CISM_SCI_LIB_DIR=$CRAY_LIBSCI_PREFIX_DIR/lib \
+ -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-edison-intel \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -DNO_RESCALE" \
+ -D BISICLES_LIB_SUBDIR=libintel \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="$HDF5_DIR" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=$HDF5_DIR/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_intel_parallel -lz" \
+
+# -D CISM_FMAIN=/opt/pgi/13.6.0/linux86-64/13.6/lib/f90main.o \
diff --git a/components/cism/glimmer-cism/builds/edison-intel/edison-intel-cmake b/components/cism/glimmer-cism/builds/edison-intel/edison-intel-cmake
new file mode 100644
index 0000000000..3758525c81
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/edison-intel/edison-intel-cmake
@@ -0,0 +1,121 @@
+# run this script by typing: source edison-intel-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on edison using the INTEL compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to ./../../../BISICLES/CISM-interface/interface )
+
+# This script should be run from the builds/edison-intel subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source edison-intel-cmake
+echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload cray-hdf5-parallel
+module unload cray-netcdf-hdf5parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load modules
+module load cmake
+module load PrgEnv-intel
+module load cray-hdf5-parallel
+module load cray-netcdf-hdf5parallel
+module load python
+module load cray-shmem
+module load cray-mpich
+
+module unload darshan
+
+# (hopefully) temporary fix for PKG_CONFIG_PATH problem
+source PKG_CONFIG_PATH_fix.csh
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \
+ -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos/edison-intel-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \
+\
+ -D CISM_NETCDF_DIR=$NETCDF_DIR \
+ -D CISM_FMAIN=/opt/intel/composer_xe_2013.5.192/compiler/lib/intel64/for_main.o \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CISM_MPI_BASE_DIR=$CRAY_MPICH2_DIR \
+ -D CISM_SCI_LIB_DIR=$CRAY_LIBSCI_PREFIX_DIR/lib \
+ -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-edison-intel \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -openmp" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -openmp" \
+ -D BISICLES_LIB_SUBDIR=libintel \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="$HDF5_DIR" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=$HDF5_DIR/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_intel_parallel -lz" \
+
+# -D CISM_FMAIN=/opt/pgi/13.6.0/linux86-64/13.6/lib/f90main.o \
diff --git a/components/cism/glimmer-cism/builds/edison-intel/edison-petsc-bisicles-intel-cmake b/components/cism/glimmer-cism/builds/edison-intel/edison-petsc-bisicles-intel-cmake
new file mode 100644
index 0000000000..c93c270246
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/edison-intel/edison-petsc-bisicles-intel-cmake
@@ -0,0 +1,120 @@
+# run this script by typing: source edison-petsc-bisicles-intel-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on edison using the INTEL compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to ./../../../BISICLES/CISM-interface/interface )
+
+# This script should be run from the builds/edison-intel subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source edison-petsc-bisicles-intel-cmake
+echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload cray-hdf5-parallel
+module unload cray-netcdf-hdf5parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load modules
+module load cmake
+module load PrgEnv-intel
+module load cray-hdf5-parallel
+module load cray-netcdf-hdf5parallel
+module load python
+module load cray-shmem
+module load cray-mpich
+module unload darshan
+
+# (hopefully) temporary fix for PKG_CONFIG_PATH problem
+source PKG_CONFIG_PATH_fix.csh
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \
+ -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos/edison-intel-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \
+\
+ -D CISM_NETCDF_DIR=$NETCDF_DIR \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CISM_MPI_BASE_DIR=$CRAY_MPICH2_DIR \
+ -D CISM_SCI_LIB_DIR=$CRAY_LIBSCI_PREFIX_DIR/lib \
+ -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-edison-intel \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -DNO_RESCALE" \
+ -D CISM_EXTRA_LIBS:STRING="-L$PETSC_DIR/$PETSC_ARCH/lib -lpetsc -lHYPRE -lpthread -ldl -lssl -lcrypto" \
+ -D BISICLES_LIB_SUBDIR=libintel \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="$HDF5_DIR" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=$HDF5_DIR/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_intel_parallel -lz" \
+
+# -D CISM_FMAIN=/opt/pgi/13.6.0/linux86-64/13.6/lib/f90main.o \
diff --git a/components/cism/glimmer-cism/builds/hopper-gnu-felix/hopper-gnu-bisicles-cmake b/components/cism/glimmer-cism/builds/hopper-gnu-felix/hopper-gnu-bisicles-cmake
new file mode 100755
index 0000000000..66c13af737
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-gnu-felix/hopper-gnu-bisicles-cmake
@@ -0,0 +1,121 @@
+# run this script by typing: source hopper-gnu-bisicles-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on hopper using the gnu compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to /global/u2/d/dmartin/BISICLES/code/interface)
+
+
+# This script should be run from the builds/hopper-gnu-felix subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source hopper-gnu-bisicles-cmake
+echo
+
+#echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide
+#echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module.
+#echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines:
+#echo module unload python
+#echo module load python/2.7.1
+#echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+module --silent purge
+
+#module unload cmake netcdf-hdf5parallel/4.2.0 python
+#module swap PrgEnv-pgi PrgEnv-gnu; module load cmake/2.8.7 python netcdf-hdf5parallel/4.2.0 usg-default-modules/1.0
+
+module load modules/3.2.6.6
+module load cmake/2.8.7
+module load PrgEnv-gnu/4.2.34
+module load cray-shmem/6.0.1
+module load cray-mpich/6.0.1
+module load cray-hdf5-parallel/1.8.11
+module load cray-netcdf-hdf5parallel
+module load python/2.7.5
+
+
+module load torque/4.2.3.h5_notcpretry
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D ALBANY_BUILD_DIR="/project/projectdirs/piscees/albany/albany-cism-build" \
+ -D ALBANY_BASE_DIR="/project/projectdirs/piscees/albany/albany-felix-src" \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.3.0/GNU/48 \
+ -D CISM_MPI_BASE_DIR=/opt/cray/mpt/6.0.1/gni/mpich2-gnu/48 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -DNO_RESCALE -ffree-line-length-none " \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ -D CISM_GNU:BOOL=ON \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ ../..
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/hopper-gnu-felix/hopper-gnu-felix-cmake b/components/cism/glimmer-cism/builds/hopper-gnu-felix/hopper-gnu-felix-cmake
new file mode 100755
index 0000000000..305f07a22f
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-gnu-felix/hopper-gnu-felix-cmake
@@ -0,0 +1,115 @@
+# run this script by typing: source hopper-gnu-felix-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on hopper using the gnu compiler suite. It relies on a build of Trilinos
+# located in /global/project/projectdirs/piscees, and a build of BISICLES
+# located in the ranken home directory: /global/u1/r/ranken/BISICLES
+
+
+# This script should be run from the builds/hopper-gnu-felix subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source hopper-gnu-felix-cmake
+echo
+
+#echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide
+#echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module.
+#echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines:
+#echo module unload python
+#echo module load python/2.7.1
+#echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+module --silent purge
+
+#module unload cmake netcdf-hdf5parallel/4.2.0 python
+#module swap PrgEnv-pgi PrgEnv-gnu; module load cmake/2.8.7 python netcdf-hdf5parallel/4.2.0 usg-default-modules/1.0
+
+module load modules/3.2.6.6
+module load cmake/2.8.7
+module load PrgEnv-gnu/4.1.40
+module load hdf5-parallel/1.8.8
+module load python/2.7.1
+module load cray-shmem/5.5.2
+module load cray-mpich2/5.5.2
+module load torque/4.2.3.h5_notcpretry
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D ALBANY_BUILD_DIR="/project/projectdirs/piscees/albany/albany-cism-build" \
+ -D ALBANY_BASE_DIR="/project/projectdirs/piscees/albany/albany-felix-src" \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf/4.3.0/GNU/47 \
+ -D CISM_MPI_BASE_DIR=/opt/cray/mpt/5.6.4/gni/mpich2-gnu/47 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2" \
+ -D CISM_Fortran_FLAGS="-ffree-line-length-none" \
+ -D CISM_GNU:BOOL=ON \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ ../..
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/hopper-gnu/hopper-bisicles-gnu-cmake b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-bisicles-gnu-cmake
new file mode 100644
index 0000000000..7f9d6e104d
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-bisicles-gnu-cmake
@@ -0,0 +1,122 @@
+# run this script by typing: source hopper-bisicles-gnu-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on hopper using the gnu compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to /global/u2/d/dmartin/BISICLES/code/interface)
+
+
+# This script should be run from the builds/hopper-gnu subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source hopper-bisicles-gnu-cmake
+echo
+echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide
+echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module.
+echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines:
+echo module unload python
+echo module load python/2.7.1
+echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load PrgEnv-gnu/4.1.40
+
+module load modules/3.2.6.6
+module load cmake/2.8.7
+module load hdf5-parallel/1.8.8
+module load python/2.7.1
+module load cray-shmem/6.0.1
+module load cray-mpich/6.0.1
+module load torque/4.2.3.h5_notcpretry
+
+#temporary fix for HDF5 error:
+module swap gcc gcc/4.7.2
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D ALBANY_FELIX_DYCORE:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf/4.3.0/GNU/47 \
+ -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5/1.8.11/GNU/47/lib \
+ -D CISM_MPI_BASE_DIR=/opt/cray/mpt/5.6.4/gni/mpich2-gnu/47 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -DNO_RESCALE -ffree-line-length-none " \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ -D CISM_GNU:BOOL=ON \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-build-and-test-serial.csh b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-build-and-test-serial.csh
new file mode 100644
index 0000000000..f879e914c3
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-build-and-test-serial.csh
@@ -0,0 +1,155 @@
+#!/bin/csh
+
+
+
+# Master build script for mac laptops. Last updated 2/28/2013 by SFP.
+# This is a hacked version of Kate's original script for use on Hopper.
+# For now, only supports parallel build with Trilinos using gnu and cmake.
+# Only a subset of the small, standard tests are run, on both 1 and 4 procs.
+
+# (1) execute from the builds/hopper-gnu subdirectory of CISM
+
+#add logic at the top to decide which versions to build
+
+# PARALLEL BUILD WITH CMAKE
+
+# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order"
+
+# 5/7/2014 DMR -- added performance tests:
+
+## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted
+## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that,
+## you can go to lines #193-194, 197-198, 201-202, and uncomment them.
+setenv PERF_TEST 0
+
+@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests) || ($5 == run-perf-tests))
+
+if ($run_perf_tests) then
+ setenv PERF_TEST 1
+endif
+
+@ skip_build_set = (($1 == skip-build) || ($2 == skip-build) || ($3 == skip-build) || ($4 == skip-build) || ($5 == skip-build))
+
+@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy) || ($5 == no-copy))
+
+@ skip_tests_set = (($1 == skip-tests) || ($2 == skip-tests) || ($3 == skip-tests) || ($4 == skip-tests) || ($5 == skip-tests))
+
+#**!move this and source it to your .bashrc (wherever your higher-order directory is located)
+#setenv TEST_DIR /lustre/atlas/scratch/$USER/cli062/higher-order
+
+if (! -d $TEST_DIR) mkdir -p $TEST_DIR
+
+setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv
+#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite
+
+setenv build_problem 0
+
+set COMPILER_NAME = gnu
+set PLATFORM_NAME = hopper
+
+# set PLATFORM_NAME = $1
+# set COMPILER_NAME = $2
+
+set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-serial-cmake'
+set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out'
+set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out'
+#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job'
+set CISM_RUN_SCRIPT = 'hopjob'
+#set CISM_RUN_SCRIPT = 'ijob'
+#set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash'
+set CISM_VV_SCRIPT = 'carver_VV.bash'
+#set CISM_VV_SCRIPT = 'rhea_VV.bash'
+
+echo
+echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh'
+echo
+#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test'
+echo
+echo "Call with no-copy to prevent copying of the reg_test and livv defaults."
+echo "Call with run-perf-tests to run the performance tests."
+echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)."
+
+
+echo
+echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).'
+echo
+
+
+#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR'
+#echo 'Examples (place in .cshrc or .bashrc):'
+#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"'
+#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"'
+echo
+echo 'Setting TEST_DIR to the location: '
+echo 'TEST_DIR =' $TEST_DIR
+echo 'TEST_DIR must also be set in your .bashrc file.'
+
+# PARALLEL BUILD WITH CMAKE
+
+
+if ($skip_build_set == 0) then
+
+echo
+echo "Configuring and building in directory: " $PWD
+echo
+
+echo 'Configuring '$COMPILER_NAME' cmake build...'
+source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT
+echo 'Making parallel '$COMPILER_NAME'...'
+make -j 8 >& $CMAKE_BUILD_OUT
+
+if ( -e example-drivers/simple_glide/src/simple_glide ) then
+ echo 'Copying '$COMPILER_NAME' parallel simple_glide_'$COMPILER_NAME' to test directory'
+ cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+if ( -e cism_driver/cism_driver ) then
+ echo 'Copying '$COMPILER_NAME' parallel cism_driver_'$COMPILER_NAME' to test directory'
+ cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+endif # skip_build_set
+
+if ($build_problem == 1) then
+ echo "No job submitted -- cmake build failed."
+else # execute tests:
+
+ # Make copy of test suite in $TEST_DIR:
+if (! ($no_copy_set)) then
+ echo "Copying default reg_test and LIVV to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz
+ tar xfz reg_test_default.tgz
+ popd > /dev/null
+
+ if ($PERF_TEST) then
+ echo "Copying default perf_test to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz
+ tar xfz perf_test_default.tgz
+ popd > /dev/null
+ endif
+
+ cp -rf ../../tests/higher-order/livv $TEST_DIR
+endif
+
+if ($skip_tests_set) then
+ echo "Skipping tests."
+ exit
+endif
+
+csh $TEST_DIR/livv/run_livv_default_tests.csh $TEST_DIR $CISM_RUN_SCRIPT $PERF_TEST $CISM_VV_SCRIPT
+echo "Back in build-and-test script, exiting."
+exit
+
+
diff --git a/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-build-and-test.csh b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-build-and-test.csh
new file mode 100644
index 0000000000..36339c26c5
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-build-and-test.csh
@@ -0,0 +1,155 @@
+#!/bin/csh
+
+
+
+# Master build script for Hopper. Last updated 2/28/2013 by SFP.
+# This is a hacked version of Kate's original script for use on Hopper.
+# For now, only supports parallel build with Trilinos using gnu and cmake.
+# Only a subset of the small, standard tests are run, on both 1 and 4 procs.
+
+# (1) execute from the builds/hopper-gnu subdirectory of CISM
+
+#add logic at the top to decide which versions to build
+
+# PARALLEL BUILD WITH CMAKE
+
+# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order"
+
+# 5/7/2014 DMR -- added performance tests:
+
+## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted
+## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that,
+## you can go to lines #193-194, 197-198, 201-202, and uncomment them.
+setenv PERF_TEST 0
+
+@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests) || ($5 == run-perf-tests))
+
+if ($run_perf_tests) then
+ setenv PERF_TEST 1
+endif
+
+@ skip_build_set = (($1 == skip-build) || ($2 == skip-build) || ($3 == skip-build) || ($4 == skip-build) || ($5 == skip-build))
+
+@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy) || ($5 == no-copy))
+
+@ skip_tests_set = (($1 == skip-tests) || ($2 == skip-tests) || ($3 == skip-tests) || ($4 == skip-tests) || ($5 == skip-tests))
+
+#**!move this and source it to your .bashrc (wherever your higher-order directory is located)
+#setenv TEST_DIR /lustre/atlas/scratch/$USER/cli062/higher-order
+
+if (! -d $TEST_DIR) mkdir -p $TEST_DIR
+
+setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv
+#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite
+
+setenv build_problem 0
+
+set COMPILER_NAME = gnu
+set PLATFORM_NAME = hopper
+
+# set PLATFORM_NAME = $1
+# set COMPILER_NAME = $2
+
+set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-cmake'
+set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out'
+set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out'
+#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job'
+set CISM_RUN_SCRIPT = 'hopjob'
+#set CISM_RUN_SCRIPT = 'ijob'
+#set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash'
+set CISM_VV_SCRIPT = 'carver_VV.bash'
+#set CISM_VV_SCRIPT = 'rhea_VV.bash'
+
+echo
+echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh'
+echo
+#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test'
+echo
+echo "Call with no-copy to prevent copying of the reg_test and livv defaults."
+echo "Call with run-perf-tests to run the performance tests."
+echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)."
+
+
+echo
+echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).'
+echo
+
+
+#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR'
+#echo 'Examples (place in .cshrc or .bashrc):'
+#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"'
+#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"'
+echo
+echo 'Setting TEST_DIR to the location: '
+echo 'TEST_DIR =' $TEST_DIR
+echo 'TEST_DIR must also be set in your .bashrc file.'
+
+# PARALLEL BUILD WITH CMAKE
+
+
+if ($skip_build_set == 0) then
+
+echo
+echo "Configuring and building in directory: " $PWD
+echo
+
+echo 'Configuring '$COMPILER_NAME' cmake build...'
+source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT
+echo 'Making parallel '$COMPILER_NAME'...'
+make -j 8 >& $CMAKE_BUILD_OUT
+
+if ( -e example-drivers/simple_glide/src/simple_glide ) then
+ echo 'Copying '$COMPILER_NAME' parallel simple_glide_'$COMPILER_NAME' to test directory'
+ cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+if ( -e cism_driver/cism_driver ) then
+ echo 'Copying '$COMPILER_NAME' parallel cism_driver_'$COMPILER_NAME' to test directory'
+ cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+endif # skip_build_set
+
+if ($build_problem == 1) then
+ echo "No job submitted -- cmake build failed."
+else # execute tests:
+
+ # Make copy of test suite in $TEST_DIR:
+if (! ($no_copy_set)) then
+ echo "Copying default reg_test and LIVV to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz
+ tar xfz reg_test_default.tgz
+ popd > /dev/null
+
+ if ($PERF_TEST) then
+ echo "Copying default perf_test to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz
+ tar xfz perf_test_default.tgz
+ popd > /dev/null
+ endif
+
+ cp -rf ../../tests/higher-order/livv $TEST_DIR
+endif
+
+if ($skip_tests_set) then
+ echo "Skipping tests."
+ exit
+endif
+
+csh $TEST_DIR/livv/run_livv_default_tests.csh $TEST_DIR $CISM_RUN_SCRIPT $PERF_TEST $CISM_VV_SCRIPT
+echo "Back in build-and-test script, exiting."
+exit
+
+
diff --git a/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-cmake b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-cmake
new file mode 100644
index 0000000000..4be3adaf36
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-cmake
@@ -0,0 +1,126 @@
+# run this script by typing: source hopper-gnu-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on hopper using the gnu compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to /global/u2/d/dmartin/BISICLES/code/interface)
+
+
+# This script should be run from the builds/hopper-gnu subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+echo
+echo Run this script by typing: source hopper-gnu-cmake
+echo
+
+module unload cmake
+module unload hdf5
+module unload hdf5-parallel cray-hdf5-parallel
+module unload netcdf cray-netcdf-hdf5parallel
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+module unload boost gcc
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+
+module load modules/3.2.6.6
+module load cmake/2.8.10.2
+module load PrgEnv-gnu/4.2.34
+module load gcc/4.8.1
+module load cray-shmem/6.0.1
+module load cray-mpich/6.0.1
+module load cray-hdf5-parallel/1.8.11
+module load cray-netcdf-hdf5parallel
+module load python/2.7.5
+module load boost
+
+module load torque/4.2.3.h5_notcpretry
+
+#temporary fix for HDF5 error:
+#module swap gcc gcc/4.7.2
+
+module list
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D ALBANY_FELIX_DYCORE:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \
+ -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos/hopper-gnu-cism-albany-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.3.0/GNU/48 \
+ -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5/1.8.11/GNU/48/lib \
+ -D CISM_MPI_BASE_DIR=/opt/cray/mpt/6.0.1/gni/mpich2-gnu/48 \
+\
+ -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-hopper-gnu_4.8.1 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -ffree-line-length-none " \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ -D CISM_GNU:BOOL=ON \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
+
+
diff --git a/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-serial-cmake b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-serial-cmake
new file mode 100644
index 0000000000..c1fcf742db
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-gnu/hopper-gnu-serial-cmake
@@ -0,0 +1,125 @@
+# run this script by typing: source hopper-gnu-serial-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on hopper using the gnu compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to /global/u2/d/dmartin/BISICLES/code/interface)
+
+
+# This script should be run from the builds/hopper-gnu subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source hopper-bisicles-gnu-cmake
+echo
+echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide
+echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module.
+echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines:
+echo module unload python
+echo module load python/2.7.1
+echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load PrgEnv-gnu/4.1.40
+
+module load modules/3.2.6.6
+module load cmake/2.8.7
+module load hdf5-parallel/1.8.8
+module load python/2.7.1
+module load cray-shmem/6.0.1
+module load cray-mpich/6.0.1
+module load torque/4.2.3.h5_notcpretry
+
+#temporary fix for HDF5 error:
+module swap gcc gcc/4.7.2
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D ALBANY_FELIX_DYCORE:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \
+ -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/Trilinos/hopper-gnu-cism-albany-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos/trilinos-albany-build/install \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf/4.3.0/GNU/47 \
+ -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5/1.8.11/GNU/47/lib \
+ -D CISM_MPI_BASE_DIR=/opt/cray/mpt/5.6.4/gni/mpich2-gnu/47 \
+\
+ -D CISM_GPTL_DIR=/project/projectdirs/piscees/cism_gptl/libgptl/libgptl-hopper-gnu_4.7.2 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -ffree-line-length-none " \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ -D CISM_GNU:BOOL=ON \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/hopper-pgi/README b/components/cism/glimmer-cism/builds/hopper-pgi/README
new file mode 100644
index 0000000000..fdba356696
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-pgi/README
@@ -0,0 +1,59 @@
+README file for the hopper-pgi build directory.
+
+The cmake configure file hopper-pgi-cmake can be used to build parallel versions of
+simple_glide and simple_bisicles, 2 programs that are part of CISM (the Community
+Ice Sheet Model). The PGI compiler suite is used for this build.
+
+Build Instructions:
+
+Standard Build (uses Trilinos, builds simple_glide, doesn't build simple_bisicles):
+
+In the builds/hopper-pgi directory, configure for the build using:
+
+make clean
+source hopper-pgi-cmake
+
+
+The configuration process should complete with a final message:
+-- Build files have been written to: /seacism/builds/hopper-pgi
+
+The next step is to use the make program to do the build:
+make -j 8
+
+---------
+
+In the file hopper-pgi-cmake, the first 4 lines of the cmake call can be modified
+to configure different builds. These lines are:
+ -D NO_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+ -D BUILD_SIMPLE_BISICLES:BOOL=OFF \
+
+For instance, to build simple_bisicles (and simple_glide), use:
+ -D NO_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+ -D BUILD_SIMPLE_BISICLES:BOOL=ON \
+
+For a serial build of simple_glide, use:
+ -D NO_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=OFF \
+ -D CISM_SERIAL_MODE:BOOL=ON \
+ -D BUILD_SIMPLE_BISICLES:BOOL=OFF \
+
+
+Dependencies:
+The packages this build depends on (Trilinos, BISICLES, and Chombo) have already
+been built. The paths to these packages can be found in hopper-pgi-cmake.
+
+Testing:
+
+simple_glide quick test:
+In seacism/tests/higher-order/dome, do:
+1) type dome.py, this will give a 'simple glide not found' error
+2) qsub -I -V -q interactive -l mppwidth=4
+3) aprun -n 4 ...hopper-config/example_drivers/simple_glide/src/simple_glide dome.9_5_2012.config
+
+
+simple_bisicles quick test:
+TBD.
diff --git a/components/cism/glimmer-cism/builds/hopper-pgi/hopper-bisicles-pgi-cmake b/components/cism/glimmer-cism/builds/hopper-pgi/hopper-bisicles-pgi-cmake
new file mode 100644
index 0000000000..8d4cc6b39b
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-pgi/hopper-bisicles-pgi-cmake
@@ -0,0 +1,128 @@
+# run this script by typing: source hopper-bisicles-pgi-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on hopper using the PGI compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to /global/u2/d/dmartin/BISICLES/code/interface)
+
+
+# This script should be run from the builds/hopper-pgi subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source hopper-bisicles-pgi-cmake
+echo
+echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide
+echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module.
+echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines:
+echo module unload python
+echo module load python/2.7.1
+echo
+#echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide
+#echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module.
+#echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines:
+#echo module unload python
+#echo module load python/2.7.1
+#echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload cray-hdf5-parallel
+module unload cray-netcdf-hdf5parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load modules
+module load cmake/2.8.10.1
+module load PrgEnv-pgi/4.2.34
+module load cray-hdf5-parallel/1.8.11
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python/2.7.5
+module load cray-shmem/6.0.1
+module load cray-mpich/6.0.1
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \
+ -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-gptl/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \
+\
+ -D CISM_NETCDF_DIR=$NETCDF_DIR \
+ -D CISM_FMAIN=/opt/pgi/13.6.0/linux86-64/13.6/lib/f90main.o \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CISM_MPI_BASE_DIR=$CRAY_MPICH2_DIR \
+ -D CISM_SCI_LIB_DIR=$CRAY_LIBSCI_PREFIX_DIR/lib \
+ -D CISM_GPTL_DIR=/project/projectdirs/ccsm1/libgptl/libgptl-pgi \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -DNO_RESCALE" \
+ -D BISICLES_LIB_SUBDIR=libpgi \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=OFF \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="$HDF5_DIR" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=$HDF5_DIR/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/hopper-pgi/hopper-petsc-bisicles-pgi-cmake b/components/cism/glimmer-cism/builds/hopper-pgi/hopper-petsc-bisicles-pgi-cmake
new file mode 100644
index 0000000000..a65c4b4c3c
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-pgi/hopper-petsc-bisicles-pgi-cmake
@@ -0,0 +1,122 @@
+# run this script by typing: source hopper-petsc-bisicles-pgi-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on hopper using the PGI compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to /global/u2/d/dmartin/BISICLES/code/interface)
+
+
+# This script should be run from the builds/hopper-pgi subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source hopper-petsc-bisicles-pgi-cmake
+echo
+#echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide
+#echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module.
+#echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines:
+#echo module unload python
+#echo module load python/2.7.1
+#echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload cray-hdf5-parallel
+module unload cray-netcdf-hdf5parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load modules
+module load cmake/2.8.10.1
+module load PrgEnv-pgi/4.2.34
+module load cray-hdf5-parallel/1.8.11
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python/2.7.5
+module load cray-shmem/6.0.1
+module load cray-mpich/6.0.1
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \
+ -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-gptl/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \
+\
+ -D CISM_NETCDF_DIR=$NETCDF_DIR \
+ -D CISM_FMAIN=/opt/pgi/13.6.0/linux86-64/13.6/lib/f90main.o \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CISM_MPI_BASE_DIR=$CRAY_MPICH2_DIR \
+ -D CISM_SCI_LIB_DIR=$CRAY_LIBSCI_PREFIX_DIR/lib \
+ -D CISM_GPTL_DIR=/project/projectdirs/ccsm1/libgptl/libgptl-pgi \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -g --diag_suppress 554,111,611 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -g -DNO_RESCALE" \
+ -D CISM_EXTRA_LIBS:STRING="-L$PETSC_DIR/$PETSC_ARCH/lib -lpetsc -lHYPRE -lparmetis -lmetis -llapack -lblas -lpthread -ldl" \
+ -D BISICLES_LIB_SUBDIR=libpgi \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=OFF \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="$HDF5_DIR" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=$HDF5_DIR/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/hopper-pgi/hopper-pgi-build-and-test.csh b/components/cism/glimmer-cism/builds/hopper-pgi/hopper-pgi-build-and-test.csh
new file mode 100644
index 0000000000..e493d23b68
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-pgi/hopper-pgi-build-and-test.csh
@@ -0,0 +1,155 @@
+#!/bin/csh
+
+
+
+# Master build script for Hopper. Last updated 2/28/2013 by SFP.
+# This is a hacked version of Kate's original script for use on Hopper.
+# For now, only supports parallel build with Trilinos using gnu and cmake.
+# Only a subset of the small, standard tests are run, on both 1 and 4 procs.
+
+# (1) execute from the builds/hopper-pgi subdirectory of CISM
+
+#add logic at the top to decide which versions to build
+
+# PARALLEL BUILD WITH CMAKE
+
+# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order"
+
+# 5/7/2014 DMR -- added performance tests:
+
+## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted
+## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that,
+## you can go to lines #193-194, 197-198, 201-202, and uncomment them.
+setenv PERF_TEST 0
+
+@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests) || ($5 == run-perf-tests))
+
+if ($run_perf_tests) then
+ setenv PERF_TEST 1
+endif
+
+@ skip_build_set = (($1 == skip-build) || ($2 == skip-build) || ($3 == skip-build) || ($4 == skip-build) || ($5 == skip-build))
+
+@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy) || ($5 == no-copy))
+
+@ skip_tests_set = (($1 == skip-tests) || ($2 == skip-tests) || ($3 == skip-tests) || ($4 == skip-tests) || ($5 == skip-tests))
+
+#**!move this and source it to your .bashrc (wherever your higher-order directory is located)
+#setenv TEST_DIR /lustre/atlas/scratch/$USER/cli062/higher-order
+
+if (! -d $TEST_DIR) mkdir -p $TEST_DIR
+
+setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv
+#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite
+
+setenv build_problem 0
+
+set COMPILER_NAME = pgi
+set PLATFORM_NAME = hopper
+
+# set PLATFORM_NAME = $1
+# set COMPILER_NAME = $2
+
+set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-cmake'
+set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out'
+set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out'
+#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job'
+set CISM_RUN_SCRIPT = 'hopjob'
+#set CISM_RUN_SCRIPT = 'ijob'
+#set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash'
+set CISM_VV_SCRIPT = 'carver_VV.bash'
+#set CISM_VV_SCRIPT = 'rhea_VV.bash'
+
+echo
+echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh'
+echo
+#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test'
+echo
+echo "Call with no-copy to prevent copying of the reg_test and livv defaults."
+echo "Call with run-perf-tests to run the performance tests."
+echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)."
+
+
+echo
+echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).'
+echo
+
+
+#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR'
+#echo 'Examples (place in .cshrc or .bashrc):'
+#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"'
+#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"'
+echo
+echo 'Setting TEST_DIR to the location: '
+echo 'TEST_DIR =' $TEST_DIR
+echo 'TEST_DIR must also be set in your .bashrc file.'
+
+# PARALLEL BUILD WITH CMAKE
+
+
+if ($skip_build_set == 0) then
+
+echo
+echo "Configuring and building in directory: " $PWD
+echo
+
+echo 'Configuring '$COMPILER_NAME' cmake build...'
+source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT
+echo 'Making parallel '$COMPILER_NAME'...'
+make -j 8 >& $CMAKE_BUILD_OUT
+
+if ( -e example-drivers/simple_glide/src/simple_glide ) then
+ echo 'Copying '$COMPILER_NAME' parallel simple_glide_'$COMPILER_NAME' to test directory'
+ cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+if ( -e cism_driver/cism_driver ) then
+ echo 'Copying '$COMPILER_NAME' parallel cism_driver_'$COMPILER_NAME' to test directory'
+ cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+endif # skip_build_set
+
+if ($build_problem == 1) then
+ echo "No job submitted -- cmake build failed."
+else # execute tests:
+
+ # Make copy of test suite in $TEST_DIR:
+if (! ($no_copy_set)) then
+ echo "Copying default reg_test and LIVV to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz
+ tar xfz reg_test_default.tgz
+ popd > /dev/null
+
+ if ($PERF_TEST) then
+ echo "Copying default perf_test to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz
+ tar xfz perf_test_default.tgz
+ popd > /dev/null
+ endif
+
+ cp -rf ../../tests/higher-order/livv $TEST_DIR
+endif
+
+if ($skip_tests_set) then
+ echo "Skipping tests."
+ exit
+endif
+
+csh $TEST_DIR/livv/run_livv_default_tests.csh $TEST_DIR $CISM_RUN_SCRIPT $PERF_TEST $CISM_VV_SCRIPT
+echo "Back in build-and-test script, exiting."
+exit
+
+
diff --git a/components/cism/glimmer-cism/builds/hopper-pgi/hopper-pgi-cmake b/components/cism/glimmer-cism/builds/hopper-pgi/hopper-pgi-cmake
new file mode 100755
index 0000000000..35641cfa71
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/hopper-pgi/hopper-pgi-cmake
@@ -0,0 +1,123 @@
+# run this script by typing: source hopper-pgi-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on hopper using the PGI compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to /global/u2/d/dmartin/BISICLES/code/interface)
+
+
+# This script should be run from the builds/hopper-pgi subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source hopper-pgi-cmake
+echo
+echo Warning: Python problem. After the last hopper system updates 1/28/13, in order to run simple_glide
+echo or simple_bisicles, you need to replace the python/2.7.3 module with the python/2.7.1 module.
+echo The easiest way to do this: In your .cshrc.ext or .bashrc.ext add the lines:
+echo module unload python
+echo module load python/2.7.1
+echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload cray-hdf5-parallel
+module unload cray-netcdf-hdf5parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load modules
+module load cmake/2.8.10.1
+module load PrgEnv-pgi/4.2.34
+module load cray-hdf5-parallel/1.8.11
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python/2.7.5
+module load cray-shmem/6.0.1
+module load cray-mpich/6.0.1
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \
+ -D CISM_TRILINOS_GPTL_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-gptl/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/project/projectdirs/piscees/trilinos-default/hopper-pgi-albany/install \
+\
+ -D CISM_NETCDF_DIR=$NETCDF_DIR \
+ -D CISM_FMAIN=/opt/pgi/13.6.0/linux86-64/13.6/lib/f90main.o \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CISM_MPI_BASE_DIR=$CRAY_MPICH2_DIR \
+ -D CISM_SCI_LIB_DIR=$CRAY_LIBSCI_PREFIX_DIR/lib \
+ -D CISM_GPTL_DIR=/project/projectdirs/ccsm1/libgptl/libgptl-pgi \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2" \
+ -D BISICLES_LIB_SUBDIR=libpgi \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+# Note: last argument above "../.." is path to top seacism directory
+
+
+# -D CISM_TRILINOS_DIR=/global/project/projectdirs/piscees/trilinos-default/hopper-pgi/install \
+# -D CISM_TRILINOS_DIR=/project/projectdirs/piscees/trilinos/hopper-pgi-ci-nophal/install \
+
+
+# -D CMAKE_PREFIX_PATH="$HDF5_DIR" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=$HDF5_DIR/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/linux-gnu-bisicles/build_simple_bisicles b/components/cism/glimmer-cism/builds/linux-gnu-bisicles/build_simple_bisicles
new file mode 100755
index 0000000000..62dd7b3d80
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/linux-gnu-bisicles/build_simple_bisicles
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+CHOMBO_DIR=${PWD}/../../../Chombo
+BISICLES_DIR=${PWD}/../../../BISICLES/code/interface
+CISM_DIR=${PWD}/../..
+CISM_CMAKE_BUILD_DIR=${PWD}
+
+#first run cmake script
+# ./linux-gnu-bisicles-cmake
+
+#now run script in BISICLES directory
+cd ${BISICLES_DIR}
+./build_simple_bisicles
+
+#finally, return here and move executable to a more convenient place
+#(in lieu of a formal "install" target)
+cd ${CISM_CMAKE_BUILD_DIR}
+mv ${CISM_CMAKE_BUILD_DIR}/example-drivers/simple_bisicles/src/simple_bisicles ${CISM_DIR}/bin
diff --git a/components/cism/glimmer-cism/builds/linux-gnu-bisicles/linux-gnu-bisicles-cmake b/components/cism/glimmer-cism/builds/linux-gnu-bisicles/linux-gnu-bisicles-cmake
new file mode 100755
index 0000000000..bdeae0aca1
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/linux-gnu-bisicles/linux-gnu-bisicles-cmake
@@ -0,0 +1,63 @@
+# run this script by typing: source linux-gnu-bisicles-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+
+
+# This script should be run from the builds/linux-gnu-bisicles subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+#set Netcdf installation directory here
+#setenv NETCDF_HOME /home/loren/users/dmartin/util/netcdf/netcdf-4.1.2
+#setenv NETCDF_HOME /usr/local/netcdf
+setenv NETCDF_HOME ${NETCDFHOME}
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D CISM_NETCDF_DIR=${NETCDF_HOME} \
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=mpiCC \
+ -D CMAKE_C_COMPILER=mpicc \
+ -D CMAKE_Fortran_COMPILER=mpif90 \
+\
+ -D CISM_HDF5_LIB_DIR=${ANAG_HDF5_DIR}/lib \
+ -D CISM_HDF5_LIBS="-DH5_USE_16_API -lhdf5 -lz -lstdc++" \
+ -D CMAKE_PREFIX_PATH="${ANAG_HDF5_DIR}" \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-g -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-g -ffree-line-length-none -fno-range-check -DNO_RESCALE" \
+ -D CISM_EXTRA_LIBS:STRING="-lblas" \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ ../..
+
+# Note: last argument above "../.." is path to top seacism directory
diff --git a/components/cism/glimmer-cism/builds/linux-gnu-bisicles/linux-gnu-bisicles-petsc-cmake b/components/cism/glimmer-cism/builds/linux-gnu-bisicles/linux-gnu-bisicles-petsc-cmake
new file mode 100755
index 0000000000..dc1d729fec
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/linux-gnu-bisicles/linux-gnu-bisicles-petsc-cmake
@@ -0,0 +1,63 @@
+# run this script by typing: source linux-gnu-bisicles-petsc-cmake
+# After thus script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+
+
+# This script should be run from the builds/linux-gnu-bisicles subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+#set Netcdf installation directory here
+#setenv NETCDF_HOME /home/loren/users/dmartin/util/netcdf/netcdf-4.1.2
+#setenv NETCDF_HOME /usr/local/netcdf
+setenv NETCDF_HOME ${NETCDFHOME}
+
+cmake \
+ -D CISM_NETCDF_DIR=${NETCDFHOME} \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=mpiCC \
+ -D CMAKE_C_COMPILER=mpicc \
+ -D CMAKE_Fortran_COMPILER=/usr/bin/mpif90 \
+\
+ -D CISM_HDF5_LIB_DIR=${ANAG_HDF5_DIR}/lib \
+ -D CISM_HDF5_LIBS="-DH5_USE_16_API -lhdf5 -lz -lstdc++" \
+ -D CMAKE_PREFIX_PATH="${ANAG_HDF5_DIR}" \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-g -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-g -ffree-line-length-none -fno-range-check -DNO_RESCALE" \
+ -D CISM_EXTRA_LIBS:STRING="-L$PETSC_DIR/$PETSC_ARCH/lib -lpetsc -llapack -lblas" \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ ../..
+
+# Note: last argument above "../.." is path to top seacism directory
diff --git a/components/cism/glimmer-cism/builds/linux-gnu-cism/linux-gnu-cism-cmake b/components/cism/glimmer-cism/builds/linux-gnu-cism/linux-gnu-cism-cmake
new file mode 100755
index 0000000000..4cb94b9be6
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/linux-gnu-cism/linux-gnu-cism-cmake
@@ -0,0 +1,67 @@
+# cmake configuration script that works on the Linux box in Matt's office (blueskies) with GCC
+# Others will need to modify the Netcdf path.
+# This config script is setup to perform a parallel build with Trilinos.
+#
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+# After this executes, do:
+# make -j 8
+#
+
+echo
+echo Run this script by typing: source linux-gnu-cism-cmake
+echo
+echo Set CISM_TRILINOS_DIR to your Trilinos installation directory.
+echo
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_GNU=ON \
+\
+ -D CISM_TRILINOS_DIR=$CISM_TRILINOS_DIR \
+ -D CISM_NETCDF_DIR="/usr" \
+ -D CISM_NETCDF_LIBS="netcdff" \
+\
+ -D CMAKE_Fortran_FLAGS="-g -O2 -ffree-line-length-none -fPIC -fno-range-check" \
+\
+ -D CMAKE_CXX_COMPILER=mpicxx \
+ -D CMAKE_C_COMPILER=mpicc \
+ -D CMAKE_Fortran_COMPILER=mpif90 \
+\
+ -D CISM_EXTRA_LIBS:STRING="-lblas" \
+\
+ -D CISM_MPI_INC_DIR=/usr/lib/openmpi/lib \
+ -D CISM_MPI_LIB_DIR=/usr/lib/openmpi/lib \
+\
+ -D CMAKE_VERBOSE_MAKEFILE=OFF \
+ ../..
+
+# Note: last argument above "../.." is path to top seacism directory
+
diff --git a/components/cism/glimmer-cism/builds/linux-gnu-cism/linux-gnu-cism-cmake-serial b/components/cism/glimmer-cism/builds/linux-gnu-cism/linux-gnu-cism-cmake-serial
new file mode 100755
index 0000000000..c2b9f3e551
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/linux-gnu-cism/linux-gnu-cism-cmake-serial
@@ -0,0 +1,71 @@
+# cmake configuration script that works on the Linux box in Matt's office (blueskies) with GCC
+# Others will need to modify the Netcdf path.
+# This config script is setup to perform a serial build without Trilinos.
+#
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# NO_TRILINOS -- OFF by default, set to on for builds without Trilinos
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_BUILD_SIMPLE_GLIDE -- ON by default, set to OFF to not build simple_glide
+# CISM_BUILD_SIMPLE_BISICLES -- OFF by default, set to ON to build simple_bisicles
+# Setting NO_TRILINOS to ON will generate a much smaller executable for this build.
+# CISM_BUILD_EXTRA_EXECUTABLES -- OFF by default, set to ON to build eis_glide and others
+# CISM_USE_GPTL_INSTRUMENTATION -- OFF by default, set to ON to use GPTL instrumentation
+
+# Serial Build Notes: Setting NO_TRILINOS=ON, CISM_MPI_MODE=OFF, CISM_SERIAL_MODE=ON will
+# configure for a serial build. (Note that the openmpi compilers will be used, but act as
+# pass-throughs to the underlying serial compilers in this case. If MPI is not installed,
+# set the serial compilers directly.)
+
+# NOTE: There is currently an incompatibility between simple_bisicles and GPTL. If
+# the CISM_BUILD_SIMPLE_BISICLES is ON, the GPTL instrumentation is turned OFF.
+
+
+# After this executes, do:
+# make -j 8
+#
+
+echo
+echo Run this script by typing: source linux-gnu-cism-cmake-serial
+echo
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=OFF \
+ -D CISM_SERIAL_MODE:BOOL=ON \
+ -D CISM_BUILD_CISM_DRIVER=ON \
+ -D CISM_BUILD_SIMPLE_GLIDE:BOOL=OFF \
+ -D CISM_BUILD_SIMPLE_BISICLES:BOOL=OFF \
+ -D CISM_BUILD_GLINT_EXAMPLE:BOOL=OFF \
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_USE_DEFAULT_IO:BOOL=OFF \
+ -D CISM_USE_CISM_FRONT_END:BOOL=ON \
+\
+ -D CISM_GNU=ON \
+\
+ -D CISM_NETCDF_DIR="/usr" \
+ -D CISM_NETCDF_LIBS="netcdff" \
+\
+ -D CMAKE_Fortran_FLAGS="-g -O2 -ffree-line-length-none -fPIC -fno-range-check" \
+\
+ -D CMAKE_CXX_COMPILER=g++ \
+ -D CMAKE_C_COMPILER=gcc \
+ -D CMAKE_Fortran_COMPILER=gfortran \
+\
+ -D CISM_EXTRA_LIBS:STRING="-lblas" \
+\
+ -D CMAKE_VERBOSE_MAKEFILE=OFF \
+ ../..
+
+# Note: last argument above "../.." is path to top seacism directory
+
diff --git a/components/cism/glimmer-cism/builds/linux-gnu-felix/linux-gnu-felix-cmake b/components/cism/glimmer-cism/builds/linux-gnu-felix/linux-gnu-felix-cmake
new file mode 100755
index 0000000000..a77cc9e5aa
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/linux-gnu-felix/linux-gnu-felix-cmake
@@ -0,0 +1,41 @@
+# cmake configuration script that works on Andy's Linux box with GCC
+# Others will need to modify the Trilinos and Netcdf paths.
+# This script needs to be run from a subdirectory (e.g. build-linux)
+# of the main seacism repository (reflected in the several instances of
+# ".." below).
+
+# After this executes, do:
+# make -j 8
+# cp example-drivers/simple_glide/src/sgcmake .
+#
+
+rm ./CMakeCache.txt
+
+echo
+echo "Doing CMake Configuration step"
+
+TRILINOS_DIR=/home/ikalash/Trilinos_Albany/Trilinos/build/install
+ALBANY_DIR=/home/ikalash/Desktop/clean/Albany/cism-build/install
+NETCDF_DIR=/home/ikalash/Install/netcdf-4.0.1
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+# Note: last argument above ".." is path to top seacism directory
+# -D ALBANY_BUILD_DIR="/home/ikalash/Desktop/clean/Albany_clean/build_cism" \
+# -D ALBANY_BASE_DIR="/home/ikalash/Desktop/clean/Albany_clean" \
+
+#About the DEBUG_OUTPUT_VERBOSITY:
+#-D DEBUG_OUTPUT_VERBOSITY:INT=0 \ No debug output
+#-D DEBUG_OUTPUT_VERBOSITY:INT=1 \ Minimal debug output
+#-D DEBUG_OUTPUT_VERBOSITY:INT=2 \ Maximal debug output
+#It is set to 1 by default.
+#It is set to 1 by default.
diff --git a/components/cism/glimmer-cism/builds/mac-gnu-serial/mac-gnu-cmake-serial b/components/cism/glimmer-cism/builds/mac-gnu-serial/mac-gnu-cmake-serial
new file mode 100644
index 0000000000..99da0a7219
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/mac-gnu-serial/mac-gnu-cmake-serial
@@ -0,0 +1,87 @@
+# run this script by typing: source mac-gnu-cmake-serial
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds simple_glide, and if you enable it below,
+# and cism_driver on a Mac using the Gnu compiler suite.
+# This build is serial. See the mac-gnu directory for a parallel build.
+# Trilinos is not used.
+
+# This script should be run from the builds/mac-gnu subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+#
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_SERIAL_MODE -- OFF by default, set to ON here for a serial build.
+# CISM_MPI_MODE -- ON by default, set to OFF here for a serial build.
+# CISM_USE_TRILINOS -- Set OFF below to exclude Trilinos from the build.
+# CISM_BUILD_SIMPLE_GLIDE -- ON by default, set to OFF to not build simple_glide
+# CISM_BUILD_SIMPLE_BISICLES -- OFF by default, set to ON to build simple_bisicles
+# Setting NO_TRILINOS to ON will generate a much smaller executable for this build.
+# CISM_BUILD_EXTRA_EXECUTABLES -- OFF by default, set to ON to build eis_glide and others
+# CISM_USE_GPTL_INSTRUMENTATION -- OFF by default, set to ON to use GPTL instrumentation
+
+# Serial Build Notes: Setting CISM_USE_TRILINOS=OFF, CISM_MPI_MODE=OFF, CISM_SERIAL_MODE=ON will
+# configure for a serial build. (Note that the openmpi compilers will be used if specified, but act as
+# pass-throughs to the underlying serial compilers in this case. If MPI is not installed,
+# set the serial compilers directly.)
+
+# You may need to manually set the NETCDF_PATH variable below to point to your NetCDF installation.
+# If you have NetCDF installed with MacPorts, you can use that by setting it to "/opt/local"
+# (assuming default MacPorts installation location).
+
+# NOTE: There is currently an incompatibility between simple_bisicles and GPTL. If
+# the CISM_BUILD_SIMPLE_BISICLES is ON, the GPTL instrumentation is turned OFF.
+
+echo
+echo Run this script by typing: source mac-cmake
+echo
+
+# remove old build data:
+rm -f ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+echo
+echo "Doing CMake Configuration step"
+
+NETCDF_PATH="/opt/local"
+
+cmake \
+ -D CISM_SERIAL_MODE:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=OFF \
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_BUILD_SIMPLE_GLIDE:BOOL=OFF \
+ -D CISM_BUILD_SIMPLE_BISICLES:BOOL=OFF \
+ -D CISM_BUILD_GLINT_EXAMPLE:BOOL=OFF \
+ -D CISM_BUILD_EXTRA_EXECUTABLES:BOOL=OFF \
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_USE_DEFAULT_IO:BOOL=OFF \
+\
+ -D CISM_GNU=ON \
+\
+ -D CISM_NETCDF_DIR=$NETCDF_PATH \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=gxx \
+ -D CMAKE_C_COMPILER=gcc \
+ -D CMAKE_Fortran_COMPILER=gfortran \
+\
+\
+ -D CMAKE_CXX_FLAGS:STRING="-g " \
+ -D CMAKE_Fortran_FLAGS="-g -O3 -ffree-line-length-none -fbacktrace" \
+\
+ -D CISM_EXTRA_LIBS="-lblas" \
+\
+\
+ ../..
+
+# Note: last argument above "../.." is path to top seacism directory
diff --git a/components/cism/glimmer-cism/builds/mac-gnu/mac-gnu-build-and-test.csh b/components/cism/glimmer-cism/builds/mac-gnu/mac-gnu-build-and-test.csh
new file mode 100644
index 0000000000..c703aa2018
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/mac-gnu/mac-gnu-build-and-test.csh
@@ -0,0 +1,160 @@
+#!/bin/csh
+
+
+
+# Master build script for mac laptops. Last updated 2/28/2013 by SFP.
+# This is a hacked version of Kate's original script for use on Hopper.
+# For now, only supports parallel build with Trilinos using gnu and cmake.
+# Only a subset of the small, standard tests are run, on both 1 and 4 procs.
+
+# (1) execute from the builds/titan-gnu subdirectory of CISM
+
+#add logic at the top to decide which versions to build
+
+# PARALLEL BUILD WITH CMAKE
+
+# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order"
+
+# 5/7/2014 DMR -- added performance tests:
+
+## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted
+## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that,
+## you can go to lines #193-194, 197-198, 201-202, and uncomment them.
+setenv PERF_TEST 0
+
+
+setenv QUICK_TEST nope
+if (($1 == quick-test) || ($2 == quick-test) || ($3 == quick-test) || ($4 == quick-test) || ($5 == quick-test)) then
+ setenv QUICK_TEST quick-test
+endif
+
+@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests) || ($5 == run-perf-tests))
+
+if ($run_perf_tests) then
+ setenv PERF_TEST 1
+endif
+
+@ skip_build_set = (($1 == skip-build) || ($2 == skip-build) || ($3 == skip-build) || ($4 == skip-build) || ($5 == skip-build))
+
+@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy) || ($5 == no-copy))
+
+@ skip_tests_set = (($1 == skip-tests) || ($2 == skip-tests) || ($3 == skip-tests) || ($4 == skip-tests) || ($5 == skip-tests))
+
+#**!move this and source it to your .bashrc (wherever your higher-order directory is located)
+#setenv TEST_DIR /lustre/atlas/scratch/$USER/cli062/higher-order
+
+if (! -d $TEST_DIR) mkdir -p $TEST_DIR
+
+setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv
+#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite
+
+setenv build_problem 0
+
+set COMPILER_NAME = gnu
+set PLATFORM_NAME = mac
+
+# set PLATFORM_NAME = $1
+# set COMPILER_NAME = $2
+
+set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-cmake'
+set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out'
+set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out'
+#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job'
+#set CISM_RUN_SCRIPT = 'hopjob'
+set CISM_RUN_SCRIPT = './macjob'
+#set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash'
+set CISM_VV_SCRIPT = 'mac_VV.bash'
+
+echo
+echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh'
+echo
+#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test'
+echo
+echo "Call with no-copy to prevent copying of the reg_test and livv defaults."
+echo "Call with run-perf-tests to run the performance tests."
+echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)."
+
+
+echo
+echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).'
+echo
+
+
+#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR'
+#echo 'Examples (place in .cshrc or .bashrc):'
+#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"'
+#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"'
+echo
+echo 'Setting TEST_DIR to the location: '
+echo 'TEST_DIR =' $TEST_DIR
+echo 'TEST_DIR must also be set in your .bashrc file.'
+
+# PARALLEL BUILD WITH CMAKE
+
+
+if ($skip_build_set == 0) then
+
+echo
+echo "Configuring and building in directory: " $PWD
+echo
+
+echo 'Configuring '$COMPILER_NAME' cmake build...'
+source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT
+echo 'Making parallel '$COMPILER_NAME'...'
+make -j 8 >& $CMAKE_BUILD_OUT
+
+#if ( -e example-drivers/simple_glide/src/simple_glide ) then
+# echo 'Copying '$COMPILER_NAME' parallel simple_glide_'$COMPILER_NAME' to test directory'
+# cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_$COMPILER_NAME
+#else
+# echo "cmake '$COMPILER_NAME' build failed, no executable"
+# @ build_problem = 1
+#endif
+
+if ( -e cism_driver/cism_driver ) then
+ echo 'Copying '$COMPILER_NAME' parallel cism_driver_'$COMPILER_NAME' to test directory'
+ cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_$COMPILER_NAME
+
+ #copy cism_driver to simple_glide, until macjob scripts have been changed:
+ cp -f cism_driver/cism_driver $TEST_DIR/simple_glide_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+endif # skip_build_set
+
+if ($build_problem == 1) then
+ echo "No job submitted -- cmake build failed."
+else # execute tests:
+
+ # Make copy of test suite in $TEST_DIR:
+if (! ($no_copy_set)) then
+ echo "Copying default reg_test and LIVV to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz
+ tar xfz reg_test_default.tgz
+ popd > /dev/null
+
+ if ($PERF_TEST) then
+ echo "Copying default perf_test to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz
+ tar xfz perf_test_default.tgz
+ popd > /dev/null
+ endif
+
+ cp -rf ../../tests/higher-order/livv $TEST_DIR
+endif
+
+if ($skip_tests_set) then
+ echo "Skipping tests."
+ exit
+endif
+
+csh $TEST_DIR/livv/run_livv_default_tests.csh $TEST_DIR $CISM_RUN_SCRIPT $PERF_TEST $CISM_VV_SCRIPT $QUICK_TEST
+
diff --git a/components/cism/glimmer-cism/builds/mac-gnu/mac-gnu-cmake b/components/cism/glimmer-cism/builds/mac-gnu/mac-gnu-cmake
new file mode 100644
index 0000000000..17f11eaa8f
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/mac-gnu/mac-gnu-cmake
@@ -0,0 +1,78 @@
+# run this script by typing: source mac-gnu-cmake
+# After this script completes, type: make -j 8
+
+# This script is also used by mac-gnu-build-and-test.csh.
+
+# This cmake configuration script builds cism_driver on a Mac using the Gnu compiler suite.
+# If Trilinos is used, it relies on a build of Trilinos located in $CISM_TRILINOS_DIR (set below).
+# If BISICLES is used, it relies on a build of BISICLES located in $BISICLES_INTERFACE_DIR (set below).
+
+# This script should be run from the builds/mac-gnu subdirectory
+# of the main CISM repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+# Serial Build Notes: Setting CISM_USE_TRILINOS=OFF, CISM_MPI_MODE=OFF, CISM_SERIAL_MODE=ON will
+# configure for a serial build. (Note that the openmpi compilers will be used, but act as
+# pass-throughs to the underlying serial compilers in this case. If MPI is not installed,
+# set the serial compilers directly.)
+
+echo
+echo Run this script by typing: source mac-gnu-cmake
+echo
+echo Set CISM_TRILINOS_DIR to your Trilinos installation directory.
+echo
+
+# remove old build data:
+rm -f ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_GNU=ON \
+\
+ -D CISM_TRILINOS_DIR=$CISM_TRILINOS_DIR \
+ -D CISM_NETCDF_DIR=/opt/local \
+ -D CISM_MPI_BASE_DIR=/opt/local \
+ -D CISM_MPI_INC_DIR=/opt/local/lib \
+ -D CISM_EXTRA_LIBS="-lblas" \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=mpicxx \
+ -D CMAKE_C_COMPILER=mpicc \
+ -D CMAKE_Fortran_COMPILER=mpif90 \
+\
+ -D CMAKE_CXX_FLAGS="" \
+ -D CMAKE_Fortran_FLAGS="-g -O2 -ffree-line-length-none" \
+\
+ -D BISICLES_INTERFACE_DIR=~/BISICLES/CISM-interface/interface \
+ ../..
+
+# Note: last argument above "../.." is path to top CISM directory
diff --git a/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-build-and-test-serial.csh b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-build-and-test-serial.csh
new file mode 100644
index 0000000000..5baef63823
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-build-and-test-serial.csh
@@ -0,0 +1,284 @@
+#!/bin/csh
+
+
+
+# Master build script for titan. Last updated 2/28/2013 by SFP.
+# This is a hacked version of Kate's original script for use on Hopper.
+# For now, only supports parallel build with Trilinos using gnu and cmake.
+# Only a subset of the small, standard tests are run, on both 1 and 4 procs.
+
+# (1) execute from the builds/titan-gnu subdirectory of CISM
+
+#add logic at the top to decide which versions to build
+
+# PARALLEL BUILD WITH CMAKE
+
+# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order"
+
+# 5/7/2014 DMR -- added performance tests:
+
+## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted
+## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that,
+## you can go to lines #193-194, 197-198, 201-202, and uncomment them.
+setenv PERF_TEST 0
+
+@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests))
+if ($run_perf_tests) then
+ setenv PERF_TEST 1
+endif
+
+#**!move this and source it to your .bashrc (wherever your higher-order directory is located)
+#setenv TEST_DIR /global/scratch2/sd/$USER/cism2/higher-order
+
+if (! -d $TEST_DIR) mkdir -p $TEST_DIR
+
+setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv
+#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite
+
+setenv build_problem 0
+
+set COMPILER_NAME = gnu
+set PLATFORM_NAME = titan
+
+# set PLATFORM_NAME = $1
+# set COMPILER_NAME = $2
+
+set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-serial-cmake'
+set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out'
+set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out'
+#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job'
+#set CISM_RUN_SCRIPT = 'hopjob'
+set CISM_RUN_SCRIPT = 'ijob'
+#set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash'
+set CISM_VV_SCRIPT = 'rhea_VV.bash'
+
+echo
+echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test-serial.csh'
+echo
+#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test'
+echo
+echo "Call with no-copy to prevent copying of the reg_test and livv defaults."
+echo "Call with run-perf-tests to run the performance tests."
+echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)."
+
+
+echo
+echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).'
+echo
+
+
+#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR'
+#echo 'Examples (place in .cshrc or .bashrc):'
+#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"'
+#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"'
+echo
+echo 'Setting TEST_DIR to the location: '
+echo 'TEST_DIR =' $TEST_DIR
+echo 'TEST_DIR must also be set in your .bashrc file.'
+
+# PARALLEL BUILD WITH CMAKE
+
+echo
+echo "Configuring and building in directory: " $PWD
+echo
+
+echo 'Configuring '$COMPILER_NAME' cmake build...'
+source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT
+echo 'Making serial '$COMPILER_NAME'...'
+make -j 8 >& $CMAKE_BUILD_OUT
+
+#if ( -e example-drivers/simple_glide/src/simple_glide ) then
+# echo 'Copying '$COMPILER_NAME' simple_glide_serial to test directory'
+# cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_serial
+#else
+# echo "cmake '$COMPILER_NAME' build failed, no executable"
+# @ build_problem = 1
+#endif
+
+if ( -e cism_driver/cism_driver ) then
+ echo 'Copying '$COMPILER_NAME' cism_driver_serial to test directory'
+ cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_serial
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+
+if ($build_problem == 1 ) then
+ echo "No job submitted -- cmake build failed."
+else # execute tests:
+
+
+
+@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy))
+
+ # Make copy of test suite in $TEST_DIR:
+if (!($no_copy_set)) then
+ echo "Copying default reg_test and LIVV to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz
+ tar xfz reg_test_default.tgz
+ popd > /dev/null
+
+ if ($PERF_TEST) then
+ echo "Copying default perf_test to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz
+ tar xfz perf_test_default.tgz
+ popd > /dev/null
+ endif
+
+ cp -rf ../../tests/higher-order/livv $TEST_DIR
+endif
+
+ if (($1 == "skip-tests") || ($2 == "skip-tests") || ($3 == "skip-tests") || ($4 == "skip-tests")) then
+ echo "Skipping tests."
+ exit
+ endif
+
+ echo 'Submitting test jobs to compute nodes.'
+
+ setenv run_all_tests 1
+ if (($1 == "quick-test") || ($2 == "quick-test") || ($3 == "quick-test") || ($4 == "quick-test")) then
+ setenv run_all_tests 0
+ endif
+
+
+
+
+ #diagnostic dome test case
+ cd $TEST_DIR/reg_test/dome30/diagnostic
+ qsub $CISM_RUN_SCRIPT
+
+
+ if ($run_all_tests == 1) then
+
+ #evolving dome test case
+ cd $TEST_DIR/reg_test/dome30/evolving
+ qsub $CISM_RUN_SCRIPT
+
+ # confined shelf to periodic BC
+ cd $TEST_DIR/reg_test/confined-shelf
+ qsub $CISM_RUN_SCRIPT
+
+ # circular shelf to periodic BC
+ cd $TEST_DIR/reg_test/circular-shelf
+ qsub $CISM_RUN_SCRIPT
+
+ # ISMIP test case A, 80 km
+ cd $TEST_DIR/reg_test/ismip-hom-a/80km
+ qsub $CISM_RUN_SCRIPT
+
+ # ISMIP test case A, 20 km
+ cd $TEST_DIR/reg_test/ismip-hom-a/20km
+ qsub $CISM_RUN_SCRIPT
+
+ ## ISMIP test case C, 80 km - not operational for glide
+ cd $TEST_DIR/reg_test/ismip-hom-c/80km
+ qsub $CISM_RUN_SCRIPT
+
+ ## ISMIP test case C, 20 km - not operational for glide
+ cd $TEST_DIR/reg_test/ismip-hom-c/20km
+ qsub $CISM_RUN_SCRIPT
+ endif
+
+ if ($PERF_TEST == 0 ) then
+ echo "No performance suite jobs were submitted."
+ else
+ echo 'Submitting performance jobs to compute nodes.'
+ echo 'Go to rhea to complete Visualization and Verification (LIVV)'
+
+ #dome 60 test case
+ cd $TEST_DIR/perf_test/dome60
+ qsub $CISM_RUN_SCRIPT
+
+ #dome 120 test case
+ cd $TEST_DIR/perf_test/dome120
+ qsub $CISM_RUN_SCRIPT
+
+ #dome 240 test case
+ cd $TEST_DIR/perf_test/dome240
+ qsub $CISM_RUN_SCRIPT
+
+ #dome 500 test case
+ cd $TEST_DIR/perf_test/dome500
+ qsub $CISM_RUN_SCRIPT
+
+ #dome 1000 test case - not operational currently
+ # cd $TEST_DIR/perf_test/dome1000
+ # qsub $CISM_RUN_SCRIPT
+
+ #gis 4km test case
+ # cd $TEST_DIR/perf_test/gis_4km
+ # qsub $CISM_RUN_SCRIPT
+
+ #gis 2km test case
+ # cd $TEST_DIR/perf_test/gis_2km
+ # qsub $CISM_RUN_SCRIPT
+
+ #gis 1km test case
+ # cd $TEST_DIR/perf_test/gis_1km
+ # qsub $CISM_RUN_SCRIPT
+ endif
+endif
+
+
+ echo
+ echo "Test Suite jobs started -- using qstat to monitor."
+ echo
+
+ set still_running = 1
+ set counter = 0
+ set timeout_error = 0
+
+ set run_list = "dome_30_test dome_30_evolve conf_shelf circ_shelf ishoma_80 ishoma_20 dome_60_test dome_120_test dome_240_test dome_500_test dome_1000_test"
+
+ while ($still_running)
+ set ls_out = `qstat | grep $USER`
+
+ set found = 0
+ foreach cur ($run_list)
+ foreach elem ($ls_out)
+ if ("$cur" == "$elem") then
+ if (($counter % 5) == 0) echo "Still running: $cur"
+ set found = 1
+ endif
+ # if ($found == 1) break
+ end
+ end
+ if ($found == 0) then
+ echo "All jobs completed."
+ set still_running = 0
+ else
+ sleep 60
+ endif
+ @ counter = $counter + 1
+ if ($counter == 120) then
+ set still_running = 0
+ set timeout_error = 1
+ echo "Timeout error -- jobs are taking too long. Exiting script."
+ endif
+ if (($counter % 5) == 0) echo "Minutes: $counter"
+ end
+
+ if ($timeout_error == 0) then
+ echo "Total minutes: $counter"
+ echo
+
+ echo "Call disabled to: $CISM_VV_SCRIPT, which is located in:"
+ echo "$TEST_DIR/livv"
+ echo
+ echo "Perform this step on rhea after the Test Suite jobs have completed."
+ # cd $TEST_DIR/livv
+ # bash $CISM_VV_SCRIPT from-script $1
+ endif
+
+ echo
+ # echo "If there were errors finding ncl, add the ncl installation directory to your PATH in ~/.bashrc."
+ echo
+
+endif
diff --git a/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-build-and-test.csh b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-build-and-test.csh
new file mode 100644
index 0000000000..1167f96ba8
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-build-and-test.csh
@@ -0,0 +1,154 @@
+#!/bin/csh
+
+
+
+# Master build script for titan. Last updated 2/28/2013 by SFP.
+# This is a hacked version of Kate's original script for use on Hopper.
+# For now, only supports parallel build with Trilinos using gnu and cmake.
+# Only a subset of the small, standard tests are run, on both 1 and 4 procs.
+
+# (1) execute from the builds/titan-gnu subdirectory of CISM
+
+#add logic at the top to decide which versions to build
+
+# PARALLEL BUILD WITH CMAKE
+
+# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order"
+
+# 5/7/2014 DMR -- added performance tests:
+
+## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted
+## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that,
+## you can go to lines #193-194, 197-198, 201-202, and uncomment them.
+setenv PERF_TEST 0
+
+@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests) || ($5 == run-perf-tests))
+
+if ($run_perf_tests) then
+ setenv PERF_TEST 1
+endif
+
+@ skip_build_set = (($1 == skip-build) || ($2 == skip-build) || ($3 == skip-build) || ($4 == skip-build) || ($5 == skip-build))
+
+@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy) || ($5 == no-copy))
+
+@ skip_tests_set = (($1 == skip-tests) || ($2 == skip-tests) || ($3 == skip-tests) || ($4 == skip-tests) || ($5 == skip-tests))
+
+#**!move this and source it to your .bashrc (wherever your higher-order directory is located)
+#setenv TEST_DIR /lustre/atlas/scratch/$USER/cli062/higher-order
+
+if (! -d $TEST_DIR) mkdir -p $TEST_DIR
+
+setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv
+#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite
+
+setenv build_problem 0
+
+set COMPILER_NAME = gnu
+set PLATFORM_NAME = titan
+
+# set PLATFORM_NAME = $1
+# set COMPILER_NAME = $2
+
+set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-cmake'
+set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out'
+set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out'
+#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job'
+#set CISM_RUN_SCRIPT = 'hopjob'
+set CISM_RUN_SCRIPT = 'ijob'
+#set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash'
+set CISM_VV_SCRIPT = 'rhea_VV.bash'
+
+echo
+echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh'
+echo
+#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test'
+echo
+echo "Call with no-copy to prevent copying of the reg_test and livv defaults."
+echo "Call with run-perf-tests to run the performance tests."
+echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)."
+
+
+echo
+echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).'
+echo
+
+
+#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR'
+#echo 'Examples (place in .cshrc or .bashrc):'
+#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"'
+#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"'
+echo
+echo 'Setting TEST_DIR to the location: '
+echo 'TEST_DIR =' $TEST_DIR
+echo 'TEST_DIR must also be set in your .bashrc file.'
+
+# PARALLEL BUILD WITH CMAKE
+
+
+if ($skip_build_set == 0) then
+
+echo
+echo "Configuring and building in directory: " $PWD
+echo
+
+echo 'Configuring '$COMPILER_NAME' cmake build...'
+source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT
+echo 'Making parallel '$COMPILER_NAME'...'
+make -j 8 >& $CMAKE_BUILD_OUT
+
+#if ( -e example-drivers/simple_glide/src/simple_glide ) then
+# echo 'Copying '$COMPILER_NAME' parallel simple_glide_'$COMPILER_NAME' to test directory'
+# cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_$COMPILER_NAME
+#else
+# echo "cmake '$COMPILER_NAME' build failed, no executable"
+# @ build_problem = 1
+#endif
+
+if ( -e cism_driver/cism_driver ) then
+ echo 'Copying '$COMPILER_NAME' parallel cism_driver_'$COMPILER_NAME' to test directory'
+ cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+endif # skip_build_set
+
+if ($build_problem == 1) then
+ echo "No job submitted -- cmake build failed."
+else # execute tests:
+
+ # Make copy of test suite in $TEST_DIR:
+if (! ($no_copy_set)) then
+ echo "Copying default reg_test and LIVV to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz
+ tar xfz reg_test_default.tgz
+ popd > /dev/null
+
+ if ($PERF_TEST) then
+ echo "Copying default perf_test to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz
+ tar xfz perf_test_default.tgz
+ popd > /dev/null
+ endif
+
+ cp -rf ../../tests/higher-order/livv $TEST_DIR
+endif
+
+if ($skip_tests_set) then
+ echo "Skipping tests."
+ exit
+endif
+
+csh $TEST_DIR/livv/run_livv_default_tests.csh $TEST_DIR $CISM_RUN_SCRIPT $PERF_TEST $CISM_VV_SCRIPT
+echo "Back in build-and-test script, exiting."
+exit
+
+
diff --git a/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-cmake b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-cmake
new file mode 100644
index 0000000000..e50b805c56
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-cmake
@@ -0,0 +1,124 @@
+# run this script by typing: source titan-gnu-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on titan using the GNU compiler suite.
+
+# This script should be run from the builds/titan-gnu subdirectory
+# of the main CISM repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+echo
+echo Run this script by typing: source titan-gnu-cmake
+echo
+
+
+# do this to reduce spurious errors reported by the purge below:
+module unload PrgEnv-gnu PrgEnv-pgi
+
+module --silent purge
+
+#module unload cmake
+#module unload cray-hdf5
+#module unload cray-hdf5-parallel
+#module unload netcdf
+#module unload python
+#module unload cray-shmem
+#module unload cray-mpich2
+#module unload netcdf-hdf5parallel cray-netcdf-hdf5parallel boost gcc
+#module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+
+module load modules
+module load cmake/2.8.10.2
+module load PrgEnv-gnu
+module load gcc/4.8.2
+module load cray-shmem
+module load cray-mpich
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python
+module load boost/1.54.0
+
+module list
+
+# remove old build data:
+rm -rf ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-gnu-ci-nophal/install \
+ -D CISM_TRILINOS_GPTL_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-gnu-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-gnu-ci-nophal/install \
+\
+ -D CISM_GPTL_DIR=/lustre/atlas/world-shared/cli900/cesm/software/libgptl/libgptl-titan-gnu \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.3.0/GNU/48 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -ffree-line-length-none -fno-range-check" \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_USE_CXX_IMPLICIT_LIBS:BOOL=OFF \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# -D CISM_TRILINOS_DIR=/tmp/proj/cli054/trilinos-10.12/FEB2013_FAST_PGI/install \
+
+# -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+# -D CISM_Fortran_FLAGS:STRING="-O2" \
+
+
+# -D CISM_FMAIN=/opt/pgi/13.7.0/linux86-64/13.7/lib/f90main.o \
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-cmake-newtrilinos b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-cmake-newtrilinos
new file mode 100644
index 0000000000..0901aaf86f
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-cmake-newtrilinos
@@ -0,0 +1,124 @@
+# run this script by typing: source titan-gnu-cmake-newtrilinos
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on titan using the GNU compiler suite.
+
+# This script should be run from the builds/titan-gnu subdirectory
+# of the main CISM repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+echo
+echo Run this script by typing: source titan-gnu-cmake-newtrilinos
+echo
+
+
+# do this to reduce spurious errors reported by the purge below:
+module unload PrgEnv-gnu PrgEnv-pgi
+
+module --silent purge
+
+#module unload cmake
+#module unload cray-hdf5
+#module unload cray-hdf5-parallel
+#module unload netcdf
+#module unload python
+#module unload cray-shmem
+#module unload cray-mpich2
+#module unload netcdf-hdf5parallel cray-netcdf-hdf5parallel boost gcc
+#module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+
+module load modules
+module load cmake/2.8.10.2
+module load PrgEnv-gnu
+module load gcc/4.8.2
+module load cray-shmem
+module load cray-mpich
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python
+module load boost/1.54.0
+
+module list
+
+# remove old build data:
+rm -rf ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+  -D CISM_TRILINOS_DIR=/ccs/proj/cli101/software/Trilinos/Trilinos_gptl/titan-gnu-ci-nophal/install \
+ -D CISM_TRILINOS_GPTL_DIR=/ccs/proj/cli101/software/Trilinos/Trilinos_gptl/titan-gnu-ci-nophal/install \
+  -D CISM_TRILINOS_ALBANY_DIR=/ccs/proj/cli101/software/Trilinos/Trilinos_gptl/titan-gnu-ci-nophal/install \
+\
+ -D CISM_GPTL_DIR=/ccs/proj/cli062/cism_gptl/libgptl/libgptl-titan-gnu \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.3.0/GNU/48 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -ffree-line-length-none -fno-range-check" \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_USE_CXX_IMPLICIT_LIBS:BOOL=OFF \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# -D CISM_TRILINOS_DIR=/tmp/proj/cli054/trilinos-10.12/FEB2013_FAST_PGI/install \
+
+# -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+# -D CISM_Fortran_FLAGS:STRING="-O2" \
+
+
+# -D CISM_FMAIN=/opt/pgi/13.7.0/linux86-64/13.7/lib/f90main.o \
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-serial-cmake b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-serial-cmake
new file mode 100644
index 0000000000..1e2cf794c5
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-gnu/titan-gnu-serial-cmake
@@ -0,0 +1,120 @@
+# run this script by typing: source titan-gnu-serial-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on titan using the GNU compiler suite.
+
+# This script should be run from the builds/titan-gnu subdirectory
+# of the main CISM repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+echo
+echo Run this script by typing: source titan-gnu-serial-cmake
+echo
+
+module unload PrgEnv-gnu PrgEnv-pgi
+
+module --silent purge
+
+#module unload cmake
+#module unload cray-hdf5
+#module unload cray-hdf5-parallel
+#module unload netcdf
+#module unload python
+#module unload cray-shmem
+#module unload cray-mpich2
+#module unload netcdf-hdf5parallel cray-netcdf-hdf5parallel boost gcc
+#module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+
+module load modules
+module load cmake/2.8.10.2
+module load PrgEnv-gnu
+module load gcc/4.8.2
+module load cray-shmem
+module load cray-mpich
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python
+module load boost/1.54.0
+
+# remove old build data:
+rm -f ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-gnu-ci-nophal/install \
+ -D CISM_TRILINOS_GPTL_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-gnu-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-gnu-ci-nophal/install \
+\
+ -D CISM_GPTL_DIR=/lustre/atlas/world-shared/cli900/cesm/software/libgptl/libgptl-titan-gnu \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.3.0/GNU/48 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -ffree-line-length-none -fno-range-check" \
+ -D BISICLES_LIB_SUBDIR=libgnu \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_USE_CXX_IMPLICIT_LIBS:BOOL=OFF \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# -D CISM_TRILINOS_DIR=/tmp/proj/cli054/trilinos-10.12/FEB2013_FAST_PGI/install \
+
+# -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+# -D CISM_Fortran_FLAGS:STRING="-O2" \
+
+
+# -D CISM_FMAIN=/opt/pgi/13.7.0/linux86-64/13.7/lib/f90main.o \
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/titan-pgi/titan-bisicles-pgi-cmake b/components/cism/glimmer-cism/builds/titan-pgi/titan-bisicles-pgi-cmake
new file mode 100644
index 0000000000..c1aa4c78cc
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-pgi/titan-bisicles-pgi-cmake
@@ -0,0 +1,105 @@
+# run this script by typing: source titan-bisicles-pgi-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on titan using the PGI compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to a relative path from this directory)
+
+
+# This script should be run from the builds/titan-pgi subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source titan-bisicles-pgi-cmake
+echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich2
+
+#module --silent purge
+
+module load modules
+module load cmake
+module load PrgEnv-pgi/
+module load hdf5-parallel
+module load netcdf-hdf5parallel
+module load python
+module load cray-shmem
+module load cray-mpich
+
+# remove old build data:
+rm -f ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.2.0/pgi/119 \
+ -D CISM_FMAIN=/opt/pgi/13.7.0/linux86-64/13.7/lib/f90main.o \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -DNO_RESCALE" \
+ -D BISICLES_LIB_SUBDIR=libpgi \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/titan-pgi/titan-glissade-only-pgi-cmake-cesmtimers b/components/cism/glimmer-cism/builds/titan-pgi/titan-glissade-only-pgi-cmake-cesmtimers
new file mode 100755
index 0000000000..b0a8fb0f39
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-pgi/titan-glissade-only-pgi-cmake-cesmtimers
@@ -0,0 +1,131 @@
+# run this script by typing: source titan-glissade-only-pgi-cmake-cesmtimers
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds simple_glide and simple_bisicles
+# on titan using the PGI compiler suite.
+
+# This script should be run from the builds/titan-pgi subdirectory
+# of the main CISM repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# NO_TRILINOS -- OFF by default, set to on for builds without Trilinos
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_BUILD_SIMPLE_GLIDE -- ON by default, set to OFF to not build simple_glide
+# CISM_BUILD_SIMPLE_BISICLES -- OFF by default, set to ON to build simple_bisicles
+# Setting NO_TRILINOS to ON will generate a much smaller executable for this build.
+# CISM_BUILD_EXTRA_EXECUTABLES -- OFF by default, set to ON to build eis_glide and others
+# CISM_USE_GPTL_INSTRUMENTATION -- OFF by default, set to ON to use GPTL instrumentation
+
+# NOTE: There is currently an incompatibility between simple_bisicles and GPTL. If
+# the CISM_BUILD_SIMPLE_BISICLES is ON, the GPTL instrumentation is turned OFF.
+
+# help user get the correct modules loaded:
+
+
+# module unload modules
+
+echo
+echo Run this script by typing: source titan-glissade-only-pgi-cmake-cesmtimers
+echo
+
+module unload cmake
+module unload cray-hdf5
+module unload cray-hdf5-parallel
+module unload netcdf
+module unload python
+module unload cray-shmem
+module unload cray-mpich cray-mpich2
+module unload netcdf-hdf5parallel boost pgi
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+
+# Commented out on titan, because of project paths that get cleared
+# if you do this:
+# module --silent purge
+
+module load modules
+module load cmake/2.8.10.2
+module load PrgEnv-pgi/4.1.40
+module load pgi/13.10.0
+module load cray-shmem
+module load cray-mpich
+module load cray-hdf5-parallel/1.8.11
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python
+module load boost/1.53.0
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+\
+ -D CISM_BUILD_SIMPLE_GLIDE:BOOL=ON \
+ -D CISM_BUILD_SIMPLE_BISICLES:BOOL=OFF \
+ -D CISM_BUILD_GLINT_EXAMPLE:BOOL=OFF \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+\
+ -D CISM_TRILINOS_DIR=/ccs/proj/cli062/Trilinos/cism-standard/default-pgi/install \
+ -D CISM_TRILINOS_GPTL_DIR=/ccs/proj/cli062/cism_gptl/Trilinos/titan-pgi-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR= \
+\
+ -D CISM_GPTL_DIR=/ccs/proj/cli062/cism_gptl/libgptl/libgptl-titan-pgi \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.3.0/pgi/121 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-fast -Kieee --diag_suppress 554,111,611" \
+ -D CISM_Fortran_FLAGS:STRING="-fast -Kieee" \
+ -D BISICLES_LIB_SUBDIR=libpgi \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_USE_CXX_IMPLICIT_LIBS:BOOL=OFF \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# -D GLIMMER_FMAIN=/opt/pgi/13.10.0/linux86-64/13.10/lib/f90main.o \
+
+# -D GLIMMER_TRILINOS_DIR=/tmp/proj/cli054/trilinos-10.12/FEB2013_FAST_PGI/install \
+
+# -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+# -D GLIMMER_Fortran_FLAGS:STRING="-O2" \
+
+
+# -D GLIMMER_FMAIN=/opt/pgi/13.7.0/linux86-64/13.7/lib/f90main.o \
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/titan-pgi/titan-petsc-bisicles-pgi-cmake b/components/cism/glimmer-cism/builds/titan-pgi/titan-petsc-bisicles-pgi-cmake
new file mode 100644
index 0000000000..df5665d26c
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-pgi/titan-petsc-bisicles-pgi-cmake
@@ -0,0 +1,106 @@
+# run this script by typing: source titan-petsc-bisicles-pgi-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on titan using the PGI compiler suite. It no longer relies on a build
+# of Trilinos, but does need a BISICLES build located in BISICLES_INTERFACE_DIR
+# (currently set to a relative path from this directory)
+
+
+# This script should be run from the builds/titan-pgi subdirectory
+# of the main seacism repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+echo
+echo Run this script by typing: source titan-petsc-bisicles-pgi-cmake
+echo
+
+module unload cmake
+module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+module unload hdf5
+module unload hdf5-parallel
+module unload netcdf
+#module unload python
+#module unload cray-shmem
+#module unload cray-mpich2
+
+#module --silent purge
+
+module load modules
+module load cmake
+module load PrgEnv-pgi/
+module load cray-hdf5-parallel
+module load cray-netcdf-hdf5parallel/4.2.1.1
+#module load python
+#module load cray-shmem
+#module load cray-mpich
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=ON \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.2.0/pgi/119 \
+ -D CISM_FMAIN=/opt/pgi/13.7.0/linux86-64/13.7/lib/f90main.o \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -g --diag_suppress 554,111,611 -DH5_USE_16_API" \
+ -D CISM_Fortran_FLAGS:STRING="-O2 -g -DNO_RESCALE" \
+ -D CISM_EXTRA_LIBS:STRING="-L$PETSC_DIR/$PETSC_ARCH/lib -lpetsc -lHYPRE -lmetis -lparmetis -llapack -lblas" \
+ -D BISICLES_LIB_SUBDIR=libpgi \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-build-and-test.csh b/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-build-and-test.csh
new file mode 100644
index 0000000000..2854bc3440
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-build-and-test.csh
@@ -0,0 +1,143 @@
+#!/bin/csh
+
+# PARALLEL BUILD WITH CMAKE using PGI
+
+# setenv TEST_DIR "/USERS/$USER/work/modeling/cism/seacism-oceans11/tests/higher-order"
+
+# 5/7/2014 DMR -- added performance tests:
+
+## This will automatically submit dome60-500 ijobs. gis_1km and gis_4km will not be submitted
+## automatically because you will have to build and run Felix/Albany on hopper first. Once you do that,
+## you can go to lines #193-194, 197-198, 201-202, and uncomment them.
+setenv PERF_TEST 0
+
+@ run_perf_tests = (($1 == run-perf-tests) || ($2 == run-perf-tests) || ($3 == run-perf-tests) || ($4 == run-perf-tests) || ($5 == run-perf-tests))
+
+if ($run_perf_tests) then
+ setenv PERF_TEST 1
+endif
+
+@ skip_build_set = (($1 == skip-build) || ($2 == skip-build) || ($3 == skip-build) || ($4 == skip-build) || ($5 == skip-build))
+
+@ no_copy_set = (($1 == no-copy) || ($2 == no-copy) || ($3 == no-copy) || ($4 == no-copy) || ($5 == no-copy))
+
+@ skip_tests_set = (($1 == skip-tests) || ($2 == skip-tests) || ($3 == skip-tests) || ($4 == skip-tests) || ($5 == skip-tests))
+
+#**!move this and source it to your .bashrc (wherever your higher-order directory is located)
+#setenv TEST_DIR /global/scratch2/sd/$USER/cism2/higher-order
+
+if (! -d $TEST_DIR) mkdir -p $TEST_DIR
+
+setenv TEST_SUITE_DEFAULT_LOC http://oceans11.lanl.gov/cism/livv
+#setenv TEST_SUITE_DEFAULT_LOC /ccs/proj/cli062/test_suite
+
+setenv build_problem 0
+
+set COMPILER_NAME = pgi
+set PLATFORM_NAME = titan
+
+# set PLATFORM_NAME = $1
+# set COMPILER_NAME = $2
+
+set CMAKE_SCRIPT = $PLATFORM_NAME'-'$COMPILER_NAME'-cmake'
+set CMAKE_CONF_OUT = 'conf_'$COMPILER_NAME'.out'
+set CMAKE_BUILD_OUT = 'cmake_'$COMPILER_NAME'_build.out'
+#set CISM_RUN_SCRIPT = $PLATFORM_NAME'job'
+#set CISM_RUN_SCRIPT = 'hopjob'
+set CISM_RUN_SCRIPT = 'ijob'
+#set CISM_VV_SCRIPT = $PLATFORM_NAME'_VV.bash'
+set CISM_VV_SCRIPT = 'rhea_VV.bash'
+
+echo
+echo 'To use this script, type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh'
+echo
+#echo 'For a quick test (dome only), type: csh '$PLATFORM_NAME'-'$COMPILER_NAME'-build-and-test.csh quick-test'
+echo
+echo "Call with no-copy to prevent copying of the reg_test and livv defaults."
+echo "Call with run-perf-tests to run the performance tests."
+echo "Call with skip-tests to skip testing (builds executable and copies it to TEST_DIR)."
+
+
+echo
+echo 'See the LIVV documentation for instructions on setting up the test directory (TEST_DIR).'
+echo
+
+
+#echo 'The following environment variables must be set: TEST_DIR, GLIMMER_TRILINOS_DIR'
+#echo 'Examples (place in .cshrc or .bashrc):'
+#echo 'csh, tcsh: setenv GLIMMER_TRILINOS_DIR "/Users/$USER/Trilinos/gcc-build/install"'
+#echo 'bash: export GLIMMER_TRILINOS_DIR="/Users/$USER/Trilinos/gcc-build/install"'
+echo
+echo 'Setting TEST_DIR to the location: '
+echo 'TEST_DIR =' $TEST_DIR
+echo 'TEST_DIR must also be set in your .bashrc file.'
+
+# PARALLEL BUILD WITH CMAKE
+
+
+if ($skip_build_set == 0) then
+
+echo
+echo "Configuring and building in directory: " $PWD
+echo
+
+echo 'Configuring '$COMPILER_NAME' cmake build...'
+source ./$CMAKE_SCRIPT >& $CMAKE_CONF_OUT
+echo 'Making parallel '$COMPILER_NAME'...'
+make -j 8 >& $CMAKE_BUILD_OUT
+
+#if ( -e example-drivers/simple_glide/src/simple_glide ) then
+# echo 'Copying '$COMPILER_NAME' parallel simple_glide_'$COMPILER_NAME' to test directory'
+# cp -f example-drivers/simple_glide/src/simple_glide $TEST_DIR/simple_glide_$COMPILER_NAME
+#else
+# echo "cmake '$COMPILER_NAME' build failed, no executable"
+# @ build_problem = 1
+#endif
+
+if ( -e cism_driver/cism_driver ) then
+ echo 'Copying '$COMPILER_NAME' parallel cism_driver_'$COMPILER_NAME' to test directory'
+ cp -f cism_driver/cism_driver $TEST_DIR/cism_driver_$COMPILER_NAME
+else
+ echo "cmake '$COMPILER_NAME' build failed, no executable"
+ @ build_problem = 1
+endif
+
+endif # skip_build_set
+
+if ($build_problem == 1) then
+ echo "No job submitted -- cmake build failed."
+else # execute tests:
+
+ # Make copy of test suite in $TEST_DIR:
+if (! ($no_copy_set)) then
+ echo "Copying default reg_test and LIVV to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e reg_test_default.tgz ) rm -f reg_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/reg_test_default.tgz
+ tar xfz reg_test_default.tgz
+ popd > /dev/null
+
+ if ($PERF_TEST) then
+ echo "Copying default perf_test to $TEST_DIR"
+ pushd . > /dev/null
+ cd $TEST_DIR
+ if ( -e perf_test_default.tgz ) rm -f perf_test_default.tgz
+ wget $TEST_SUITE_DEFAULT_LOC/perf_test_default.tgz
+ tar xfz perf_test_default.tgz
+ popd > /dev/null
+ endif
+
+ cp -rf ../../tests/higher-order/livv $TEST_DIR
+endif
+
+if ($skip_tests_set) then
+ echo "Skipping tests."
+ exit
+endif
+
+csh $TEST_DIR/livv/run_livv_default_tests.csh $TEST_DIR $CISM_RUN_SCRIPT $PERF_TEST $CISM_VV_SCRIPT
+echo "Back in build-and-test script, exiting."
+exit
+
+
diff --git a/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-cmake b/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-cmake
new file mode 100644
index 0000000000..b846c74b23
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-cmake
@@ -0,0 +1,129 @@
+# run this script by typing: source titan-pgi-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on titan using the PGI compiler suite.
+
+# This script should be run from the builds/titan-pgi subdirectory
+# of the main CISM repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+echo
+echo Run this script by typing: source titan-pgi-cmake
+echo
+
+# this unload reduces purge spurious error messages:
+module unload PrgEnv-gnu PrgEnv-pgi
+module --silent purge
+
+#module unload cmake
+#module unload cray-hdf5
+#module unload cray-hdf5-parallel
+#module unload netcdf
+#module unload python
+#module unload cray-shmem
+#module unload cray-mpich2
+#module unload netcdf-hdf5parallel cray-netcdf-hdf5parallel boost gcc
+#module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+
+module load modules
+module load cmake/2.8.10.2
+module load PrgEnv-pgi
+
+#module load cray-shmem
+#module load cray-mpich
+#module load netcdf-hdf5parallel/4.3.0
+#module load python
+#module load boost/1.54.0
+
+module load cray-shmem
+module load cray-mpich
+module load cray-hdf5-parallel/1.8.11
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python
+module load boost/1.54.0
+
+# remove old build data:
+rm -f ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-pgi-ci-nophal/install \
+ -D CISM_TRILINOS_GPTL_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-pgi-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/lustre/atlas/world-shared/cli900/cesm/software/Trilinos/Trilinos-11.10.2_gptl/titan-pgi-ci-nophal/install \
+\
+ -D CISM_GPTL_DIR=/lustre/atlas/world-shared/cli900/cesm/software/libgptl/libgptl-titan-pgi \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.3.0/pgi/121 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-fast -Kieee --diag_suppress 554,111,611" \
+ -D CISM_Fortran_FLAGS:STRING="-fast -Kieee" \
+ -D CISM_FMAIN=/opt/pgi/13.10.0/linux86-64/13.10/lib/f90main.o \
+ -D BISICLES_LIB_SUBDIR=libpgi \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_USE_CXX_IMPLICIT_LIBS:BOOL=OFF \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+# -D CISM_FMAIN=/opt/pgi/13.10.0/linux86-64/13.10/lib/f90main.o \
+
+# -D CISM_TRILINOS_DIR=/tmp/proj/cli054/trilinos-10.12/FEB2013_FAST_PGI/install \
+
+# -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+# -D CISM_Fortran_FLAGS:STRING="-O2" \
+
+
+# -D CISM_FMAIN=/opt/pgi/13.7.0/linux86-64/13.7/lib/f90main.o \
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-cmake-newtrilinos b/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-cmake-newtrilinos
new file mode 100644
index 0000000000..7799f28237
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/titan-pgi/titan-pgi-cmake-newtrilinos
@@ -0,0 +1,129 @@
+# run this script by typing: source titan-pgi-cmake-newtrilinos
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script builds cism_driver
+# on titan using the PGI compiler suite.
+
+# This script should be run from the builds/titan-pgi subdirectory
+# of the main CISM repository (reflected in the two instances
+# of "../.." below).
+
+# BUILD OPTIONS:
+# The call to cmake below includes several input ON/OFF switch parameters, to
+# provide a simple way to select different build options. These are:
+# CISM_BUILD_CISM_DRIVER -- ON by default, set to OFF to only build the CISM libraries.
+# CISM_ENABLE_BISICLES -- OFF by default, set to ON to build a BISICLES-capable cism_driver.
+# CISM_ENABLE_FELIX -- OFF by default, set to ON to build a FELIX-capable cism_driver.
+# CISM_USE_TRILINOS -- OFF by default, set to on for builds with Trilinos.
+# CISM_MPI_MODE -- ON by default, only set to OFF for serial builds.
+# CISM_SERIAL_MODE -- OFF by default, set to ON for serial builds.
+# CISM_USE_GPTL_INSTRUMENTATION -- ON by default, set to OFF to not use GPTL instrumentation.
+# CISM_COUPLED -- OFF by default, set to ON to build with CESM.
+
+echo
+echo Run this script by typing: source titan-pgi-cmake-newtrilinos
+echo
+
+# this unload reduces purge spurious error messages:
+module unload PrgEnv-gnu PrgEnv-pgi
+module --silent purge
+
+#module unload cmake
+#module unload cray-hdf5
+#module unload cray-hdf5-parallel
+#module unload netcdf
+#module unload python
+#module unload cray-shmem
+#module unload cray-mpich2
+#module unload netcdf-hdf5parallel cray-netcdf-hdf5parallel boost gcc
+#module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel PrgEnv-pathscale PrgEnv-pgi
+
+module load modules
+module load cmake/2.8.10.2
+module load PrgEnv-pgi
+
+#module load cray-shmem
+#module load cray-mpich
+#module load netcdf-hdf5parallel/4.3.0
+#module load python
+#module load boost/1.54.0
+
+module load cray-shmem
+module load cray-mpich
+module load cray-hdf5-parallel/1.8.11
+module load cray-netcdf-hdf5parallel/4.3.0
+module load python
+module load boost/1.54.0
+
+# remove old build data:
+rm -f ./CMakeCache.txt
+rm -rf ./CMakeFiles
+
+# run a script that creates some CISM source files:
+#pushd .
+#cd ..
+#../cmake-scripts/autogenerate-script
+#popd
+
+echo
+echo "Doing CMake Configuration step"
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=ON \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=ON \
+ -D CISM_COUPLED:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=/ccs/proj/cli101/software/Trilinos/Trilinos_gptl/titan-pgi-ci-nophal/install \
+ -D CISM_TRILINOS_GPTL_DIR=/ccs/proj/cli101/software/Trilinos/Trilinos_gptl/titan-pgi-ci-nophal/install \
+ -D CISM_TRILINOS_ALBANY_DIR=/ccs/proj/cli101/software/Trilinos/Trilinos_gptl/titan-pgi-ci-nophal/install \
+\
+ -D CISM_GPTL_DIR=/ccs/proj/cli062/cism_gptl/libgptl/libgptl-titan-pgi \
+ -D CISM_NETCDF_DIR=/opt/cray/netcdf-hdf5parallel/4.3.0/pgi/121 \
+\
+ -D CMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+ -D CMAKE_VERBOSE_CONFIGURE:BOOL=ON \
+\
+ -D CMAKE_CXX_COMPILER=CC \
+ -D CMAKE_C_COMPILER=cc \
+ -D CMAKE_Fortran_COMPILER=ftn \
+\
+ -D CMAKE_CXX_FLAGS:STRING="-fast -Kieee --diag_suppress 554,111,611" \
+ -D CISM_Fortran_FLAGS:STRING="-fast -Kieee" \
+ -D CISM_FMAIN=/opt/pgi/13.10.0/linux86-64/13.10/lib/f90main.o \
+ -D BISICLES_LIB_SUBDIR=libpgi \
+ -D BISICLES_INTERFACE_DIR=$PWD/../../../BISICLES/CISM-interface/interface \
+ -D CISM_MPI_LIBS:STRING="mpichf90" \
+ -D CISM_USE_CXX_IMPLICIT_LIBS:BOOL=OFF \
+ -D CISM_STATIC_LINKING:BOOL=ON \
+ ../..
+
+# -D CISM_FMAIN=/opt/pgi/13.10.0/linux86-64/13.10/lib/f90main.o \
+
+# -D CISM_TRILINOS_DIR=/tmp/proj/cli054/trilinos-10.12/FEB2013_FAST_PGI/install \
+
+# -D CMAKE_CXX_FLAGS:STRING="-O2 --diag_suppress 554,111,611 -DH5_USE_16_API" \
+# -D CISM_Fortran_FLAGS:STRING="-O2" \
+
+
+# -D CISM_FMAIN=/opt/pgi/13.7.0/linux86-64/13.7/lib/f90main.o \
+
+# Note: last argument above "../.." is path to top seacism directory
+
+# ADD:
+
+# -D CMAKE_PREFIX_PATH="/opt/cray/hdf5/1.8.8/pgi/119;/opt/cray/hdf5-parallel/1.8.8/pgi/119" \
+
+# -D TPL_ENABLE_MPI:BOOL=ON \
+
+
+# -D CISM_HDF5_LIB_DIR=/opt/cray/hdf5-parallel/1.8.8/pgi/119/lib \
+# -D CISM_HDF5_LIBS="-lhdf5_pgi_parallel -lz" \
diff --git a/components/cism/glimmer-cism/builds/yellowstone-intel-serial/yellowstone-intel-cmake-serial b/components/cism/glimmer-cism/builds/yellowstone-intel-serial/yellowstone-intel-cmake-serial
new file mode 100755
index 0000000000..755e758378
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/yellowstone-intel-serial/yellowstone-intel-cmake-serial
@@ -0,0 +1,69 @@
+# Run this script by typing: source yellowstone-intel-cmake-serial
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script is set up to perform a serial build
+
+module purge
+module load ncarenv/1.0
+module load ncarbinlibs/1.0
+module load intel/13.1.2
+module load mkl/11.0.1
+module load netcdf/4.3.0
+module load ncarcompilers/1.0
+module load cmake/2.8.10.2
+module load python
+module load all-python-libs
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+echo
+echo "Doing CMake Configuration step"
+
+# Note: the compilation flags were taken from the defaults for a CESM build on
+# yellowstone-intel (using Machines_140218). Some of these options (e.g.,
+# -convert big_endian and -assume byterecl) are probably unnecessary for a
+# standalone build, but I am keeping things consistent with the CESM build for
+# simplicity.
+
+# A few non-intuitive things:
+#
+# - CISM_FORCE_FORTRAN_LINKER: without this, cmake tries to use a C++ linker, which doesn't work
+#
+# - CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES: (this is a note that applies to the
+# parallel build with trilinos, and may or may not apply to this serial
+# build): if this is on (the default), some libraries are included on the link
+# line which can't be found (e.g., hdf5). This may be related to the fact that
+# trilinos on yellowstone is old, and/or the fact that cmake wants to use a
+# C++ linker but we're telling it to use a fortran linker.
+
+cmake \
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=OFF \
+ -D CISM_SERIAL_MODE:BOOL=ON \
+ -D CISM_BUILD_SIMPLE_GLIDE:BOOL=ON \
+ -D CISM_BUILD_SIMPLE_BISICLES:BOOL=OFF \
+ -D CISM_BUILD_GLINT_EXAMPLE:BOOL=OFF \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_USE_DEFAULT_IO:BOOL=OFF \
+ -D CISM_USE_CISM_FRONT_END:BOOL=OFF \
+\
+ -D CISM_NETCDF_DIR=$NETCDF \
+ -D CISM_FORCE_FORTRAN_LINKER:BOOL=ON \
+ -D CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES:BOOL=OFF \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=OFF \
+\
+ -D CMAKE_CXX_COMPILER=icpc \
+ -D CMAKE_C_COMPILER=icc \
+ -D CMAKE_Fortran_COMPILER=ifort \
+\
+ -D CMAKE_Fortran_FLAGS:STRING="-fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -xHost -O2" \
+ -D CMAKE_C_FLAGS:STRING="-O2 -fp-model precise -xHost" \
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -fp-model precise -xHost" \
+ ../..
+
+# Note: last argument above "../.." is path to top seacism directory
diff --git a/components/cism/glimmer-cism/builds/yellowstone-intel/yellowstone-intel-cmake b/components/cism/glimmer-cism/builds/yellowstone-intel/yellowstone-intel-cmake
new file mode 100755
index 0000000000..fc61a0bf4a
--- /dev/null
+++ b/components/cism/glimmer-cism/builds/yellowstone-intel/yellowstone-intel-cmake
@@ -0,0 +1,72 @@
+# Run this script by typing: source yellowstone-intel-cmake
+# After this script completes, type: make -j 8
+# If rebuilding, type 'make clean' before running 'make -j 8'
+
+# This cmake configuration script is set up to perform a parallel build with Trilinos
+
+module purge
+module load ncarenv/1.0
+module load ncarbinlibs/1.0
+module load intel/13.1.2
+module load mkl/11.0.1
+module load trilinos/11.0.3
+module load netcdf-mpi/4.3.0
+module load ncarcompilers/1.0
+module load pnetcdf/1.3.0
+module load cmake/2.8.10.2
+module load python
+module load all-python-libs
+
+# remove old build data:
+rm ./CMakeCache.txt
+rm -r ./CMakeFiles
+
+echo
+echo "Doing CMake Configuration step"
+
+# Note: the compilation flags were taken from the defaults for a CESM build on
+# yellowstone-intel (using Machines_140218). Some of these options (e.g.,
+# -convert big_endian and -assume byterecl) are probably unnecessary for a
+# standalone build, but I am keeping things consistent with the CESM build for
+# simplicity.
+
+# A few non-intuitive things:
+#
+# - CISM_FORCE_FORTRAN_LINKER: without this, cmake tries to use a C++ linker, which doesn't work
+#
+# - CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES: if this is on (the default), some
+# libraries are included on the link line which can't be found (e.g.,
+# hdf5). This may be related to the fact that trilinos on yellowstone is old,
+# and/or the fact that cmake wants to use a C++ linker but we're telling it to
+# use a fortran linker.
+
+cmake \
+ -D CISM_BUILD_CISM_DRIVER:BOOL=ON \
+ -D CISM_ENABLE_BISICLES=OFF \
+ -D CISM_ENABLE_FELIX=OFF \
+\
+ -D CISM_USE_TRILINOS:BOOL=OFF \
+ -D CISM_MPI_MODE:BOOL=ON \
+ -D CISM_SERIAL_MODE:BOOL=OFF \
+\
+ -D CISM_USE_GPTL_INSTRUMENTATION:BOOL=OFF \
+ -D CISM_COUPLED:BOOL=OFF \
+ -D CISM_USE_CISM_FRONT_END:BOOL=OFF \
+\
+ -D CISM_TRILINOS_DIR=$TRILINOS_PATH \
+ -D CISM_NETCDF_DIR=$NETCDF \
+ -D CISM_FORCE_FORTRAN_LINKER:BOOL=ON \
+ -D CISM_INCLUDE_IMPLICIT_LINK_LIBRARIES:BOOL=OFF \
+ -D CMAKE_VERBOSE_MAKEFILE:BOOL=OFF \
+\
+ -D CMAKE_CXX_COMPILER=mpiicpc \
+ -D CMAKE_C_COMPILER=mpicc \
+ -D CMAKE_Fortran_COMPILER=mpif90 \
+\
+ -D CMAKE_Fortran_FLAGS:STRING="-fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -xHost -O2" \
+ -D CMAKE_C_FLAGS:STRING="-O2 -fp-model precise -xHost" \
+ -D CMAKE_CXX_FLAGS:STRING="-O2 -fp-model precise -xHost" \
+ ../..
+
+# Note: last argument above "../.." is path to top seacism directory
+# Note: last argument above "../.." is path to top seacism directory
diff --git a/components/cism/glimmer-cism/cism_driver/CMakeLists.txt b/components/cism/glimmer-cism/cism_driver/CMakeLists.txt
new file mode 100644
index 0000000000..ef2c20a2b9
--- /dev/null
+++ b/components/cism/glimmer-cism/cism_driver/CMakeLists.txt
@@ -0,0 +1,88 @@
+# cism_driver and CISM front-end build
+
+IF (${CISM_USE_TRILINOS})
+ LIST(INSERT CISM_TRILINOS_LIBS 0 glimmercismcpp)
+ENDIF()
+
+# Need include directories from Trilinos but also mod files from glimmer
+include_directories (${CISM_BINARY_DIR}/include ${PYTHON_INC_DIR}
+ ${Trilinos_INCLUDE_DIRS} ${Trilinos_TPL_INCLUDE_DIRS})
+
+link_directories (${Trilinos_LIBRARY_DIRS} ${Trilinos_TPL_LIBRARY_DIRS}
+ ${CISM_DYCORE_DIR}
+ ${BISICLES_INTERFACE_DIR}/${BISICLES_LIB_SUBDIR}
+ ${CISM_HDF5_LIB_DIR} ${PYTHON_LIB_DIR} )
+
+# DMR 6/2/14 -- Moved simple_forcing.F90 and testsfg.F90 to the glimmercismfortran lib.
+# These are local source files needed to make the cism_driver executable and CISM front-end
+#add_executable(cism_driver cism_driver.F90 cism_front_end.F90 gcm_cism_interface.F90
+# gcm_to_cism_glint.F90 cism_external_dycore_interface.F90
+# ../example-drivers/simple_glide/src/simple_forcing.F90
+# ../example-drivers/simple_glide/src/testsfg.F90)
+
+add_executable(cism_driver cism_driver.F90 cism_front_end.F90 gcm_cism_interface.F90
+ gcm_to_cism_glint.F90 cism_external_dycore_interface.F90)
+
+#SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
+IF (CISM_STATIC_LINKING)
+ SET_TARGET_PROPERTIES(cism_driver PROPERTIES LINK_SEARCH_START_STATIC 1)
+ SET_TARGET_PROPERTIES(cism_driver PROPERTIES LINK_SEARCH_END_STATIC 1)
+ENDIF()
+
+IF (CISM_FORCE_FORTRAN_LINKER)
+ SET_PROPERTY(TARGET cism_driver PROPERTY LINKER_LANGUAGE Fortran)
+ENDIF()
+
+MESSAGE("CISM_BISICLES_DIR: " ${CISM_BISICLES_DIR})
+MESSAGE("CISM_HDF5_LIBS: " ${CISM_HDF5_LIBS})
+
+# Executable depends on several glimmer libraries and Trilinos,
+# and potentially an f90main.o file (on jaguar with PGI at least)
+
+IF (${CISM_ENABLE_BISICLES})
+ set(CISM_USE_EXTERNAL_DYCORE ON)
+ELSE()
+ set(CISM_USE_EXTERNAL_DYCORE OFF)
+ENDIF()
+MESSAGE("CISM_USE_EXTERNAL_DYCORE: " ${CISM_USE_EXTERNAL_DYCORE})
+IF (NOT ${CISM_USE_EXTERNAL_DYCORE})
+ link_directories (${Trilinos_LIBRARY_DIRS} ${Trilinos_TPL_LIBRARY_DIRS}
+ ${CISM_HDF5_LIB_DIR} ${PYTHON_LIB_DIR} )
+
+ target_link_libraries(cism_driver
+ ${CISM_FMAIN}
+ glimmercismfortran
+ ${PYTHON_LIBS}
+ ${CISM_NETCDF_LIBS}
+ ${CISM_HDF5_LIBS}
+ ${CISM_MPI_LIBS}
+ ${CISM_TRILINOS_LIBS}
+ ${CISM_EXTRA_LIBS}
+ ${CISM_GPTL_LIB}
+ )
+ELSEIF (${CISM_ENABLE_BISICLES})
+MESSAGE("CISM_DYCORE_DIR: " ${CISM_DYCORE_DIR})
+ link_directories (${Trilinos_LIBRARY_DIRS} ${Trilinos_TPL_LIBRARY_DIRS}
+ ${CISM_DYCORE_DIR}
+ ${BISICLES_INTERFACE_DIR}/${BISICLES_LIB_SUBDIR}
+ ${CISM_HDF5_LIB_DIR} ${PYTHON_LIB_DIR} )
+ target_link_libraries(cism_driver
+ ${CISM_FMAIN}
+ glimmercismfortran
+ DyCoreToGlimmer
+ libBisicles.a
+ libChomboLibs.a
+ ${PYTHON_LIBS}
+ ${CISM_NETCDF_LIBS}
+ ${CISM_HDF5_LIBS}
+ ${CISM_MPI_LIBS}
+ ${CISM_TRILINOS_LIBS}
+ ${CISM_EXTRA_LIBS}
+ ${CISM_GPTL_LIB}
+ )
+ENDIF()
+
+# Helpful(?) message near end of configuration step
+MESSAGE("")
+MESSAGE(" Executable cism_driver should appear in dir: build_dir/cism_driver")
+MESSAGE("")
diff --git a/components/cism/glimmer-cism/cism_driver/cism_cesm_interface.F90 b/components/cism/glimmer-cism/cism_driver/cism_cesm_interface.F90
new file mode 100644
index 0000000000..8b02e0ae2f
--- /dev/null
+++ b/components/cism/glimmer-cism/cism_driver/cism_cesm_interface.F90
@@ -0,0 +1,40 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! cism_cesm_interface.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+module cism_cesm_interface_module
+
+contains
+
+ subroutine cism_cesm_interface()
+
+ use cism_front_end_module
+
+ call cism_front_end()
+
+ end subroutine cism_cesm_interface
+
+end module cism_cesm_interface_module
diff --git a/components/cism/glimmer-cism/cism_driver/cism_driver.F90 b/components/cism/glimmer-cism/cism_driver/cism_driver.F90
new file mode 100644
index 0000000000..abe82e15b9
--- /dev/null
+++ b/components/cism/glimmer-cism/cism_driver/cism_driver.F90
@@ -0,0 +1,53 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! cism_driver.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+program cism_driver
+
+ use parallel
+! use glimmer_commandline
+! use glide
+ use gcm_cism_interface
+ use parallel
+
+ integer :: which_gcm = GCM_DATA_MODEL
+ type(gcm_to_cism_type) :: g2c
+
+ if (command_argument_count() == 0) then
+ print *,""
+ print *,"Call cism_driver with either 1 or 2 arguments. Examples:"
+ print *,"cism_driver ice_sheet.config"
+ print *,"cism_driver ice_sheet.config climate.config"
+ print *,""
+ stop
+ end if
+
+ call parallel_initialise
+
+ call gci_init_interface(which_gcm,g2c)
+ call gci_run_model(g2c)
+ call gci_finalize_interface(g2c)
+
+ call parallel_finalise
+end program cism_driver
diff --git a/components/cism/glimmer-cism/cism_driver/cism_external_dycore_interface.F90 b/components/cism/glimmer-cism/cism_driver/cism_external_dycore_interface.F90
new file mode 100644
index 0000000000..b9fc824b77
--- /dev/null
+++ b/components/cism/glimmer-cism/cism_driver/cism_external_dycore_interface.F90
@@ -0,0 +1,125 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! cism_external_dycore_interface.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+module cism_external_dycore_interface
+
+contains
+
+subroutine cism_init_external_dycore(external_dycore_type,model)
+
+ use parallel
+ use glimmer_global
+ use glide
+ use glissade
+ use eismint_forcing
+ use glimmer_log
+ use glimmer_config
+ use glimmer_commandline
+ use glimmer_writestats
+ use glimmer_filenames, only : filenames_init
+
+ use glide_diagnostics
+
+#if defined CISM_HAS_BISICLES || defined CISM_HAS_FELIX
+#define CISM_HAS_EXTERNAL_DYCORE 1
+#endif
+
+#ifdef CISM_HAS_EXTERNAL_DYCORE
+ use glimmer_to_dycore
+#endif
+
+
+ implicit none
+
+ integer*4 :: external_dycore_type
+ type(glide_global_type), intent(inout) :: model
+
+ real(kind=dp) :: cur_time, time_inc
+
+ ! for external dycore:
+ integer*4 external_dycore_model_index
+ ! integer argc
+ integer*4 p_index
+
+
+#ifdef CISM_HAS_EXTERNAL_DYCORE
+ ! print *,"Initializing external dycore interface."
+ call gtd_init_dycore_interface()
+
+ call parallel_barrier()
+ ! print *,"Initializing external dycore."
+ call gtd_init_dycore(model,external_dycore_model_index)
+ model%options%external_dycore_model_index = external_dycore_model_index
+ call parallel_barrier()
+#else
+ print *,"ERROR: The program was not built with an external dynamic core."
+#endif
+
+end subroutine cism_init_external_dycore
+
+
+subroutine cism_run_external_dycore(external_dycore_model_index,cur_time,time_inc)
+ use parallel
+ use glimmer_global
+ use glide
+ use glissade
+ use eismint_forcing
+ use glimmer_log
+ use glimmer_config
+ use glimmer_commandline
+ use glimmer_writestats
+ use glimmer_filenames, only : filenames_init
+
+ use glide_diagnostics
+
+#if defined CISM_HAS_BISICLES || defined CISM_HAS_FELIX
+#define CISM_HAS_EXTERNAL_DYCORE 1
+#endif
+
+#ifdef CISM_HAS_EXTERNAL_DYCORE
+ use glimmer_to_dycore
+#endif
+
+ integer*4 external_dycore_model_index
+ real(kind=dp) :: cur_time, time_inc
+
+#ifdef CISM_HAS_EXTERNAL_DYCORE
+! dycore_model_index = this_rank + 1
+ dycore_model_index = 1
+
+ call parallel_barrier()
+ ! print *,"Running external dycore."
+ call gtd_run_dycore(external_dycore_model_index,cur_time,time_inc)
+ ! print *,"Completed Dycore Run."
+ call parallel_barrier()
+#else
+ print *,"ERROR: The program was not built with an external dynamic core."
+#endif
+
+end subroutine cism_run_external_dycore
+
+
+end module cism_external_dycore_interface
diff --git a/components/cism/glimmer-cism/cism_driver/cism_front_end.F90 b/components/cism/glimmer-cism/cism_driver/cism_front_end.F90
new file mode 100644
index 0000000000..043487c4b6
--- /dev/null
+++ b/components/cism/glimmer-cism/cism_driver/cism_front_end.F90
@@ -0,0 +1,411 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! cism_front_end.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module cism_front_end
+ ! The CISM front-end is used to connect both the standalone driver
+ ! (cism_driver) or the CISM interface to CESM (cism_cesm_interface),
+ ! to the internal and external dycore interface programs. These are
+  ! cism_internal_dycore_interface and cism_external_dycore_interface.
+
+contains
+
+subroutine cism_init_dycore(model)
+
+ use parallel
+ use glimmer_global
+ use glide
+ use glissade
+ use eismint_forcing
+ use glimmer_log
+ use glimmer_config
+ use glide_nc_custom, only: glide_nc_fillall
+ use glimmer_commandline
+ use glimmer_writestats
+ use glimmer_filenames, only : filenames_init
+ use glide_io, only: glide_io_writeall
+
+ use cism_external_dycore_interface
+
+! use glimmer_to_dycore
+
+ use glide_stop, only: glide_finalise
+ use glide_diagnostics
+
+ implicit none
+
+
+ type(glide_global_type) :: model ! model instance
+ type(ConfigSection), pointer :: config ! configuration stuff
+ real(kind=dp) :: time ! model time in years
+ integer :: clock,clock_rate
+
+ integer*4 external_dycore_model_index
+
+ integer :: wd
+ logical :: do_glide_init
+
+ integer :: tstep_count
+
+ ! print *,'Entering cism_init_dycore'
+
+
+ !TODO - call this only for parallel runs?
+ ! call parallel_initialise
+
+ call glimmer_GetCommandline()
+
+ ! DMR -- open_log call commented out, since called in gci_init_interface()
+ ! start logging
+ ! call open_log(unit=50, fname=logname(commandline_configname))
+
+ ! setup paths
+ call filenames_init(commandline_configname)
+
+ ! read configuration
+ call ConfigRead(commandline_configname,config)
+
+#if (! defined CCSMCOUPLED && ! defined CESMTIMERS)
+ ! start timing
+ call system_clock(clock,clock_rate)
+ wall_start_time = real(clock,kind=dp)/real(clock_rate,kind=dp)
+#else
+ wall_start_time = 0.0
+ wall_stop_time = 0.0
+#endif
+
+ ! initialise profiling
+ call profile_init(model%profile,'glide.profile')
+
+ call t_startf('cism')
+
+ ! initialise GLIDE
+ call t_startf('initialization')
+
+ call glide_config(model,config)
+
+ ! This call is needed only if running the EISMINT test cases
+ call eismint_initialise(model%eismint_climate,config)
+
+ wd = model%options%whichdycore
+! do_glide_init = (wd == DYCORE_GLIDE) .OR. (wd == DYCORE_BISICLES) .OR. (wd == DYCORE_ALBANYFELIX)
+ do_glide_init = (wd == DYCORE_GLIDE)
+
+ if (do_glide_init) then
+ call glide_initialise(model)
+ else ! glam/glissade dycore
+ call glissade_initialise(model)
+ endif
+
+ call CheckSections(config)
+
+ ! fill dimension variables on output files
+ call glide_nc_fillall(model)
+
+ time = model%numerics%tstart
+ tstep_count = 0
+ model%numerics%time = time ! MJH added 1/10/13 - the initial diagnostic glissade solve won't know
+ ! the correct time on a restart unless we set it here.
+
+ ! Set EISMINT forcing for initial time
+ call eismint_massbalance(model%eismint_climate,model,time)
+ call eismint_surftemp(model%eismint_climate,model,time)
+
+ ! read forcing time slice if needed - this will overwrite values from IC file if there is a conflict.
+ call glide_read_forcing(model, model)
+
+ call spinup_lithot(model)
+
+ if (model%options%whichdycore == DYCORE_BISICLES) then
+ call t_startf('init_external_dycore')
+ call cism_init_external_dycore(model%options%external_dycore_type,model)
+ call t_stopf('init_external_dycore')
+ endif
+
+ call t_stopf('initialization')
+
+ if (model%options%whichdycore .ne. DYCORE_BISICLES) then
+ !MJH Created this block here to fill out initial state without needing to enter time stepping loop. This allows
+ ! a run with tend=tstart to be run without time-stepping at all. It requires solving all diagnostic (i.e. not
+ ! time dependent) variables (most important of which is velocity) for the initial state and then writing the
+ ! initial state as time 0 (or more accurately, as time=tstart). Also, halo updates need to occur after the
+ ! diagnostic variables are calculated.
+
+ ! ------------- Calculate initial state and output it -----------------
+
+ call t_startf('initial_diag_var_solve')
+
+ select case (model%options%whichdycore)
+ case (DYCORE_GLIDE)
+
+ if (model%numerics%tstart < (model%numerics%tend - model%numerics%tinc)) then
+ ! disable further profiling in normal usage
+ call t_adj_detailf(+10)
+ endif
+
+ ! Don't call glide_init_state_diagnostic when running old glide
+ ! Instead, start with zero velocity
+ if (.not. oldglide) then
+ call glide_init_state_diagnostic(model)
+ endif
+
+ if (model%numerics%tstart < (model%numerics%tend - model%numerics%tinc)) then
+ ! restore profiling to normal settings
+ call t_adj_detailf(-10)
+ endif
+
+ case (DYCORE_GLAM, DYCORE_GLISSADE, DYCORE_ALBANYFELIX)
+
+ if (model%numerics%tstart < (model%numerics%tend - model%numerics%tinc)) then
+ ! disable further profiling in normal usage
+ call t_adj_detailf(+10)
+ endif
+
+ ! solve the remaining diagnostic variables for the initial state
+ call glissade_diagnostic_variable_solve(model) ! velocity, usrf, etc.
+
+ if (model%numerics%tstart < (model%numerics%tend - model%numerics%tinc)) then
+ ! restore profiling to normal settings
+ call t_adj_detailf(-10)
+ endif
+
+ case default
+
+ end select
+
+ call t_stopf('initial_diag_var_solve')
+
+ ! Write initial diagnostic output to log file
+
+ call t_startf('initial_write_diagnostics')
+ call glide_write_diagnostics(model, time, &
+ tstep_count = tstep_count)
+ call t_stopf('initial_write_diagnostics')
+
+ end if ! whichdycore .ne. DYCORE_BISICLES
+
+
+ ! --- Output the initial state -------------
+
+ call t_startf('initial_io_writeall')
+ call glide_io_writeall(model, model, time=time) ! MJH The optional time argument needs to be supplied
+ ! since we have not yet set model%numerics%time
+ !WHL - model%numerics%time is now set above
+ call t_stopf('initial_io_writeall')
+
+end subroutine cism_init_dycore
+
+
+subroutine cism_run_dycore(model)
+
+ use parallel
+ use glimmer_global
+ use glide
+ use glissade
+ use eismint_forcing
+ use glimmer_log
+ use glimmer_config
+ use glide_nc_custom, only: glide_nc_fillall
+ use glimmer_commandline
+ use glimmer_writestats
+ use glimmer_filenames, only : filenames_init
+ use glide_io, only: glide_io_writeall, glide_io_writeall
+
+ use cism_external_dycore_interface
+
+ use glide_stop, only: glide_finalise
+ use glide_diagnostics
+
+ implicit none
+
+
+ type(glide_global_type) :: model ! model instance
+ type(ConfigSection), pointer :: config ! configuration stuff
+ real(kind=dp) :: time ! model time in years
+ real(kind=dp) :: dt ! current time step to use
+ real(kind=dp) :: time_eps ! tolerance within which times are equal
+ integer :: clock,clock_rate
+ integer :: tstep_count
+
+ integer*4 :: external_dycore_model_index
+
+! external_dycore_model_index = this_rank + 1
+ external_dycore_model_index = 1
+
+ time = model%numerics%tstart
+ tstep_count = 0
+ time_eps = model%numerics%tinc/1000.0d0
+
+ ! ------------- Begin time step loop -----------------
+
+ ! run an internal or external dycore, depending on setting external_dycore_type
+
+ ! check if we're doing any evolution
+ if (time < model%numerics%tend) then
+ do while(time + time_eps < model%numerics%tend)
+
+ ! Increment time step
+ if (model%options%whichdycore /= DYCORE_BISICLES) then
+ time = time + model%numerics%tinc
+ tstep_count = tstep_count + 1
+ model%numerics%time = time ! TODO This is redundant with what is happening in glide/glissade, but this is needed for forcing to work properly.
+ endif
+! print *,"external_dycore_type: ",model%options%external_dycore_type
+
+
+ !if (model%options%external_dycore_type .EQ. 0) then ! NO_EXTERNAL_DYCORE) then
+ ! if (model%options%whichdycore == DYCORE_GLIDE) then
+ call t_startf('tstep')
+
+ select case (model%options%whichdycore)
+ case (DYCORE_GLIDE)
+
+ call t_startf('glide_tstep_p1')
+ call glide_tstep_p1(model,time)
+ call t_stopf('glide_tstep_p1')
+
+ call t_startf('glide_tstep_p2')
+ call glide_tstep_p2(model)
+ call t_stopf('glide_tstep_p2')
+
+ call t_startf('glide_tstep_p3')
+ call glide_tstep_p3(model)
+ call t_stopf('glide_tstep_p3')
+
+ case (DYCORE_GLAM, DYCORE_GLISSADE, DYCORE_ALBANYFELIX)
+ ! glam/glissade dycore
+
+ call glissade_tstep(model,time)
+
+ case (DYCORE_BISICLES)
+ ! print *,'Using External Dycore'
+ ! The time variable gets incremented within this call:
+ dt = model%numerics%tinc
+
+ if (time + dt + time_eps > model%numerics%tend) then
+ dt = model%numerics%tend - time
+ endif
+ call cism_run_external_dycore(model%options%external_dycore_model_index, &
+ time,dt)
+ ! time = time + model%numerics%tinc
+ case default
+ end select
+
+ call t_stopf('tstep')
+ !endif
+
+ ! write ice sheet diagnostics to log file at desired interval (model%numerics%dt_diag)
+
+ call t_startf('write_diagnostics')
+ call glide_write_diagnostics(model, time, &
+ tstep_count = tstep_count)
+ call t_stopf('write_diagnostics')
+
+ ! update time from dycore advance
+ model%numerics%time = time
+
+ ! --- Set forcing ---
+ ! Setting forcing at the end of the time step maintains consistency
+ ! with a forward Euler time step and ensures consistency of the time stamp
+ ! to fields in input and output files.
+ ! For forward Euler time stepping we want S^n+1 = g(S^n, F^n)
+ ! where S is the model state, F is forcing, and n, n+1 are time levels
+ ! We also want a forcing field in the output file to have a time stamp
+ ! that matches its time stamp in the input file or the EISMINT analytic function.
+ ! The simplest way to ensure both of these criteria is to set forcing at the
+ ! end of each time step.
+ ! EISMINT forcing
+ ! NOTE: these only do something when an EISMINT case is run
+ call t_startf('set_forcing')
+ call eismint_massbalance(model%eismint_climate,model,time)
+ call eismint_surftemp(model%eismint_climate,model,time)
+ call t_stopf('set_forcing')
+
+ ! Forcing from a 'forcing' data file - will read time slice if needed
+ call t_startf('read_forcing')
+ call glide_read_forcing(model, model)
+ call t_stopf('read_forcing')
+
+ ! Write to output netCDF files at desired intervals
+ call t_startf('io_writeall')
+ call glide_io_writeall(model,model)
+ call t_stopf('io_writeall')
+ end do ! time < model%numerics%tend
+ else ! no evolution -- diagnostic run, still want to do IO
+ ! (DFM) uncomment this if we want to do an I/O step even if no evolution
+ !call t_startf('glide_io_writeall')
+ !call glide_io_writeall(model,model)
+ !call t_stopf('glide_io_writeall')
+ endif
+
+end subroutine cism_run_dycore
+
+subroutine cism_finalize_dycore(model)
+
+ use parallel
+ use glimmer_global
+ use glide
+ use glissade
+ use glimmer_log
+ use glimmer_config
+ use glide_nc_custom, only: glide_nc_fillall
+ use glimmer_commandline
+ use glimmer_writestats
+ use glimmer_filenames, only : filenames_init
+ use glide_io, only: glide_io_writeall
+
+ use cism_external_dycore_interface
+
+ use glide_stop, only: glide_finalise
+ use glide_diagnostics
+
+ implicit none
+
+ type(glide_global_type) :: model ! model instance
+ integer :: clock,clock_rate
+
+ call t_stopf('cism')
+
+ ! finalise GLIDE
+ call glide_finalise(model)
+
+ !TODO - Do we need to call glimmer_write_stats?
+#if (! defined CCSMCOUPLED && ! defined CESMTIMERS)
+ call system_clock(clock,clock_rate)
+ wall_stop_time = real(clock,kind=dp)/real(clock_rate,kind=dp)
+ call glimmer_write_stats(commandline_resultsname,commandline_configname,wall_stop_time-wall_start_time)
+#endif
+
+ call close_log
+
+ !TODO - call this only for parallel runs?
+ ! call parallel_finalise
+end subroutine cism_finalize_dycore
+
+end module cism_front_end
diff --git a/components/cism/glimmer-cism/cism_driver/eismint_forcing.F90 b/components/cism/glimmer-cism/cism_driver/eismint_forcing.F90
new file mode 100644
index 0000000000..397d5e2281
--- /dev/null
+++ b/components/cism/glimmer-cism/cism_driver/eismint_forcing.F90
@@ -0,0 +1,585 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! eismint_forcing.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module eismint_forcing
+
+ ! read configuration and generate eismint mass balance and
+ ! temperature fields
+
+ use glimmer_global, only : dp
+ use glide_types, only : eismint_climate_type
+
+ !MAKE_RESTART
+#ifdef RESTARTS
+#define RST_EISMINT_FORCING
+!JCC - no restarts yet
+!#include "glimmer_rst_head.inc"
+#undef RST_EISMINT_FORCING
+#endif
+
+contains
+
+#ifdef RESTARTS
+#define RST_EISMINT_FORCING
+!JCC - no restarts yet
+!#include "glimmer_rst_body.inc"
+#undef RST_EISMINT_FORCING
+#endif
+
+ subroutine eismint_initialise(eismint_climate,config)
+
+ ! initialise eismint_climate model
+
+ use glimmer_global, only: dp
+ use glimmer_paramets, only: thk0, scyr, tim0
+ use glimmer_physcon, only: scyr
+ use glimmer_config
+ use glide_types
+ implicit none
+
+ type(eismint_climate_type) :: eismint_climate ! structure holding climate info
+ type(ConfigSection), pointer :: config ! structure holding sections of configuration file
+
+!WHL - The old scaling looked like this: eismint_climate%nmsb(1) = eismint_climate%nmsb(1) / (acc0 * scyr)
+! where acc0 = thk0*vel0/len0.
+! I replaced (acc0 * scyr) with acab_scale = scyr*thk0/tim0, where tim0 = len0/vel0.
+! This is the scaling used in other parts of the code, including Glint.
+! It can be shown (but is not immediately obvious) that acab_scale = acc0 * scyr.
+! This scale factor assumes that the input mass balance has units of m/yr.
+!
+! Note: We should not use the parameter scale_acab in glimmer_scales because
+! it may not have been initialized yet.
+
+ real(dp), parameter :: acab_scale = scyr*thk0/tim0
+
+ call eismint_readconfig(eismint_climate,config)
+ call eismint_printconfig(eismint_climate)
+
+ ! scale parameters
+ ! assumes that eismint_climate%nmsb starts with units of m/yr
+
+ select case(eismint_climate%eismint_type)
+
+ case(1) ! EISMINT-1 fixed margin
+ eismint_climate%nmsb(1) = eismint_climate%nmsb(1) / acab_scale
+
+ case(2) ! EISMINT-1 moving margin
+ eismint_climate%airt(2) = eismint_climate%airt(2) * thk0
+ eismint_climate%nmsb(1) = eismint_climate%nmsb(1) / acab_scale
+ eismint_climate%nmsb(2) = eismint_climate%nmsb(2) / acab_scale
+
+ case(3) ! EISMINT-2
+ eismint_climate%nmsb(1) = eismint_climate%nmsb(1) / acab_scale
+ eismint_climate%nmsb(2) = eismint_climate%nmsb(2) / acab_scale
+
+ case(4) ! MISMIP-1
+ eismint_climate%nmsb(1) = eismint_climate%nmsb(1) / acab_scale
+
+ end select
+
+ end subroutine eismint_initialise
+
+ subroutine eismint_readconfig(eismint_climate, config)
+
+ ! read configuration
+
+ use glimmer_log
+ use glimmer_config
+ implicit none
+
+ type(eismint_climate_type) :: eismint_climate ! structure holding climate info
+ type(ConfigSection), pointer :: config ! structure holding sections of configuration file
+
+ ! local variables
+ type(ConfigSection), pointer :: section
+ real(kind=dp), dimension(:), pointer :: dummy
+
+ call GetSection(config,section,'EISMINT-1 fixed margin')
+ if (associated(section)) then
+ eismint_climate%eismint_type = 1
+ dummy=>NULL()
+ call GetValue(section,'temperature',dummy,2)
+ eismint_climate%airt = (/-34.15d0, 8.d-8/)
+ if (associated(dummy)) then
+ eismint_climate%airt = dummy
+ deallocate(dummy)
+ dummy=>NULL()
+ end if
+ call GetValue(section,'massbalance',dummy,1)
+ eismint_climate%nmsb = (/0.3d0, 0.d0, 0.d0/)
+ if (associated(dummy)) then
+ eismint_climate%nmsb(1) = dummy(1)
+ end if
+ call GetValue(section,'period',eismint_climate%period)
+ call GetValue(section,'mb_amplitude',eismint_climate%mb_amplitude)
+ return
+ end if
+
+ !TODO - I think the default airt values declared above are appropriate for this case.
+ ! Set them here instead.
+
+ call GetSection(config,section,'EISMINT-1 moving margin')
+ if (associated(section)) then
+ eismint_climate%eismint_type = 2
+ dummy=>NULL()
+ call GetValue(section,'temperature',dummy,2)
+ if (associated(dummy)) then
+ eismint_climate%airt = dummy
+ deallocate(dummy)
+ dummy=>NULL()
+ end if
+ call GetValue(section,'massbalance',dummy,3)
+ if (associated(dummy)) then
+ eismint_climate%nmsb = dummy
+ deallocate(dummy)
+ dummy=>NULL()
+ end if
+ call GetValue(section,'period',eismint_climate%period)
+ eismint_climate%mb_amplitude = 100000.d0
+ call GetValue(section,'mb_amplitude',eismint_climate%mb_amplitude)
+ return
+ end if
+
+ call GetSection(config,section,'EISMINT-2')
+ if (associated(section)) then
+ eismint_climate%eismint_type = 3
+ dummy=>NULL()
+ call GetValue(section,'temperature',dummy,2)
+ if (associated(dummy)) then
+ eismint_climate%airt = dummy
+ deallocate(dummy)
+ dummy=>NULL()
+ else
+ eismint_climate%airt = (/-35.d0, 1.67d-5/)
+ end if
+ call GetValue(section,'massbalance',dummy,3)
+ if (associated(dummy)) then
+ eismint_climate%nmsb = dummy
+ deallocate(dummy)
+ dummy=>NULL()
+ end if
+ return
+ end if
+
+ !mismip tests
+
+ !TODO - Assign reasonable default values if not present in config file
+
+ call GetSection(config,section,'MISMIP-1')
+ if (associated(section)) then
+ eismint_climate%eismint_type = 4
+ dummy=>NULL()
+ call GetValue(section,'temperature',dummy,2)
+ if (associated(dummy)) then
+ eismint_climate%airt = dummy
+ deallocate(dummy)
+ dummy=>NULL()
+ end if
+ call GetValue(section,'massbalance',dummy,3)
+ if (associated(dummy)) then
+ eismint_climate%nmsb = dummy
+ deallocate(dummy)
+ dummy=>NULL()
+ end if
+ return
+ end if
+
+ !exact verification
+ !TODO - Is this test currently supported?
+
+ call GetSection(config,section,'EXACT')
+ if (associated(section)) then
+ eismint_climate%eismint_type = 5
+ dummy=>NULL()
+ call GetValue(section,'temperature',dummy,2)
+ if (associated(dummy)) then
+ eismint_climate%airt = dummy
+ deallocate(dummy)
+ dummy=>NULL()
+ end if
+ return
+ end if
+
+ ! Standard higher-order tests
+ ! These do not require EISMINT-type input parameters.
+
+ call GetSection(config,section,'DOME-TEST')
+ if (associated(section)) then
+ return
+ end if
+
+ call GetSection(config,section,'ISMIP-HOM-TEST')
+ if (associated(section)) then
+ return
+ end if
+
+ call GetSection(config,section,'SHELF-TEST')
+ if (associated(section)) then
+ return
+ end if
+
+ call GetSection(config,section,'STREAM-TEST')
+ if (associated(section)) then
+ return
+ end if
+
+ call GetSection(config,section,'ROSS-TEST')
+ if (associated(section)) then
+ return
+ end if
+
+ call GetSection(config,section,'GIS-TEST')
+ if (associated(section)) then
+ return
+ end if
+
+ !TODO - Any other allowed tests to add here?
+
+ ! Abort if one of the above cases has not been specified.
+ call write_log('No EISMINT forcing selected',GM_FATAL)
+
+ end subroutine eismint_readconfig
+
+ subroutine eismint_printconfig(eismint_climate)
+
+ ! print eismint_climate configuration
+
+ use glimmer_log
+ use parallel, only: tasks
+ implicit none
+
+ type(eismint_climate_type) :: eismint_climate ! structure holding climate info
+ character(len=100) :: message
+
+ call write_log_div
+
+ select case(eismint_climate%eismint_type)
+
+ case(1)
+ call write_log('EISMINT-1 fixed margin configuration')
+ call write_log('------------------------------------')
+ write(message,*) 'temperature : ',eismint_climate%airt(1)
+ call write_log(message)
+ write(message,*) ' ',eismint_climate%airt(2)
+ call write_log(message)
+ write(message,*) 'massbalance : ',eismint_climate%nmsb(1)
+ call write_log(message)
+ write(message,*) 'period : ',eismint_climate%period
+ call write_log(message)
+ if (eismint_climate%period .gt. 0.d0) then
+ write(message,*) 'mb amplitude : ',eismint_climate%mb_amplitude
+ call write_log(message)
+ end if
+
+ case(2)
+ call write_log('EISMINT-1 moving margin configuration')
+ call write_log('-------------------------------------')
+ write(message,*) 'temperature : ',eismint_climate%airt(1)
+ call write_log(message)
+ write(message,*) ' ',eismint_climate%airt(2)
+ call write_log(message)
+ write(message,*) 'massbalance : ',eismint_climate%nmsb(1)
+ call write_log(message)
+ write(message,*) ' ',eismint_climate%nmsb(2)
+ call write_log(message)
+ write(message,*) ' ',eismint_climate%nmsb(3)
+ call write_log(message)
+ write(message,*) 'period : ',eismint_climate%period
+ call write_log(message)
+ if (eismint_climate%period .gt. 0.d0) then
+ write(message,*) 'mb amplitude : ',eismint_climate%mb_amplitude
+ call write_log(message)
+ end if
+
+ case(3)
+ call write_log('EISMINT-2')
+ call write_log('---------')
+ write(message,*) 'temperature : ',eismint_climate%airt(1)
+ call write_log(message)
+ write(message,*) ' ',eismint_climate%airt(2)
+ call write_log(message)
+ write(message,*) 'massbalance : ',eismint_climate%nmsb(1)
+ call write_log(message)
+ write(message,*) ' ',eismint_climate%nmsb(2)
+ call write_log(message)
+ write(message,*) ' ',eismint_climate%nmsb(3)
+ call write_log(message)
+ end select
+
+ if ( (eismint_climate%eismint_type > 0) .and. (tasks > 1) ) then
+ call write_log('EISMINT tests are not supported for more than one processor', GM_FATAL)
+ end if
+
+ call write_log('')
+
+ end subroutine eismint_printconfig
+
+  subroutine eismint_massbalance(eismint_climate,model,time)
+
+    ! calculate eismint mass balance
+    !
+    ! Fills model%climate%acab over the whole grid according to the
+    ! experiment selected by eismint_climate%eismint_type:
+    !   1 = EISMINT-1 fixed margin  (uniform acab, optional sinusoidal forcing)
+    !   2 = EISMINT-1 moving margin (acab falls off with distance from centre)
+    !   3 = EISMINT-2               (as 2, but steady in time)
+    !   4 = MISMIP-1                (uniform acab)
+    !   5 = verification against the exact Bueler/Brown/Lingle solutions
+
+!TODO - Remove acc0
+
+ use glimmer_global, only : dp
+ use glide_types
+ use glimmer_paramets, only : len0, acc0, scyr
+ use glimmer_physcon, only : pi
+ use glimmer_scales, only : scale_acab
+ implicit none
+
+ type(eismint_climate_type) :: eismint_climate ! structure holding climate info
+ type(glide_global_type) :: model ! model instance
+ real(dp), intent(in) :: time ! current time
+
+ !WHL - Changed 'periodic_bc' to 'periodic' to avoid a name conflict with parallel modules
+ ! local variables
+ integer :: ns,ew
+ real(dp) :: dist, ewct, nsct, grid, rel
+ real(dp) :: periodic = 1.d0 !TODO - Make this an integer?
+
+ ! grid-centre (i,j) coordinates and grid spacing in metres
+ ewct = (real(model%general%ewn,dp) + 1.d0) / 2.d0
+ nsct = (real(model%general%nsn,dp) + 1.d0) / 2.d0
+ grid = real(model%numerics%dew,dp) * len0
+
+ ! periodic = 0 zeroes the EW term of the distance below, so with
+ ! periodic EW boundary conditions the pattern varies only in NS
+ if (model%options%periodic_ew) then
+ periodic = 0.d0
+ else
+ periodic = 1.d0
+ end if
+
+ select case(eismint_climate%eismint_type)
+
+ case(1)
+ ! EISMINT-1 fixed margin
+ model%climate%acab(:,:) = eismint_climate%nmsb(1)
+ ! optional sinusoidal forcing with period eismint_climate%period
+ if (eismint_climate%period .ne. 0.d0) then
+ model%climate%acab(:,:) = model%climate%acab(:,:) + eismint_climate%mb_amplitude * sin(2.d0*pi*time/eismint_climate%period)/ (acc0 * scyr)
+! model%climate%acab(:,:) = model%climate%acab(:,:) + climate%mb_amplitude * sin(2.d0*pi*time/climate%period) / scale_acab
+ end if
+
+ case(2)
+ ! EISMINT-1 moving margin
+ ! rel is the (possibly time-varying) equilibrium-line radius
+ if (eismint_climate%period .ne. 0.d0) then
+ rel = eismint_climate%nmsb(3) + eismint_climate%mb_amplitude*sin(2.d0*pi*time/eismint_climate%period)
+ else
+ rel = eismint_climate%nmsb(3)
+ end if
+
+ ! acab grows linearly toward the divide, capped at nmsb(1)
+ do ns = 1,model%general%nsn
+ do ew = 1,model%general%ewn
+ dist = grid * sqrt(periodic*(real(ew,kind=dp) - ewct)**2 + (real(ns,kind=dp) - nsct)**2)
+ model%climate%acab(ew,ns) = min(eismint_climate%nmsb(1), eismint_climate%nmsb(2) * (rel - dist))
+ end do
+ end do
+
+ case(3)
+ ! EISMINT-2
+ ! same spatial pattern as case(2) but with a fixed equilibrium line
+ rel = eismint_climate%nmsb(3)
+
+ do ns = 1,model%general%nsn
+ do ew = 1,model%general%ewn
+ dist = grid * sqrt(periodic*(real(ew,kind=dp) - ewct)**2 + (real(ns,kind=dp) - nsct)**2)
+ model%climate%acab(ew,ns) = min(eismint_climate%nmsb(1), eismint_climate%nmsb(2) * (rel - dist))
+ end do
+ end do
+
+ case(4)
+ !mismip 1
+ model%climate%acab = eismint_climate%nmsb(1)
+
+ case(5)
+ !verification
+ ! which_call = 1.d0 requests the mass-balance part of the exact solution
+ call exact_surfmass(eismint_climate,model,time,1.d0,eismint_climate%airt(2))
+
+ end select
+
+ end subroutine eismint_massbalance
+
+  subroutine eismint_surftemp(eismint_climate,model,time)
+
+    ! calculate eismint air surface temperature
+    !
+    ! Fills model%climate%artm according to eismint_climate%eismint_type:
+    !   1 = EISMINT-1 fixed margin  (cubic in distance from the divide)
+    !   2 = EISMINT-1 moving margin (linear in ice thickness)
+    !   3 = EISMINT-2               (linear in distance from the divide)
+    !   4 = MISMIP-1                (uniform)
+    !   5 = verification against the exact Bueler/Brown/Lingle solutions
+
+ use glide_types
+ use glimmer_global, only: dp
+ use glimmer_paramets, only : len0
+ use glimmer_physcon, only : pi
+ implicit none
+
+ type(eismint_climate_type) :: eismint_climate ! structure holding climate info
+ type(glide_global_type) :: model ! model instance
+ real(dp), intent(in) :: time ! current time
+
+ ! local variables
+ integer :: ns,ew
+ real(dp) :: dist, ewct, nsct, grid
+ real(dp) :: periodic = 1.d0
+
+ ! grid-centre (i,j) coordinates and grid spacing in metres
+ ewct = (real(model%general%ewn,dp)+1.d0) / 2.d0
+ nsct = (real(model%general%nsn,dp)+1.d0) / 2.d0
+ grid = real(model%numerics%dew,dp) * len0
+
+ ! periodic = 0 zeroes the EW term of the distance below (periodic EW BCs)
+ if (model%options%periodic_ew) then
+ periodic = 0.d0
+ else
+ periodic = 1.d0
+ end if
+
+ select case(eismint_climate%eismint_type)
+
+ case(1)
+ ! EISMINT-1 fixed margin
+ ! distance is max(|dx|,|dy|) in km (note 1d-3); artm cubic in it
+ do ns = 1,model%general%nsn
+ do ew = 1,model%general%ewn
+ dist = grid * max(periodic*abs(real(ew,kind=dp) - ewct),abs(real(ns,kind=dp) - nsct))*1d-3
+ model%climate%artm(ew,ns) = eismint_climate%airt(1) + eismint_climate%airt(2) * dist*dist*dist
+ end do
+ end do
+ ! optional sinusoidal forcing, fixed 10-degree amplitude
+ if (eismint_climate%period .ne. 0.d0) then
+ model%climate%artm(:,:) = model%climate%artm(:,:) + 10.d0*sin(2.d0*pi*time/eismint_climate%period)
+ end if
+
+ case(2)
+ ! EISMINT-1 moving margin
+ ! artm decreases linearly with local ice thickness (rate airt(2))
+ model%climate%artm(:,:) = eismint_climate%airt(1) - model%geometry%thck(:,:) * eismint_climate%airt(2)
+ if (eismint_climate%period .ne. 0.d0) then
+ model%climate%artm(:,:) = model%climate%artm(:,:) + 10.d0*sin(2.d0*pi*time/eismint_climate%period)
+ end if
+
+ case(3)
+ ! EISMINT-2
+ ! radial distance from the divide; artm linear in it
+ do ns = 1,model%general%nsn
+ do ew = 1,model%general%ewn
+ dist = grid * sqrt(periodic*(real(ew,kind=dp) - ewct)**2 + (real(ns,kind=dp) - nsct)**2)
+ model%climate%artm(ew,ns) = eismint_climate%airt(1)+eismint_climate%airt(2) * dist
+ end do
+ end do
+
+ case(4)
+ model%climate%artm = eismint_climate%airt(1)
+
+ case(5)
+ !call both massbalance and surftemp at the same time to save computing time.
+ ! which_call = 0.d0 requests the surface-temperature part of the exact solution
+ call exact_surfmass(eismint_climate,model,time,0.d0,eismint_climate%airt(2))
+ end select
+
+ end subroutine eismint_surftemp
+
+ !which_call - eismint_surftemp(0)/eismint_massbalance(1)/both(2)
+ !which_test - test f(0)/test g(1)/exact(2)
+
+ subroutine exact_surfmass(eismint_climate,model,time,which_call,which_test)
+
+ use glide_types
+ use testsFG
+ implicit none
+
+ type(eismint_climate_type) :: eismint_climate ! structure holding climate info
+ type(glide_global_type) :: model ! model instance
+ real(dp), intent(in) :: time ! current time
+ real(dp), intent(in) :: which_test ! Which exact test (F=0, G=1)
+ real(dp), intent(in) :: which_call ! 0 = surface temp, 1 = mass balance
+ integer :: ns,ew,lev,center
+
+ !verification
+ real(dp) :: r, z, x, y !in variables
+ real(dp) :: H, TT, U, w, Sig, M, Sigc !out variables
+ real(dp) :: H_0
+
+ center = (model%general%ewn - 1) * 0.5
+
+ !TODO - Change which_call to an integer?
+ ! Modify for Glissade? (dissip has smaller vertical dimension)
+ if (which_call .eq. 0.d0 .or. which_call .eq. 2.d0) then
+
+ !point by point call to the function
+ do ns = 1,model%general%nsn
+ do ew = 1,model%general%ewn
+ x = (ew - center)*model%numerics%dew
+ y = (ns - center)*model%numerics%dns
+ r = sqrt(x**2 + y**2)
+ do lev = 1, model%general%upn
+ z = model%geometry%thck(ew,ns)*model%numerics%sigma(lev)
+ !the function only returns values within the radius
+ if(r>0.d0 .and. r0.d0 .and. r.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+! from glide_types.F90:
+! integer, parameter :: DYCORE_GLIDE = 0 ! old shallow-ice dycore from Glimmer
+! integer, parameter :: DYCORE_GLAM = 1 ! Payne-Price finite-difference solver
+! integer, parameter :: DYCORE_GLISSADE = 2 ! prototype finite-element solver
+! integer, parameter :: DYCORE_ALBANYFELIX = 3 ! External Albany-Felix finite-element solver
+! integer, parameter :: DYCORE_BISICLES = 4 ! BISICLES external dycore
+
+module gcm_cism_interface
+
+ use parallel
+ use glint_commandline
+ use glide
+ use cism_front_end
+
+ use glint_example_clim
+ use glint_main
+ use gcm_to_cism_glint
+
+
+ integer, parameter :: GCM_MINIMAL_MODEL = 0
+ integer, parameter :: GCM_DATA_MODEL = 1
+ integer, parameter :: GCM_CESM = 2
+
+contains
+
+subroutine gci_init_interface(which_gcm,g2c)
+ ! Initialize the GCM/CISM interface.
+ !
+ ! Reads the config file named on the command line, reports the dycore
+ ! option, decides the coupling mode (data model if a 'GLINT climate'
+ ! section exists, otherwise minimal model), stores the choice in
+ ! g2c%which_gcm, and initializes the corresponding model.
+ !
+ ! which_gcm - requested GCM type. NOTE(review): currently unused; the
+ !             mode is derived from the config file instead -- confirm.
+ ! g2c       - container for all GCM/CISM coupling state
+ use parallel
+ use glint_commandline
+ use glimmer_config
+ use glide
+ use glide_types
+
+ use cism_front_end
+
+ integer, intent(in) :: which_gcm
+ type(gcm_to_cism_type) :: g2c ! holds everything
+
+ integer :: whichdycore
+ type(ConfigSection), pointer :: config ! configuration stuff
+ type(ConfigSection), pointer :: section !< pointer to the section to be checked
+
+ ! call parallel_initialise
+
+ ! get the CISM dycore to be used:
+ call glint_GetCommandline()
+ call open_log(unit=50, fname=logname(commandline_configname))
+ call ConfigRead(commandline_configname,config)
+ call GetSection(config,section,'options')
+ call GetValue(section,'dycore',whichdycore)
+ if (main_task) print *,'CISM dycore type (0=Glide, 1=Glam, 2=Glissade, 3=AlbanyFelix, 4 = BISICLES) = ', whichdycore
+
+ ! check to see if running minimal GCM or data GCM. Still need to add CESM GCM:
+ call GetSection(config,section,'GLINT climate')
+
+ if (associated(section)) then
+ g2c%which_gcm = GCM_DATA_MODEL
+ else
+ g2c%which_gcm = GCM_MINIMAL_MODEL
+ end if
+ ! NOTE(review): the legend in this message looks inconsistent with the
+ ! parameters (GCM_MINIMAL_MODEL=0, GCM_DATA_MODEL=1) -- confirm
+ if (main_task) print *,'g2c%which_gcm (1 = data, 2 = minimal) = ',g2c%which_gcm
+
+ select case (g2c%which_gcm)
+ case (GCM_MINIMAL_MODEL)
+ if (main_task) print*, 'call cism_init_dycore'
+ call cism_init_dycore(g2c%glide_model)
+
+ case (GCM_DATA_MODEL)
+ if (main_task) print*, 'call g2c_glint_init'
+ call g2c_glint_init(g2c)
+
+ case (GCM_CESM)
+ ! call gcm_glint_GetCommandline_proxy()
+ ! call g2c_glint_init(g2c)
+
+ case default
+ if (main_task) print *,"Error -- unknown GCM type."
+ end select
+
+end subroutine gci_init_interface
+
+subroutine gci_run_model(g2c)
+
+ ! Step the selected model until gci_finished reports completion.
+ !
+ ! Minimal model: cism_run_dycore performs the run inside one call.
+ ! Data/CESM models: each pass runs one Glint step, then advances the
+ ! coupling clock by one climate timestep.
+
+ type(gcm_to_cism_type) :: g2c
+
+ logical :: finished
+
+ ! Bug fix: 'finished' was initialized in its declaration
+ ! ('logical :: finished = .false.'), which in Fortran implies the SAVE
+ ! attribute -- on any second call the value .true. left over from the
+ ! previous run would make the loop exit immediately. Assign at run
+ ! time instead.
+ finished = .false.
+
+ do while (.not. finished)
+ select case (g2c%which_gcm)
+ case (GCM_MINIMAL_MODEL)
+ ! call gcm_update_model(gcm_model,cism_model)
+! if (main_task) print *,"In gci_run_model, calling cism_run_dycore"
+ call cism_run_dycore(g2c%glide_model)
+
+ case (GCM_DATA_MODEL,GCM_CESM)
+! if (main_task) print *,"In gci_run_model, calling g2c_glint_run"
+ call g2c_glint_run(g2c)
+ call g2c_glint_climate_time_step(g2c)
+ case default
+ end select
+ finished = (gci_finished(g2c))
+ end do
+end subroutine gci_run_model
+
+
+! gci_finished is used to test status of GCM
+function gci_finished(g2c) result(finished)
+
+ ! Return .true. when the configured run is complete.
+ !
+ ! Minimal model: one pass through gci_run_model is the whole run, so it
+ ! always reports finished. Data/CESM models: delegate the decision to
+ ! g2c_glint_check_finished (model time vs. configured run length).
+
+ type(gcm_to_cism_type) :: g2c
+ logical :: finished
+
+ ! Bug fix: initialize the result so it is defined even when
+ ! g2c%which_gcm matches none of the known models; previously the
+ ! 'case default' branch left 'finished' unset (undefined), which could
+ ! leave gci_run_model looping forever. Defaulting to .true. makes an
+ ! unknown model terminate rather than spin.
+ finished = .true.
+
+ select case (g2c%which_gcm)
+ case (GCM_MINIMAL_MODEL)
+ finished = .true.
+
+ case (GCM_DATA_MODEL,GCM_CESM)
+ call g2c_glint_check_finished(g2c,finished)
+ case default
+ end select
+ !if (main_task) print *,"In gci_finished, finished = ",finished
+
+end function gci_finished
+
+
+subroutine gci_finalize_interface(g2c)
+
+ ! Shut down whichever model gci_init_interface started.
+ ! Minimal model -> cism_finalize_dycore; data model -> g2c_glint_end.
+ ! CESM finalization is present only as a commented-out stub.
+
+ type(gcm_to_cism_type) :: g2c
+
+ select case (g2c%which_gcm)
+ case (GCM_MINIMAL_MODEL)
+ call cism_finalize_dycore(g2c%glide_model)
+
+ case (GCM_DATA_MODEL)
+ call g2c_glint_end(g2c)
+
+ case (GCM_CESM)
+ ! call g2c_glint_end(g2c)
+ case default
+ end select
+
+end subroutine gci_finalize_interface
+
+
+end module gcm_cism_interface
diff --git a/components/cism/glimmer-cism/cism_driver/gcm_to_cism_glint.F90 b/components/cism/glimmer-cism/cism_driver/gcm_to_cism_glint.F90
new file mode 100644
index 0000000000..f924609ef3
--- /dev/null
+++ b/components/cism/glimmer-cism/cism_driver/gcm_to_cism_glint.F90
@@ -0,0 +1,433 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! gcm_to_cism_glint.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module gcm_to_cism_glint
+
+ ! This module demonstrates the use of the Glint interface.
+ ! It loads in some example global fields and associated grid data,
+ ! initialises the model, and then runs it for a user-prescribed period.
+ ! The surface mass balance can be computed either with a PDD scheme
+ ! (as in the original Glimmer code), or with a crude scheme
+ ! that imitates SMB input from a climate model.
+
+ use glimmer_global, only: dp
+ use glint_main
+ use glimmer_log
+ use glint_global_interp
+ use glint_example_clim
+ use glint_commandline
+ use glimmer_writestats
+! use glimmer_commandline
+ use glimmer_paramets, only: GLC_DEBUG
+ use parallel, only: main_task
+
+type gcm_to_cism_type
+
+ ! Program variables -------------------------------------------------------------------
+
+ integer :: which_gcm = 0 ! type of global climate model being used, 0=minimal model, 1=data model, 2=CESM model
+ type(glide_global_type) :: glide_model ! ice sheet model used for glide
+
+ type(glint_params) :: ice_sheet ! This is the derived type variable that holds all
+ ! domains of the ice model
+ type(glex_climate) :: climate ! Climate parameters and fields
+
+ ! Arrays which hold the global fields used as input to Glint ------------------------
+
+ real(dp),dimension(:,:),pointer :: temp => null() ! Temperature (degC)
+ real(dp),dimension(:,:),allocatable :: precip ! Precipitation (mm/s)
+ real(dp),dimension(:,:),allocatable :: orog ! Orography (m)
+
+ ! Arrays which hold information about the ice model instances -------------------------
+
+ real(dp),dimension(:,:),allocatable :: coverage ! Coverage map for normal global grid
+ real(dp),dimension(:,:),allocatable :: cov_orog ! Coverage map for orography grid
+
+ ! Arrays which hold output from the model ---------------------------------------------
+ ! These are all on the normal global grid, except for the orography
+
+ real(dp),dimension(:,:),allocatable :: albedo ! Fractional albedo
+ real(dp),dimension(:,:),allocatable :: orog_out ! Output orography (m)
+ real(dp),dimension(:,:),allocatable :: ice_frac ! Ice coverage fraction
+ real(dp),dimension(:,:),allocatable :: fw ! Freshwater output flux (mm/s)
+ real(dp),dimension(:,:),allocatable :: fw_in ! Freshwater input flux (mm/s)
+
+ ! Arrays which hold information about the global grid ---------------------------------
+
+ real(dp),dimension(:), allocatable :: lats_orog ! Latitudes of global orography gridpoints
+ real(dp),dimension(:), allocatable :: lons_orog ! Longitudes of global oropraphy gridpoints
+
+ ! Scalars which hold information about the global grid --------------------------------
+
+ integer :: nx,ny ! Size of normal global grid
+ integer :: nxo,nyo ! Size of global orography grid
+
+ ! Scalar model outputs ----------------------------------------------------------------
+
+ real(dp) :: twin ! Timestep-integrated input water flux (kg)
+ real(dp) :: twout ! Timestep-integrated output water flux (kg)
+ real(dp) :: ice_vol ! Total ice volume (m^3)
+
+ ! Other variables ---------------------------------------------------------------------
+
+ logical :: out ! Outputs set flag
+ integer :: i,j ! Array index counters
+ integer :: time ! Current time (hours)
+ real(dp):: t1,t2
+ integer :: clock,clock_rate
+
+ ! fields passed to and from a GCM
+ ! (useful for testing the GCM subroutines in standalone mode)
+ !
+ ! Note that, for fields that possess a third dimension, this dimension is the elevation
+ ! class. Elevation class goes from 0 to glc_nec, where class 0 represents the bare land
+ ! "elevation class".
+
+ real(dp),dimension(:,:,:), allocatable :: qsmb ! surface mass balance (kg/m^2/s)
+ real(dp),dimension(:,:,:), allocatable :: tsfc ! surface temperature (degC)
+ real(dp),dimension(:,:,:), allocatable :: topo ! surface elevation (m)
+
+ real(dp),dimension(:,:,:), allocatable :: gfrac ! fractional glacier area [0,1]
+ real(dp),dimension(:,:,:), allocatable :: gtopo ! glacier surface elevation (m)
+ real(dp),dimension(:,:,:), allocatable :: ghflx ! heat flux from glacier interior, positive down (W/m^2)
+ real(dp),dimension(:,:), allocatable :: grofi ! ice runoff (calving) flux (kg/m^2/s)
+ real(dp),dimension(:,:), allocatable :: grofl ! ice runoff (liquid) flux (kg/m^2/s)
+ real(dp),dimension(:,:), allocatable :: ice_sheet_grid_mask ! mask of ice sheet grid coverage
+ real(dp),dimension(:,:), allocatable :: icemask_coupled_fluxes ! mask of ice sheet grid coverage where we are potentially sending non-zero fluxes
+
+ integer :: glc_nec ! , parameter :: glc_nec = 10 ! number of elevation classes
+
+ real(dp),dimension(:), allocatable :: glc_topomax
+! dimension(0:integer(glc_nec)) :: &
+! glc_topomax = (/ 0.d0, 200.d0, 400.d0, 700.d0, 1000.d0, 1300.d0, &
+! 1600.d0, 2000.d0, 2500.d0, 3000.d0, 10000.d0 /) ! upper limit of each class (m)
+
+ logical :: ice_tstep ! true if ice timestep was done
+ logical :: output_flag ! true if outputs have been set
+
+ ! from glint_commandline.F90:
+ character(len=5000) :: commandline_history !< complete command line
+ character(len=fname_length) :: commandline_configname !< name of the configuration file
+ character(len=fname_length) :: commandline_results_fname !< name of results file
+ character(len=fname_length) :: commandline_climate_fname !< name of climate configur
+
+end type gcm_to_cism_type
+
+ logical, parameter :: verbose_glint = .true. ! set to true for debugging
+
+contains
+
+subroutine g2c_glint_init(g2c)
+
+ ! Initialise glint
+ !
+ ! Reads the command line and the climate configuration, sizes and
+ ! allocates the global climate/coupling arrays held in g2c, and then
+ ! initialises Glint either in GCM-SMB mode (initialise_glint_gcm) or
+ ! in standard mode (initialise_glint), depending on
+ ! g2c%climate%gcm_smb. Finally fetches the coverage maps and sets the
+ ! coupling clock g2c%time to one climate timestep.
+
+ implicit none
+
+ type(gcm_to_cism_type) :: g2c
+
+ integer :: i,j ! Array index counters
+
+ ! -------------------------------------------------------------------------------------
+ ! Executable code starts here - Basic initialisation
+ ! -------------------------------------------------------------------------------------
+ integer, parameter :: glc_nec = 10 ! number of elevation classes
+
+ real(dp), dimension(0:glc_nec) :: &
+ glc_topomax = (/ 0.d0, 200.d0, 400.d0, 700.d0, 1000.d0, 1300.d0, &
+ 1600.d0, 2000.d0, 2500.d0, 3000.d0, 10000.d0 /) ! upper limit of each class (m)
+
+ ! copy the elevation-class constants into the coupling structure
+ ! (assignment auto-allocates the allocatable g2c%glc_topomax)
+ g2c%glc_nec = glc_nec
+ g2c%glc_topomax = glc_topomax
+
+ g2c%which_gcm = 1
+
+ call glint_GetCommandline()
+
+ ! cache the command-line results in g2c
+ ! NOTE(review): the source names commandline_resultsname /
+ ! commandline_climatename come from glint_commandline -- confirm they
+ ! match that module's declarations
+ g2c%commandline_history = commandline_history !< complete command line
+ g2c%commandline_configname = commandline_configname !< name of the configuration file
+ g2c%commandline_results_fname = commandline_resultsname !< name of results file
+ g2c%commandline_climate_fname = commandline_climatename !< name of climate configuration
+
+ ! start the wall clock; g2c_glint_end stops it and reports elapsed time
+ call system_clock(g2c%clock,g2c%clock_rate)
+ g2c%t1 = real(g2c%clock,kind=dp)/real(g2c%clock_rate,kind=dp)
+ !print *,"g2c%clock, g2c%clock_rate, t1",g2c%clock,g2c%clock_rate,g2c%t1
+
+ if (verbose_glint .and. main_task) print*, 'call glex_clim_init'
+
+ ! Initialise climate
+
+ call glex_clim_init(g2c%climate,g2c%commandline_climate_fname)
+
+ ! Set dimensions of global grids
+
+ call get_grid_dims(g2c%climate%clim_grid,g2c%nx,g2c%ny) ! Normal global grid
+ g2c%nxo=200 ; g2c%nyo=100 ! Example grid used for orographic output
+
+!print *,"g2c% nxo, nyo, nx, ny: ",g2c%nxo,g2c%nyo,g2c%nx,g2c%ny,nxo,nyo
+
+ ! start logging
+! call open_log(unit=101, fname=logname(g2c%commandline_configname))
+
+ if (verbose_glint .and. main_task) then
+ print*, ' '
+ print*, 'Starting glint_example:'
+ print*, 'climatename = ', trim(g2c%commandline_climate_fname)
+ print*, 'configname = ', trim(g2c%commandline_configname)
+ print*, 'climate%gcm_smb:', g2c%climate%gcm_smb
+ print*, ' '
+ endif
+
+ ! Allocate global arrays
+
+ allocate(g2c%temp(g2c%nx,g2c%ny),g2c%precip(g2c%nx,g2c%ny),g2c%orog(g2c%nx,g2c%ny))
+ allocate(g2c%coverage(g2c%nx,g2c%ny),g2c%orog_out(g2c%nxo,g2c%nyo),g2c%albedo(g2c%nx,g2c%ny))
+!!Check this:
+ allocate(g2c%ice_frac(g2c%nx,g2c%ny),g2c%fw(g2c%nx,g2c%ny))
+ allocate(g2c%lats_orog(g2c%nyo),g2c%lons_orog(g2c%nxo),g2c%cov_orog(g2c%nxo,g2c%nyo),g2c%fw_in(g2c%nx,g2c%ny))
+
+ ! Initialize global arrays
+
+ g2c%temp = 0.d0
+ g2c%precip = 0.d0
+ g2c%albedo = 0.d0
+ g2c%orog_out = 0.d0
+ g2c%orog = real(g2c%climate%orog_clim,dp) ! Put orography where it belongs
+
+ ! Allocate and initialize GCM arrays
+
+ if (g2c%climate%gcm_smb) then
+
+ ! input from GCM
+ ! (third dimension is elevation class; class 0 = bare land)
+ allocate(g2c%tsfc(g2c%nx,g2c%ny, 0:g2c%glc_nec))
+ allocate(g2c%qsmb(g2c%nx,g2c%ny, 0:g2c%glc_nec))
+ allocate(g2c%topo(g2c%nx,g2c%ny, 0:g2c%glc_nec))
+
+ g2c%tsfc(:,:,:) = 0.d0
+ g2c%qsmb(:,:,:) = 0.d0
+ g2c%topo(:,:,:) = 0.d0
+
+ ! output to GCM
+ allocate(g2c%gfrac(g2c%nx,g2c%ny, 0:g2c%glc_nec))
+ allocate(g2c%gtopo(g2c%nx,g2c%ny, 0:g2c%glc_nec))
+ allocate(g2c%ghflx(g2c%nx,g2c%ny, 0:g2c%glc_nec))
+ allocate(g2c%grofi(g2c%nx,g2c%ny))
+ allocate(g2c%grofl(g2c%nx,g2c%ny))
+ allocate(g2c%ice_sheet_grid_mask(g2c%nx,g2c%ny))
+ allocate(g2c%icemask_coupled_fluxes(g2c%nx,g2c%ny))
+
+ g2c%gfrac(:,:,:) = 0.d0
+ g2c%gtopo(:,:,:) = 0.d0
+ g2c%ghflx(:,:,:) = 0.d0
+ g2c%grofi(:,:) = 0.d0
+ g2c%grofl(:,:) = 0.d0
+ g2c%ice_sheet_grid_mask(:,:) = 0.d0
+ g2c%icemask_coupled_fluxes(:,:) = 0.d0
+
+ endif
+
+ ! Set up global grids ----------------------------------------------------------------
+
+ ! Calculate example orographic latitudes
+
+ do j=1,g2c%nyo
+ g2c%lats_orog(j) = -(180.d0/g2c%nyo)*j + 90.d0 + (90.d0/g2c%nyo)
+ enddo
+
+ ! Calculate example orographic longitudes
+
+ do i=1,g2c%nxo
+ g2c%lons_orog(i) = (360.d0/g2c%nxo)*i - (180.d0/g2c%nxo)
+ enddo
+
+ ! Initialise the ice model
+
+ if (g2c%climate%gcm_smb) then ! act as if we are receiving the SMB from a GCM
+
+ call initialise_glint_gcm(g2c%ice_sheet, &
+ g2c%climate%clim_grid%lats, &
+ g2c%climate%clim_grid%lons, &
+ g2c%climate%climate_tstep, &
+ (/g2c%commandline_configname/), &
+ daysinyear=g2c%climate%days_in_year, &
+ glc_nec = g2c%glc_nec, &
+ gfrac = g2c%gfrac, &
+ gtopo = g2c%gtopo, &
+ grofi = g2c%grofi, &
+ grofl = g2c%grofl, &
+ ghflx = g2c%ghflx, &
+ ice_sheet_grid_mask = g2c%ice_sheet_grid_mask, &
+ icemask_coupled_fluxes = g2c%icemask_coupled_fluxes)
+
+ else ! standard Glint initialization
+
+ call initialise_glint(g2c%ice_sheet, &
+ g2c%climate%clim_grid%lats, &
+ g2c%climate%clim_grid%lons, &
+ g2c%climate%climate_tstep, &
+ (/g2c%commandline_configname/), &
+ orog=g2c%orog_out, &
+ albedo=g2c%albedo, &
+ ice_frac=g2c%ice_frac, &
+ orog_longs=g2c%lons_orog, &
+ orog_lats=g2c%lats_orog, &
+ daysinyear=g2c%climate%days_in_year)
+
+ endif ! gcm_smb
+
+ ! Set the message level (1 is the default - only fatal errors)
+ ! N.B. Must do this after initialisation
+
+ call glimmer_set_msg_level(6)
+
+ ! Get coverage maps for the ice model instances
+
+ if (g2c%climate%gcm_smb) then ! not using cov_orog
+ if (glint_coverage_map(g2c%ice_sheet, g2c%coverage) .ne. 0) then
+ call write_log('Unable to get coverage maps',GM_FATAL,__FILE__,__LINE__)
+ stop
+ endif
+ else
+ if (glint_coverage_map(g2c%ice_sheet, g2c%coverage, g2c%cov_orog) .ne. 0) then
+ call write_log('Unable to get coverage maps',GM_FATAL,__FILE__,__LINE__)
+ stop
+ endif
+ endif
+
+ g2c%time = g2c%climate%climate_tstep ! time in integer hours
+
+! if (main_task) print*, 'Done in g2c_glint_init'
+
+end subroutine g2c_glint_init
+
+
+subroutine g2c_glint_run(g2c)
+
+ ! Run one coupling step at the current g2c%time.
+ !
+ ! Generates the current temperature/precip forcing, then calls Glint
+ ! either through the GCM-SMB pathway (compute_gcm_smb + glint_gcm) or
+ ! the standard pathway (glint), depending on g2c%climate%gcm_smb.
+ ! The clock itself is advanced separately by g2c_glint_climate_time_step.
+
+ type(gcm_to_cism_type) :: g2c
+
+ ! Do timesteps ---------------------------------------------------------------------------
+
+ !TODO - Timestepping as in simple_glide? Initialize with time = 0, then update time right after 'do'
+ ! This would require changing some time logic inside the Glint subroutines.
+
+! g2c%time = g2c%climate%climate_tstep ! time in integer hours
+
+! do
+
+ ! The SMB is computed crudely for now, just to test the GCM interfaces.
+ ! At some point we could read in a realistic SMB as in CESM TG runs.
+
+ ! get current temp and precip fields
+
+ call example_climate(g2c%climate, g2c%precip, g2c%temp, real(g2c%time,dp))
+
+ if (g2c%climate%gcm_smb) then ! act as if we are receiving the SMB from a GCM
+
+ !TODO - For some reason, the gcm code is much slower than the pdd code.
+ ! Figure out why.
+
+ ! call a simple subroutine to estimate qsmb and tsfc in different elevation classes
+
+ call compute_gcm_smb(g2c%temp, g2c%precip, &
+ g2c%orog, &
+ g2c%qsmb, g2c%tsfc, &
+ g2c%topo, &
+ g2c%glc_nec, g2c%glc_topomax)
+
+ call glint_gcm (g2c%ice_sheet, g2c%time, &
+ g2c%qsmb, g2c%tsfc, &
+ g2c%topo, &
+ output_flag = g2c%output_flag, &
+ ice_tstep = g2c%ice_tstep, &
+ gfrac = g2c%gfrac, &
+ gtopo = g2c%gtopo, &
+ grofi = g2c%grofi, &
+ grofl = g2c%grofl, &
+ ghflx = g2c%ghflx, &
+ ice_sheet_grid_mask = g2c%ice_sheet_grid_mask, &
+ icemask_coupled_fluxes = g2c%icemask_coupled_fluxes)
+
+ else ! standard Glint timestepping
+
+ call glint(g2c%ice_sheet, g2c%time, g2c%temp, g2c%precip, g2c%orog, &
+ orog_out=g2c%orog_out, albedo=g2c%albedo, output_flag=g2c%out, &
+ ice_frac=g2c%ice_frac, water_out=g2c%fw, water_in=g2c%fw_in, &
+ total_water_in=g2c%twin, total_water_out=g2c%twout, ice_volume=g2c%ice_vol)
+
+ endif ! gcm_smb
+
+ !g2c%time = g2c%time + g2c%climate%climate_tstep
+ ! if (g2c%time > g2c%climate%total_years*g2c%climate%hours_in_year) exit
+
+! end do ! main timestep loop
+
+ if (GLC_DEBUG) then
+ ! Print time so as to have something to watch while the code runs
+ ! (8760 = hours per year; message appears once per model year)
+ if (mod(real(g2c%time,dp),8760.d0) < 0.01) print*, 'time (yr) =', real(g2c%time,dp)/8760.d0
+ end if
+end subroutine g2c_glint_run
+
+
+subroutine g2c_glint_climate_time_step(g2c)
+ ! Advance the coupling clock by one climate timestep (g2c%time is in hours).
+ type(gcm_to_cism_type) :: g2c
+
+ g2c%time = g2c%time + g2c%climate%climate_tstep
+end subroutine g2c_glint_climate_time_step
+
+subroutine g2c_glint_check_finished(g2c,finished)
+ ! Report whether the run is complete: true once the model time (hours)
+ ! exceeds the configured run length (years * hours-per-year).
+ type(gcm_to_cism_type) :: g2c
+ logical :: finished
+
+ finished = (g2c%time > g2c%climate%total_years*g2c%climate%hours_in_year)
+
+end subroutine g2c_glint_check_finished
+
+
+subroutine g2c_glint_end(g2c)
+ ! Finalize Glint and write timing statistics: stops the wall clock
+ ! started in g2c_glint_init (g2c%t1) and writes the elapsed time to
+ ! the results file.
+ type(gcm_to_cism_type) :: g2c
+
+ ! Finalise/tidy up everything -----------------------------------------------------------
+
+ call end_glint(g2c%ice_sheet)
+ call system_clock(g2c%clock,g2c%clock_rate)
+ ! Bug fix: the stop time was assigned to a bare (implicitly typed)
+ ! local 't2', so the 'g2c%t2 - g2c%t1' below used a stale/undefined
+ ! g2c%t2. Store the stop time in the structure, mirroring g2c%t1.
+ g2c%t2 = real(g2c%clock,kind=dp)/real(g2c%clock_rate,kind=dp)
+ call glimmer_write_stats(g2c%commandline_results_fname,g2c%commandline_configname,g2c%t2-g2c%t1)
+
+ ! 101 format(e12.5)
+
+end subroutine g2c_glint_end
+
+!---------------------------------------------------------------------------------
+
+
+end module gcm_to_cism_glint
diff --git a/components/cism/glimmer-cism/cism_driver/testsfg.F90 b/components/cism/glimmer-cism/cism_driver/testsfg.F90
new file mode 100644
index 0000000000..50988426c4
--- /dev/null
+++ b/components/cism/glimmer-cism/cism_driver/testsfg.F90
@@ -0,0 +1,284 @@
+module testsFG
+! Copyright (C) 2005-2007 Ed Bueler
+!
+! This file is part of PISM.
+!
+! PISM is free software; you can redistribute it and/or modify it under the
+! terms of the GNU General Public License as published by the Free Software
+! Foundation; either version 2 of the License, or (at your option) any later
+! version.
+!
+! PISM is distributed in the hope that it will be useful, but WITHOUT ANY
+! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+! FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+! details.
+!
+! You should have received a copy of the GNU General Public License
+! along with PISM; if not, write to the Free Software
+! Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+! TESTSFG is a Fortran 90 implementation of two exact solutions for a
+! thermocoupled ice sheet. Reference:
+!
+! E. Bueler, J. Brown, and C. Lingle (2007). "Exact solutions to the
+! thermomechanically coupled shallow ice approximation: effective tools
+! for verification", J. Glaciol., J. Glaciol., vol. 53 no. 182, 499--516.
+!
+! ELB 3/29/05; 7/27/07; 7/29/08
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ use glimmer_global, only : dp
+ public :: testF, testG
+ private :: bothexact, p3, p4
+
+ ! DOUBLE PRECISION DESIRABLE:
+ integer, parameter, public :: kind=dp
+
+ real(kind), parameter, public :: SperA = 31556926.0 ! 365.2422 days
+ real(kind), parameter, public :: g=9.81 ! m/s^2; accel of gravity
+ real(kind), parameter, public :: Rgas=8.314 ! J/(mol K)
+
+ ! ice properties; parameters which appear in constitutive relation
+ real(kind), parameter, public :: rho=910.0 ! kg/m^3; density
+ real(kind), parameter, public :: k=2.1 ! J/m K s; thermal conductivity
+ real(kind), parameter, public :: cpheat=2009.0! J/kg K; specific heat capacity
+ real(kind), parameter, public :: n=3 ! Glen exponent
+ ! next two are EISMINT II values; Paterson-Budd for T<263
+ real(kind), parameter, public :: A=3.615e-13 ! Pa^-3 s^-1
+ real(kind), parameter, public :: Q=6.0e4 ! J/mol
+
+ ! EISMINT II temperature boundary condition (Experiment F):
+ real(kind), parameter, public :: Ggeo=.042 ! J/m^2 s; geo. heat flux
+ real(kind), parameter, public :: ST=1.67e-5 ! K m^-1
+ real(kind), parameter, public :: Tmin=223.15 ! K
+
+ ! parameters describing extent of sheet
+ real(kind), parameter, public :: H0=3000.0 ! m
+ real(kind), parameter, public :: L=750000.0 ! m
+
+ ! period and magnitude of perturbation; inactive in Test F:
+ real(kind), parameter, public :: Tp=2000.0*SperA ! s
+ real(kind), parameter, public :: Cp=200.0 ! m
+
+contains
+
+  subroutine testF(r,z,H,T,U,w,Sig,M,Sigc)
+  ! Exact steady solution (Test F): bothexact evaluated at t = 0 with
+  ! zero perturbation amplitude.
+  ! r,z : radial coordinate and height above the bed
+  ! out : H thickness, T temperature, U horizontal velocity, w vertical
+  !       velocity, Sig strain heating, M compensatory accumulation,
+  !       Sigc compensatory heating
+ real(kind), intent(in) :: r, z
+ real(kind), intent(out) :: H, T, U, w, Sig, M, Sigc
+ call bothexact(0.0_kind,r,z,0.0_kind,H,T,U,w,Sig,M,Sigc)
+ end subroutine testF
+
+  subroutine testG(t,r,z,H,TT,U,w,Sig,M,Sigc)
+  ! Exact time-dependent solution (Test G): bothexact evaluated at time t
+  ! with the module's perturbation amplitude Cp.
+  ! t,r,z : time, radial coordinate, height above the bed
+  ! out   : same quantities as testF (TT is the temperature)
+ real(kind), intent(in) :: t, r, z
+ real(kind), intent(out) :: H, TT, U, w, Sig, M, Sigc
+ call bothexact(t,r,z,Cp,H,TT,U,w,Sig,M,Sigc)
+ end subroutine testG
+
+  subroutine bothexact(t,r,z,Cp,H,TT,U,w,Sig,M,Sigc)
+  ! Evaluate the exact thermocoupled SIA solution of Bueler, Brown &
+  ! Lingle (2007) at time t, radius r, height z, with surface
+  ! perturbation amplitude Cp (Cp = 0 gives Test F, Cp /= 0 Test G).
+  !
+  ! NOTE(review): this routine was damaged in extraction -- text between
+  ! '<' and '>' characters was swallowed, visibly merging statements
+  ! into the two 'print' lines below. The definitions of pow, Hconst,
+  ! s, lamhat, U, mu, I3, omega, Sig and Sigmu (all used later) are
+  ! missing and must be restored from the PISM testsFG original before
+  ! this code is compiled or trusted.
+ real(kind), intent(in) :: t, r, z, Cp
+ real(kind), intent(out) :: H, TT, U, w, Sig, M, Sigc
+
+ real(kind), parameter :: pi = 3.14159265358979
+ real(kind), parameter :: Kcond=k/(rho*cpheat) ! constant in temp eqn
+
+ ! declare all temporary quantities real(kind); computed in blocks below
+ real(kind) pow, Hconst, s, lamhat, f, goft, Ts, nusqrt, nu
+ real(kind) lamhatr, fr, Hr, mu, I3, surfArr, Uconst, omega
+ real(kind) Sigmu, lamhatrr, frr, Hrr, Tsr, nur, mur, phi, gamma, I4
+ real(kind) I4H, divQ, Ht, nut, dTt, Tr, Tz, Tzz
+
+ ! NOTE(review): the next line is a mangled merge of several original
+ ! statements (domain check, thickness setup, and the r-range test)
+ if (r<=0 .or. r>=L) then
+ print *,'code and derivation assume 00.3*L .and. r<0.9*L) then
+ f = ( cos(pi*(r-0.6*L)/(0.6*L)) )**2
+ else
+ f = 0.0
+ end if
+ goft = Cp*sin(2.0*pi*t/Tp)
+ H = Hconst*(lamhat)**pow + goft*f
+
+ ! compute TT = temperature
+ Ts = Tmin+ST*r
+ nusqrt = sqrt( 1 + (4.0*H*Ggeo)/(k*Ts) )
+ nu = ( k*Ts/(2.0*Ggeo) )*( 1 + nusqrt )
+ TT = Ts * (nu+H) / (nu+z)
+
+ ! compute surface slope and horizontal velocity
+ lamhatr = ((1+1/n)/L)*( 1 - (1-s)**(1/n) - s**(1/n) )
+ if (r>0.3*L .and. r<0.9*L) then
+ fr = -(pi/(0.6*L)) * sin(2.0*pi*(r-0.6*L)/(0.6*L))
+ else
+ fr = 0.0
+ end if
+ Hr = Hconst * pow * lamhat**(pow-1) * lamhatr + goft*fr ! chain rule
+ ! NOTE(review): mangled merge again -- the slope-sign check and the
+ ! computation of U, mu, I3, omega, Sig were lost here
+ if (Hr>0) then
+ print *,'code and derivation assume H_r negative for all 00.3*L .and. r<0.9*L) then
+ frr = -(2.0*pi*pi/(0.36*L*L)) * cos(2.0*pi*(r-0.6*L)/(0.6*L))
+ else
+ frr = 0.0
+ end if
+ Hrr = Hconst*pow*(pow-1)*(lamhat)**(pow-2) * lamhatr**2 + &
+ Hconst*pow*(lamhat)**(pow-1)*lamhatrr + goft*frr
+ Tsr = ST
+ nur = (k*Tsr/(2.0*Ggeo)) * (1 + nusqrt) + &
+ (1/Ts) * (Hr*Ts-H*Tsr) / nusqrt
+ mur = (-Q/(Rgas*Ts*Ts*(nu+H)**2)) * (Tsr*(nu+H)+Ts*(nur+Hr))
+ phi = 1/r + n*Hrr/Hr + Q*Tsr/(Rgas*Ts*Ts) - (n+1)*mur/mu ! division by r
+ gamma = mu**n * exp(mu*H) * (mur*H+mu*Hr) * H**n
+ I4 = p4(mu*H) * exp(mu*H) - p4(mu*(H-z)) * exp(mu*(H-z))
+ w = omega * ((mur/mu - phi)*I4/mu + (phi*(H-z)+Hr)*I3 - gamma*z)
+
+ ! compute compensatory accumulation M
+ I4H = p4(mu*H) * exp(mu*H) - 24
+ divQ = - omega * (mur/mu - phi) * I4H / mu + omega * gamma * H
+ Ht = (Cp*2.0*pi/Tp) * cos(2.0*pi*t/Tp) * f
+ M = Ht + divQ
+
+ ! compute compensatory heating
+ nut = Ht/nusqrt
+ dTt = Ts * ((nut+Ht)*(nu+z)-(nu+H)*nut) * (nu+z)**(-2)
+ Tr = Tsr*(nu+H)/(nu+z) + Ts * ((nur+Hr)*(nu+z)-(nu+H)*nur) * (nu+z)**(-2)
+ Tz = -Ts * (nu+H) * (nu+z)**(-2)
+ Tzz = 2.0 * Ts * (nu+H) * (nu+z)**(-3)
+ Sigc = dTt + U*Tr + w*Tz - Kcond*Tzz - Sig
+ end subroutine bothexact
+
+  function p3(x)
+  ! Evaluate the cubic p_3(x) = x^3 - 3x^2 + 6x - 6 (Horner form),
+  ! which arises in the velocity integrals of the exact solutions.
+ real(kind), intent(in) :: x
+ ! Bug fix: the result declaration was commented out, so under implicit
+ ! typing p3 was a default (single precision) real, silently degrading
+ ! the real(kind)=dp computation of its callers.
+ real(kind) :: p3
+ ! p_3=x^3-3*x^2+6*x-6, using Horner's
+ p3 = -6 + x*(6 + x*(-3 + x))
+ end function p3
+
+  function p4(x)
+  ! Evaluate the quartic p_4(x) = x^4 - 4x^3 + 12x^2 - 24x + 24
+  ! (Horner form), used in the vertical-velocity integrals.
+ real(kind), intent(in) :: x
+ ! Bug fix: the result declaration was commented out, so under implicit
+ ! typing p4 was a default (single precision) real, silently degrading
+ ! the real(kind)=dp computation of its callers.
+ real(kind) :: p4
+ ! p_4=x^4-4*x^3+12*x^2-24*x+24, using Horner's
+ p4 = 24 + x*(-24 + x*(12 + x*(-4 + x)))
+ end function p4
+
+  subroutine model_exact(t,r,z,Hh,H0,TT,U,w,Sig,M,Sigc)
+  ! Variant of bothexact intended to take model state as input: TT is
+  ! intent(inout) (only filled from the analytic profile when passed in
+  ! as 0), and H is capped at the analytic thickness rather than
+  ! overwritten.
+  !
+  ! NOTE(review): damaged in extraction exactly like bothexact (merged
+  ! 'print' lines; pow, Hconst, s, lamhat, U, mu, I3, omega, Sig used
+  ! without visible definitions). Additionally 'real Cp, H' declares
+  ! default-precision locals: this Cp shadows the module parameter Cp
+  ! and is read uninitialized in goft, and H is tested before being
+  ! assigned -- confirm against the intended original before use.
+ real(kind), intent(in) :: t, r, z, Hh, H0
+ real(kind), intent(inout) :: TT
+ real(kind), intent(out) :: U, w, Sig, M, Sigc
+
+ real(kind), parameter :: pi = 3.14159265358979
+ real(kind), parameter :: Kcond=k/(rho*cpheat) ! constant in temp eqn
+
+ ! declare all temporary quantities real(kind); computed in blocks below
+ real(kind) pow, Hconst, s, lamhat, f, goft, Ts, nusqrt, nu
+ real(kind) lamhatr, fr, Hr, mu, I3, surfArr, Uconst, omega
+ real(kind) Sigmu, lamhatrr, frr, Hrr, Tsr, nur, mur, phi, gamma, I4
+ real(kind) I4H, divQ, Ht, nut, dTt, Tr, Tz, Tzz
+ real Cp, H
+
+ ! NOTE(review): mangled merge of several original statements
+ if (r<=0 .or. r>=L) then
+ print *,'code and derivation assume 00.3*L .and. r<0.9*L) then
+ f = ( cos(pi*(r-0.6*L)/(0.6*L)) )**2
+ else
+ f = 0.0
+ end if
+ goft = Cp*sin(2.0*pi*t/Tp)
+ !H = Hconst*(lamhat)**pow + goft*f
+ ! cap H at the analytic thickness instead of overwriting it
+ if (H .gt. Hconst*(lamhat)**pow + goft*f) then
+ H = Hconst*(lamhat)**pow + goft*f
+ end if
+ ! compute TT = temperature
+ Ts = Tmin+ST*r
+ nusqrt = sqrt( 1 + (4.0*H*Ggeo)/(k*Ts) )
+ nu = ( k*Ts/(2.0*Ggeo) )*( 1 + nusqrt )
+ ! only fill TT from the analytic profile when the caller passed 0
+ if(TT .eq. 0.0) then
+ TT = Ts * (nu+H) / (nu+z)
+ end if
+
+ ! compute surface slope and horizontal velocity
+ lamhatr = ((1+1/n)/L)*( 1 - (1-s)**(1/n) - s**(1/n) )
+ if (r>0.3*L .and. r<0.9*L) then
+ fr = -(pi/(0.6*L)) * sin(2.0*pi*(r-0.6*L)/(0.6*L))
+ else
+ fr = 0.0
+ end if
+ Hr = Hconst * pow * lamhat**(pow-1) * lamhatr + goft*fr ! chain rule
+ ! NOTE(review): mangled merge again -- the slope-sign check and the
+ ! computation of U, mu, I3, omega, Sig were lost here
+ if (Hr>0) then
+ print *,'code and derivation assume H_r negative for all 00.3*L .and. r<0.9*L) then
+ frr = -(2.0*pi*pi/(0.36*L*L)) * cos(2.0*pi*(r-0.6*L)/(0.6*L))
+ else
+ frr = 0.0
+ end if
+ Hrr = Hconst*pow*(pow-1)*(lamhat)**(pow-2) * lamhatr**2 + &
+ Hconst*pow*(lamhat)**(pow-1)*lamhatrr + goft*frr
+ Tsr = ST
+ nur = (k*Tsr/(2.0*Ggeo)) * (1 + nusqrt) + &
+ (1/Ts) * (Hr*Ts-H*Tsr) / nusqrt
+ mur = (-Q/(Rgas*Ts*Ts*(nu+H)**2)) * (Tsr*(nu+H)+Ts*(nur+Hr))
+ phi = 1/r + n*Hrr/Hr + Q*Tsr/(Rgas*Ts*Ts) - (n+1)*mur/mu ! division by r
+ gamma = mu**n * exp(mu*H) * (mur*H+mu*Hr) * H**n
+ I4 = p4(mu*H) * exp(mu*H) - p4(mu*(H-z)) * exp(mu*(H-z))
+ w = omega * ((mur/mu - phi)*I4/mu + (phi*(H-z)+Hr)*I3 - gamma*z)
+
+ ! compute compensatory accumulation M
+ I4H = p4(mu*H) * exp(mu*H) - 24
+ divQ = - omega * (mur/mu - phi) * I4H / mu + omega * gamma * H
+ Ht = (Cp*2.0*pi/Tp) * cos(2.0*pi*t/Tp) * f
+ M = Ht + divQ
+
+ ! compute compensatory heating
+ nut = Ht/nusqrt
+ dTt = Ts * ((nut+Ht)*(nu+z)-(nu+H)*nut) * (nu+z)**(-2)
+ Tr = Tsr*(nu+H)/(nu+z) + Ts * ((nur+Hr)*(nu+z)-(nu+H)*nur) * (nu+z)**(-2)
+ Tz = -Ts * (nu+H) * (nu+z)**(-2)
+ Tzz = 2.0 * Ts * (nu+H) * (nu+z)**(-3)
+ Sigc = dTt + U*Tr + w*Tz - Kcond*Tzz - Sig
+ end subroutine model_exact
+end module testsFG
diff --git a/components/cism/glimmer-cism/libdycore/BISICLES/BisiclesToGlimmer.H b/components/cism/glimmer-cism/libdycore/BISICLES/BisiclesToGlimmer.H
new file mode 100644
index 0000000000..5bed43bdf2
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/BISICLES/BisiclesToGlimmer.H
@@ -0,0 +1,35 @@
+// The DyCoreToGlimmer class provides methods to move Glimmer Fortran data to C++ structures
+// for access by the Chombo-based BISICLES model. The structure names and structure member
+// names mostly correspond to derived types defined in Glimmer. In general, pointers to
+// the Fortran data arrays are used, rather than copies of these arrays. This saves space
+// and reduces the steps needed to update the Glimmer data between calls to the BISICLES
+// ice sheet modeling program. Methods are provided to set these array pointers, and
+// copy array dimension information. Objects of this class are accessed by extern C
+// routines in bike_to_glim_extern.cpp, and by the BISICLES front end. DMR--5/24/10
+
+#ifndef BISICLESTOGLIMMER
+#define BISICLESTOGLIMMER
+
+
+#include <iostream>
+#include <cstring>
+#include "bike_driver.H"
+#include "../DyCoreToGlimmer.H"
+
+
+class BisiclesToGlimmer : public DyCoreToGlimmer
+{
+ private:
+ // AmrIce bisicles_object;
+
+ public:
+
+ //DynCoreToGlimmer BisiclesToGlimmer();
+ int initDyCore(const char * input_fname);
+ // cur_time_yr is updated in place as solution is evolved
+ int runDyCore(double& cur_time_yr, const double time_inc_yr);
+ int deleteDyCore();
+
+};
+
+#endif
diff --git a/components/cism/glimmer-cism/libdycore/BISICLES/BisiclesToGlimmer.cpp b/components/cism/glimmer-cism/libdycore/BISICLES/BisiclesToGlimmer.cpp
new file mode 100644
index 0000000000..9122389c4a
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/BISICLES/BisiclesToGlimmer.cpp
@@ -0,0 +1,54 @@
+// The DyCoreToGlimmer class provides methods to move Glimmer Fortran data to C++ structures
+// for access by the Chombo-based BISICLES model. The structure names and structure member
+// names mostly correspond to derived types defined in Glimmer. In general, pointers to
+// the Fortran data arrays are used, rather than copies of these arrays. This saves space
+// and reduces the steps needed to update the Glimmer data between calls to the BISICLES
+// ice sheet modeling program. Methods are provided to set these array pointers, and
+// copy array dimension information. Objects of this class are accessed by extern C
+// routines in bike_to_glim_extern.cpp, and by the BISICLES front end. DMR--5/24/10
+
+#include "BisiclesToGlimmer.H"
+
+
+using namespace std;
+
+
+int
+BisiclesToGlimmer::initDyCore(const char * input_fname)
+{
+
+ // long * dimInfo;
+
+ cout << "In BISICLES initDyCore" << endl;
+ // dimInfo = this -> getLongVar("dimInfo","geometry");
+
+
+ // cout << "DimInfo in initDyCore: " << endl;
+ // for (i=0;i<10;i++) cout << dimInfo[i] << " ";
+ // cout << "In BISICLES initDyCore, calling bike_driver_inin:" << endl;
+ bike_driver_init(2,0,this,input_fname);
+ return 0; // ought to make sensible use of this.
+
+}
+
+// updates cur_time_yr to match time update in dycore
+int
+BisiclesToGlimmer::runDyCore(double& cur_time_yr, const double time_inc_yr)
+{
+ cout << "In BISICLES runDyCore" << endl;
+ bike_driver_run(this,cur_time_yr,time_inc_yr);
+ return 0; // ought to make sensible use of this.
+}
+
+int
+BisiclesToGlimmer::deleteDyCore()
+{
+ bike_driver_finalize(this -> getDyCoreIndex());
+ return 0; // ought to make sensible use of this.
+}
+
+//int storeBisiclesObject(AmrIce bisicles_object)
+//{}
+
+//AmrIce retrieveBisiclesObject()
+//{}
diff --git a/components/cism/glimmer-cism/libdycore/CHANGELOG b/components/cism/glimmer-cism/libdycore/CHANGELOG
new file mode 100644
index 0000000000..a80c0cde98
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/CHANGELOG
@@ -0,0 +1,9 @@
+Changes in libdycore and its subdirectories:
+
+10/4/10 (DFM)
+(a) BISICLES/BisiclesToGlimmer.[H,cpp] -- removed hardwired path to bike_driver.H
+
+(b) dycore_to_glimmer_extern.cpp -- included <cstdio> and <cstring> in order to compile on my machine here at LBL
+
+(c) DyCoreToGlimmer.cpp -- include <cstring>
+
diff --git a/components/cism/glimmer-cism/libdycore/CMakeLists.txt b/components/cism/glimmer-cism/libdycore/CMakeLists.txt
new file mode 100644
index 0000000000..e648d5d00b
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/CMakeLists.txt
@@ -0,0 +1,19 @@
+# libdycore build
+
+FILE(GLOB DYCORE_SOURCES *.cpp *.cpp)
+FILE(GLOB DYCORE_HEADERS *.H *.H)
+
+IF (CISM_ENABLE_BISICLES)
+ FILE(GLOB BISICLES_SOURCES *.cpp BISICLES/*.cpp)
+ FILE(GLOB BISICLES_HEADERS *.H BISICLES/*.H)
+ENDIF()
+
+add_library(DyCoreToGlimmer ${DYCORE_SOURCES} ${DYCORE_HEADERS}
+ ${BISICLES_SOURCES} ${BISICLES_HEADERS})
+
+# Need include directories from Trilinos but also mod files from glimmer
+
+include_directories (${BISICLES_INTERFACE_DIR})
+
+
+
diff --git a/components/cism/glimmer-cism/libdycore/DyCoreModelRegistry.H b/components/cism/glimmer-cism/libdycore/DyCoreModelRegistry.H
new file mode 100644
index 0000000000..67cd0c355f
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/DyCoreModelRegistry.H
@@ -0,0 +1,50 @@
+// DyCoreModelRegistry is used to store multiple dynamic core models
+// without using global variables, and allowing retrieval by an
+// integer model index. --DMR 5/24/10
+
+#include "DyCoreToGlimmer.H"
+//IK, 8/6/13: added some ifdefs here to allow for other external dycores than just bisicles
+
+#ifdef CISM_HAS_BISICLES
+#include "BISICLES/BisiclesToGlimmer.H"
+#endif
+//#include "Ymir/YmirToGlimmer.H"
+//IK, 8/6/13: added the following for FELIX as external dycore
+#ifdef CISM_HAS_FELIX
+#include "FELIX/FelixToGlimmer.H"
+#endif
+
+#ifndef DYCORE_MODEL_REGISTRY
+#define DYCORE_MODEL_REGISTRY
+
+#define DYCORE_MODEL_COUNT 10
+
+class DyCoreModelRegistry
+{
+ private:
+
+ struct RegistryEntry {
+ DyCoreToGlimmer * dycore_to_glimmer;
+ int dycore_type; // 0=BISICLES, 1=Ymir, 2=FELIX
+ int my_reg_index;
+ int dycore_present;
+ } entry[DYCORE_MODEL_COUNT];
+
+ int cur_model_count;
+
+
+ public:
+
+ DyCoreModelRegistry();
+ int ClearRegistryEntries();
+ int ClearRegistryEntry(int index);
+ DyCoreToGlimmer * getDyCoreToGlimmerByIndex(int index);
+ //int * getDyCoreToGlimmerByIndex(int index);
+ int getModelCount();
+ int incModelCount();
+ int setDyCoreByType(int index,int dyncore_type);
+ int setRegistryIndex(int index);
+ int getRegistryIndex(int index);
+};
+
+#endif
diff --git a/components/cism/glimmer-cism/libdycore/DyCoreModelRegistry.cpp b/components/cism/glimmer-cism/libdycore/DyCoreModelRegistry.cpp
new file mode 100644
index 0000000000..d13dc0457a
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/DyCoreModelRegistry.cpp
@@ -0,0 +1,126 @@
+// DyCoreModelRegistry is used to store multiple dynamic core models
+// without using global variables, and allowing retrieval by an
+// integer model index. --DMR 5/24/10
+
+// 4/9/12 (DMR) commented out Ymir object creation because of a build
+// problem on hopper.nersc.gov
+
+#include <iostream>
+#include <cstdio>
+#include <cstdlib>
+#include "DyCoreModelRegistry.H"
+
+using namespace std;
+
+// RegistryEntry entry[BISICLES_MODEL_COUNT];
+
+DyCoreModelRegistry::DyCoreModelRegistry()
+{
+ cur_model_count = 0;
+}
+
+int
+DyCoreModelRegistry::ClearRegistryEntries()
+{
+ int i;
+
+ for (i=0;i<DYCORE_MODEL_COUNT;i++) {
+ ClearRegistryEntry(i);
+ }
+ return(0);
+}
+
+int
+DyCoreModelRegistry::ClearRegistryEntry(int index)
+{
+ if (entry[index].dycore_present) {
+ entry[index].dycore_to_glimmer -> deleteDyCore();
+ delete entry[index].dycore_to_glimmer;
+ }
+ entry[index].dycore_present = 0;
+ return(0);
+}
+
+DyCoreToGlimmer *
+DyCoreModelRegistry::getDyCoreToGlimmerByIndex(int index)
+{
+
+ // cout << index << " Registry entry dycore type: " << entry[index].dycore_type << endl;
+ //IK, 8/6/13: added error checking here so that if entry[index].dycore_to_glimmer is has not been set
+ //properly in setDyCoreByType, the code will abort.
+ if ((entry[index].dycore_to_glimmer) == NULL) {
+ fprintf(stderr, "Error in DyCoreModelRegistry::getDyCoreToGlimmerByIndex: external dycore not set properly in setDyCoreByType. \n");
+ fprintf(stderr, "Aborting... \n");
+ exit(1);
+ }
+ return((DyCoreToGlimmer *) entry[index].dycore_to_glimmer);
+}
+
+int
+DyCoreModelRegistry::setDyCoreByType(int index,int dycore_type)
+{
+ entry[index].dycore_type = dycore_type;
+ entry[index].dycore_present = 1;
+
+ switch (entry[index].dycore_type) {
+ case 0:
+ entry[index].dycore_to_glimmer = NULL;
+ break;
+ case 1: // BISICLES dycore
+//IK, 8/6/13: added ifdefs here to allow multiple external dycores
+#ifdef CISM_HAS_BISICLES
+ entry[index].dycore_to_glimmer = new BisiclesToGlimmer;
+#else
+ entry[index].dycore_to_glimmer = NULL;
+#endif
+ break;
+ case 2: //IK, 8/6/13: added case for FELIX dycore
+#ifdef CISM_HAS_FELIX
+ entry[index].dycore_to_glimmer = new FelixToGlimmer;
+#else
+ entry[index].dycore_to_glimmer = NULL;
+#endif
+ break;
+ case 3:
+ //entry[index].dycore_to_glimmer = new YmirToGlimmer;
+ break;
+
+ default: entry[index].dycore_to_glimmer = NULL;
+ break;
+ }
+ return(0);
+}
+
+
+int
+DyCoreModelRegistry::getModelCount()
+{
+ return(cur_model_count);
+}
+
+int
+DyCoreModelRegistry::incModelCount()
+{
+ cur_model_count++;
+ return(0);
+}
+
+int
+DyCoreModelRegistry::setRegistryIndex(int index)
+{
+ entry[index].my_reg_index = index;
+ return(0);
+}
+
+int
+DyCoreModelRegistry::getRegistryIndex(int index)
+{
+ return(entry[index].my_reg_index);
+}
diff --git a/components/cism/glimmer-cism/libdycore/DyCoreToGlimmer.H b/components/cism/glimmer-cism/libdycore/DyCoreToGlimmer.H
new file mode 100644
index 0000000000..1b7ba47ddd
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/DyCoreToGlimmer.H
@@ -0,0 +1,147 @@
+// The DyCoreToGlimmer class provides methods to move Glimmer Fortran data to C++ structures
+// for access by C++ based dynamical cores. The structure names and structure member
+// names mostly correspond to derived types defined in Glimmer. In general, pointers to
+// the Fortran data arrays are used, rather than copies of these arrays. This saves space
+// and reduces the steps needed to update the Glimmer data between calls to the core
+// ice sheet modeling program. Methods are provided to set these array pointers, and
+// copy array dimension information. Objects of this class are accessed by extern C
+// routines in dycore_to_glimmer_extern.cpp, and by the dynamical core front end. DMR--5/24/10
+
+//#pragma once
+#ifndef DYCORETOGLIMMER
+#define DYCORETOGLIMMER
+
+#include <iostream>
+#include <cstring>
+
+
+class DyCoreToGlimmer
+{
+
+ private:
+
+ // Keep track of dycore_type and dycore_index. Dycore_index is used to index the
+ // the external dycore object storage array.
+ struct {
+ int dycore_type;
+ int dycore_index;
+ } dycore_info;
+
+ // The following structures are based on the derived types in glide_types.F90
+
+ struct {
+ double * thck;
+ double * usrf;
+ double * lsrf;
+ double * topg;
+ double * floating_mask;
+ double * ice_mask;
+ double * lower_cell_loc; // z-location of lowest cell-center
+ double * lower_cell_temp; // temperature in lowest cell
+ long * dimInfo;
+ long * ewlb;
+ long * ewub;
+ long * nslb;
+ long * nsub;
+ long * nhalo;
+
+ //double * thkmask;
+ //double * marine_bc_normal;
+
+ } geometry;
+
+ struct {
+ double * uvel; //output
+ double * vvel; //output
+ double * wvel;
+ double * wgrd;
+ double * btrc; // basal traction coefficient
+ long * dimInfo;
+ } velocity;
+
+ struct {
+ double * temp; // Three-dimensional temperature field.
+ double * bheatflx; // basal heat flux (2D)
+ double * bmlt; // Basal melt-rate
+ long * dimInfo;
+ } temper;
+
+ struct {
+
+ } lithot_type;
+
+ struct {
+ double * tstart;
+ double * tend;
+ double * time;
+ //double * tinc;
+
+ double * dew; // ew cell size
+ double * dns; // ns cell size
+
+ } numerics;
+
+ struct {
+ double * acab; // Annual mass balance.
+ double * acab_tavg; // Annual mass balance (time average)
+ double * calving; // Calving flux (scaled as mass balance, thickness, etc)
+ long * dimInfo;
+ double * eus; // eustatic sea level
+ } climate;
+
+ struct {
+ double * beta; // basal shear coefficient
+ double * btraction; // -dir (1,:,:) and y-dir (2,:,:) "consistent" basal
+ // traction fields (calculated from matrix coeffs)
+ long dimInfo;
+ } velocity_hom;
+
+ struct {
+ double seconds_per_year;
+ double gravity;
+ double rho_ice;
+ double rho_seawater;
+ double therm_diffusivity_ice;
+ double heat_capacity_ice;
+ } constants;
+
+
+ struct {
+ long * communicator;
+ long * process_count;
+ long * my_rank;
+ } mpi_vars;
+
+ public:
+
+ DyCoreToGlimmer();
+ virtual ~DyCoreToGlimmer();
+
+
+ int setDoubleVar( double *var, const char *var_name, const char *struct_name);
+ double * getDoubleVar( const char *var_name, const char *struct_name);
+
+ int setLongVar( long * var, const char * var_name, const char *struct_name);
+ long * getLongVar( const char * var_name, const char *struct_name);
+
+ int setInt4Var( int * var, const char * var_name, const char *struct_name);
+ int * getInt4Var( const char * var_name, const char *struct_name);
+
+ int copyInDoubleVar( const double *var, const char *var_name,
+ const char *struct_name, const long *var_dim_info);
+ int copyInLongVar( const long *var, const char *var_name,
+ const char *struct_name, const long *var_dim_info);
+
+ virtual int initDyCore(const char *input_fname); // = 0;
+ virtual int runDyCore(double& cur_time_yr, const double time_inc_yr); // = 0;
+ virtual int deleteDyCore(); // = 0;
+
+ int setDyCoreType(const int dycore_type);
+ int getDyCoreType();
+
+ int setDyCoreIndex(const int dycore_index);
+ int getDyCoreIndex();
+
+};
+
+#endif
diff --git a/components/cism/glimmer-cism/libdycore/DyCoreToGlimmer.cpp b/components/cism/glimmer-cism/libdycore/DyCoreToGlimmer.cpp
new file mode 100644
index 0000000000..aa3810e67e
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/DyCoreToGlimmer.cpp
@@ -0,0 +1,585 @@
+// The DyCoreToGlimmer class provides methods to move Glimmer Fortran data to C++ structures
+// for access by the Chombo-based BISICLES model. The structure names and structure member
+// names mostly correspond to derived types defined in Glimmer. In general, pointers to
+// the Fortran data arrays are used, rather than copies of these arrays. This saves space
+// and reduces the steps needed to update the Glimmer data between calls to the BISICLES
+// ice sheet modeling program. Methods are provided to set these array pointers, and
+// copy array dimension information. Objects of this class are accessed by extern C
+// routines in dycore_to_glimmer_extern.cpp, and by the BISICLES front end. DMR--5/24/10
+
+#include "DyCoreToGlimmer.H"
+#include <cstring>
+
+using namespace std;
+
+//DyCoreToGlimmer::DyCoreToGlimmer(int dycore_type)
+DyCoreToGlimmer::DyCoreToGlimmer()
+{
+ // initialize all pointerrs to null
+ // geometry...
+ geometry.thck = NULL;
+ geometry.usrf = NULL;
+ geometry.lsrf = NULL;
+ geometry.topg = NULL;
+ geometry.floating_mask = NULL;
+ geometry.ice_mask = NULL;
+ geometry.lower_cell_loc = NULL;
+ geometry.lower_cell_temp = NULL;
+ geometry.dimInfo = NULL;
+ geometry.ewlb = NULL;
+ geometry.ewub = NULL;
+ geometry.nslb = NULL;
+ geometry.nsub = NULL;
+ geometry.nhalo = NULL;
+
+ // velocity
+ velocity.uvel = NULL; //output
+ velocity.vvel = NULL; //output
+ velocity.wvel = NULL;
+ velocity.wgrd = NULL;
+ velocity.btrc = NULL; // basal traction coefficient
+ velocity.dimInfo = NULL;
+
+ // temper
+ temper.temp = NULL; // Three-dimensional temperature field.
+ temper.bheatflx = NULL; // basal heat flux (2D)
+ temper.bmlt = NULL; // Basal melt-rate
+
+ // numerics
+ numerics.tstart = NULL; // start time
+ numerics.tend = NULL; // end time
+ numerics.time = NULL; // current time
+
+ numerics.dew = NULL; // ew cell size
+ numerics.dns = NULL; // ns cell size
+
+ // constants are doubles, not pointers. Set to nonphysical values
+ double bogusVal = -1.2345e10;
+ constants.seconds_per_year = bogusVal;
+ constants.gravity = bogusVal;
+ constants.rho_ice = bogusVal;
+ constants.rho_seawater = bogusVal;
+ constants.therm_diffusivity_ice = bogusVal;
+ constants.heat_capacity_ice = bogusVal;
+
+
+ // climate
+ climate.acab = NULL; // Annual mass balance.
+ climate.acab_tavg = NULL; // Annual mass balance (time average)
+ climate.calving = NULL; // Calving flux (scaled as mass balance, thickness,)
+ climate.dimInfo = NULL;
+ climate.eus = NULL; // eustatic sea level
+
+}
+
+DyCoreToGlimmer::~DyCoreToGlimmer()
+{
+ // cout << "Init DyCoreToGlimmer" << endl;
+}
+
+int
+DyCoreToGlimmer::setDoubleVar(double *var, const char *var_name, const char *struct_name)
+{
+ //cout << "struct_name::" << struct_name << "::" << endl;
+
+ if (strcmp(struct_name,"geometry") == 0) {
+ if (strcmp(var_name,"thck") == 0) geometry.thck = var;
+ else if (strcmp(var_name,"topg") == 0) geometry.topg = var;
+ else if (strcmp(var_name,"usrf") == 0) geometry.usrf = var;
+ else if (strcmp(var_name,"lsrf") == 0) geometry.lsrf = var;
+ else if (strcmp(var_name,"floating_mask") == 0) geometry.floating_mask = var;
+ else if (strcmp(var_name,"ice_mask") == 0) geometry.ice_mask = var;
+ else if (strcmp(var_name,"lower_cell_loc") == 0) geometry.lower_cell_loc = var;
+ else if (strcmp(var_name,"lower_cell_temp") == 0) geometry.lower_cell_temp = var;
+ else cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+
+ else if (strcmp(struct_name,"velocity") == 0) {
+ if (strcmp(var_name,"uvel") == 0) velocity.uvel = var;
+ else if (strcmp(var_name,"vvel") == 0) velocity.vvel = var;
+ else if (strcmp(var_name,"wvel") == 0) velocity.wvel = var;
+ else if (strcmp(var_name,"wgrd") == 0) velocity.wgrd = var;
+ else if (strcmp(var_name,"btrc") == 0) velocity.btrc = var;
+ else cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+
+
+ }
+
+
+
+ else if (strcmp(struct_name,"temper") == 0) {
+ if (strcmp(var_name,"temp") == 0) temper.temp = var;
+ else if (strcmp(var_name,"bheatflx") == 0) temper.bheatflx = var;
+ else if (strcmp(var_name,"bmlt") == 0) temper.bmlt = var;
+ else cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ else if (strcmp(struct_name,"numerics") == 0) {
+ if (strcmp(var_name,"tstart") == 0) numerics.tstart = var;
+ else if (strcmp(var_name,"tend") == 0) numerics.tend = var;
+ else if (strcmp(var_name,"time") == 0) numerics.time = var;
+ else cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ else if (strcmp(struct_name,"climate") == 0) {
+ if (strcmp(var_name,"acab") == 0) climate.acab = var;
+ else if (strcmp(var_name,"acab_tavg") == 0) climate.acab_tavg = var;
+ else if (strcmp(var_name,"calving") == 0) climate.calving = var;
+ else cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ else {
+ cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ return(0);
+}
+
+double *
+DyCoreToGlimmer::getDoubleVar(const char *var_name, const char *struct_name)
+{
+
+ double * var=0;
+
+ //cout << "struct_name::" << struct_name << "::" << endl;
+
+ if (strcmp(struct_name,"geometry") == 0) {
+ if (strcmp(var_name,"thck") == 0)
+ {
+ return(geometry.thck);
+ }
+ else if (strcmp(var_name,"topg") == 0)
+ {
+ return(geometry.topg);
+ }
+ else if (strcmp(var_name,"usrf") == 0)
+ {
+ return(geometry.usrf);
+ }
+ else if (strcmp(var_name,"lsrf") == 0)
+ {
+ return(geometry.lsrf);
+ }
+ else if (strcmp(var_name,"floating_mask") == 0)
+ {
+ return(geometry.floating_mask);
+ }
+ else if (strcmp(var_name,"ice_mask") == 0)
+ {
+ return(geometry.ice_mask);
+ }
+ else if (strcmp(var_name,"lower_cell_loc") == 0)
+ {
+ return(geometry.lower_cell_loc);
+ }
+ else if (strcmp(var_name,"lower_cell_temp") == 0)
+ {
+ return(geometry.lower_cell_temp);
+ }
+ else
+ {
+ cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ }
+ else if (strcmp(struct_name,"numerics") == 0) {
+ if (strcmp(var_name,"dew") == 0) return(numerics.dew);
+ else if (strcmp(var_name,"dns") == 0) return(numerics.dns);
+ else if (strcmp(var_name,"tstart") == 0) return(numerics.tstart);
+ else if (strcmp(var_name,"tend") == 0) return(numerics.tend);
+ else if (strcmp(var_name,"time") == 0) return(numerics.time);
+ else {
+ cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ }
+ else if (strcmp(struct_name,"constants") == 0) {
+ if (strcmp(var_name,"seconds_per_year") == 0) return(&constants.seconds_per_year);
+ else if (strcmp(var_name,"gravity") == 0) return(&constants.gravity);
+ else if (strcmp(var_name,"rho_ice") == 0) return(&constants.rho_ice);
+ else if (strcmp(var_name,"rho_seawater") == 0) return(&constants.rho_seawater);
+ else if (strcmp(var_name,"therm_diffusivity_ice") == 0) return(&constants.therm_diffusivity_ice);
+ else if (strcmp(var_name,"heat_capacity_ice") == 0) return(&constants.heat_capacity_ice);
+ else {
+ cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ }
+ else if (strcmp(struct_name,"velocity") == 0) {
+ if (strcmp(var_name,"btrc") == 0) return (velocity.btrc);
+ else if (strcmp(var_name,"uvel") == 0) return (velocity.uvel);
+ else if (strcmp(var_name,"vvel") == 0) return (velocity.vvel);
+ else if (strcmp(var_name,"wvel") == 0) return (velocity.wvel);
+ else if (strcmp(var_name,"wgrd") == 0) return (velocity.wgrd);
+ else {
+ cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ //cout << "Set velocity var, " << var_name << endl;
+ }
+
+ else if (strcmp(struct_name,"temper") == 0) {
+ if (strcmp(var_name,"temp") == 0) var = temper.temp;
+ else if (strcmp(var_name,"bheatflx") == 0) var = temper.bheatflx;
+ else if (strcmp(var_name,"bmlt") == 0) var = temper.bmlt;
+ else {
+ cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ }
+
+ else if (strcmp(struct_name,"climate") == 0) {
+ if (strcmp(var_name,"acab") == 0) var = climate.acab;
+ else {
+ cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ }
+ else {
+ cerr << "unknown variable type = " << struct_name
+ << "." << var_name << " undefined!" << endl;
+ }
+ return(var);
+}
+
+
+int
+DyCoreToGlimmer::setLongVar(long * var, const char *var_name, const char *struct_name)
+{
+ if (strcmp(struct_name,"geometry") == 0) {
+ if (strcmp(var_name,"dimInfo") == 0) geometry.dimInfo = var;
+ }
+ else if (strcmp(struct_name,"velocity") == 0) {
+ //cout << "Set velocity var, " << var_name << endl;
+ }
+ else {
+ cerr << "unknown longVar type = " << struct_name
+ << "." << var_name << endl;
+ }
+ return(0);
+}
+
+
+
+long *
+DyCoreToGlimmer::getLongVar( const char *var_name, const char *struct_name)
+{
+ long * var;
+
+ if (strcmp(struct_name,"geometry") == 0) {
+ if (strcmp(var_name,"dimInfo") == 0) var = geometry.dimInfo;
+ else if (strcmp(var_name,"ewlb") == 0) var = geometry.ewlb;
+ else if (strcmp(var_name,"ewub") == 0) var = geometry.ewub;
+ else if (strcmp(var_name,"nslb") == 0) var = geometry.nslb;
+ else if (strcmp(var_name,"nsub") == 0) var = geometry.nsub;
+ else if (strcmp(var_name,"nhalo") == 0) var = geometry.nhalo;
+ else
+ {
+ cerr << "unknonwn variable " << var_name << " in "
+ << struct_name << endl;
+ }
+ }
+ else if (strcmp(struct_name,"mpi_vars") == 0) {
+ if (strcmp(var_name,"communicator") == 0) var = mpi_vars.communicator;
+ else if (strcmp(var_name,"process_count") == 0) var = mpi_vars.process_count;
+ else if (strcmp(var_name,"my_rank") == 0) var = mpi_vars.my_rank;
+ else
+ {
+ cerr << "unknonwn variable " << var_name << " in "
+ << struct_name << endl;
+ }
+ }
+
+ else if (strcmp(struct_name,"velocity") == 0) {
+ if (strcmp(var_name,"dimInfo") == 0) var = velocity.dimInfo;
+ else
+ {
+ cerr << "unknonwn variable " << var_name << " in "
+ << struct_name << endl;
+ }
+ }
+
+ else if (strcmp(struct_name,"climate") == 0) {
+ if (strcmp(var_name,"dimInfo") == 0) var = climate.dimInfo;
+ else
+ {
+ cerr << "unknonwn variable " << var_name << " in "
+ << struct_name << endl;
+ }
+ }
+
+ return(var);
+}
+
+
+int
+DyCoreToGlimmer::setInt4Var(int * var, const char *var_name, const char *struct_name)
+{
+ // cout << "struct_name::" << struct_name << "::" << endl;
+
+ if (strcmp(struct_name,"felix_struct_name") == 0) {
+ // if (strcmp(var_name,"dimInfo") == 0) geometry.dimInfo = var;
+ }
+ else if (strcmp(struct_name,"velocity") == 0) {
+ //cout << "Set velocity var, " << var_name << endl;
+ }
+ else {
+ cerr << "unknown int4Var type = " << struct_name
+ << "." << var_name << endl;
+ }
+ return(0);
+}
+
+
+int *
+DyCoreToGlimmer::getInt4Var( const char *var_name, const char *struct_name)
+{
+ int * var;
+
+ if (strcmp(struct_name,"felix_struct_name") == 0) {
+ if (strcmp(var_name,"dimInfo") == 0) var = 0;
+ else
+ {
+ cerr << "unknonwn variable " << var_name << " in "
+ << struct_name << endl;
+ }
+ }
+
+ return(var);
+}
+
+
+
+int
+DyCoreToGlimmer::copyInDoubleVar( const double *var, const char *var_name,
+ const char *struct_name, const long *var_dim_info)
+{
+ long elem_count=1;
+ long i;
+
+ // std::cout << "copyInDoubleVar " << var_name << " = " << *var << std::endl;
+
+ for (i=1;i<=var_dim_info[0];i++) elem_count *= var_dim_info[i];
+
+ //cout << "struct_name::" << struct_name << "::" << endl;
+ if (strcmp(struct_name,"geometry") == 0) {
+ if (strcmp(var_name,"dimInfo") == 0) {
+
+ }
+ }
+
+ if (strcmp(struct_name,"velocity") == 0) {
+
+ }
+
+ if (strcmp(struct_name,"numerics") == 0) {
+ if (strcmp(var_name,"dew") == 0)
+ {
+ numerics.dew = new double[elem_count];
+ for (i=0;i getLongVar("dimInfo","geometry");
+
+
+ // cout << "DimInfo in initDyCore: " << endl;
+ // for (i=0;i<10;i++) cout << dimInfo[i] << " ";
+ // cout << "In FELIX initDyCore, calling felix_driver_inin:" << endl;
+ felix_driver_init(2,0,this,input_fname);
+ return 0; // ought to make sensible use of this.
+
+}
+
+// updates cur_time_yr to match time update in dycore
+int
+FelixToGlimmer::runDyCore(float& cur_time_yr, const float time_inc_yr)
+{
+ cout << "In FELIX runDyCore" << endl;
+ felix_driver_run(this,cur_time_yr,time_inc_yr);
+ return 0; // ought to make sensible use of this.
+}
+
+int
+FelixToGlimmer::deleteDyCore()
+{
+ felix_driver_finalize(this -> getDyCoreIndex());
+ return 0; // ought to make sensible use of this.
+}
+
+//int storeFelixObject(AmrIce bisicles_object)
+//{}
+
+//AmrIce retrieveFelixObject()
+//{}
diff --git a/components/cism/glimmer-cism/libdycore/README b/components/cism/glimmer-cism/libdycore/README
new file mode 100644
index 0000000000..431438ea0d
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/README
@@ -0,0 +1,14 @@
+The libdycore directory contains routines that allow CISM to be connected to
+external dynamic cores written in C++. Currently, an interface to the
+BISICLES dycore is being developed. A prototype driver that uses the BISICLES
+dycore can be found in example-drivers/simple_bisicles/src. Build instructions
+are located in that directory. A typical build of the BISICLES/libDyCoreToGlimmer.a
+library can be performed using run_make. Building BISICLES requires access to the
+BISICLES and Chombo installations. For more information on BISICLES and Chombo,
+please send email to dfmartin@lbl.gov or ranken@lanl.gov. Some additional information
+can also be found in glimmer_to_dycore.info, though this file is out-of-date.
+
+For a description of BISICLES, see:
+https://seesar.lbl.gov/anag/staff/martin/talks/Martin-LIWG-Jan2011_final.pdf.
+
+Last Revised: 04/19/12 DMR
diff --git a/components/cism/glimmer-cism/libdycore/Ymir/YmirToGlimmer.H b/components/cism/glimmer-cism/libdycore/Ymir/YmirToGlimmer.H
new file mode 100644
index 0000000000..912e5c8eeb
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/Ymir/YmirToGlimmer.H
@@ -0,0 +1,15 @@
+#include "../DyCoreToGlimmer.H"
+
+#ifndef YMIR_TO_GLIMMER
+#define YMIR_TO_GLIMMER
+
+class YmirToGlimmer : public DyCoreToGlimmer
+{
+ public:
+
+ // int initDyCore();
+ // int runDyCore();
+ // int deleteDyCore();
+};
+
+#endif
diff --git a/components/cism/glimmer-cism/libdycore/Ymir/YmirToGlimmer.cpp b/components/cism/glimmer-cism/libdycore/Ymir/YmirToGlimmer.cpp
new file mode 100644
index 0000000000..d089053f3f
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/Ymir/YmirToGlimmer.cpp
@@ -0,0 +1,22 @@
+#include "YmirToGlimmer.H"
+
+using namespace std;
+
+int
+YmirToGlimmer::initDyCore()
+{
+ cout << "In Ymir initDyCore" << endl;
+}
+
+int
+YmirToGlimmer::runDyCore()
+{
+ cout << "In Ymir runDyCore" << endl;
+}
+
+int
+YmirToGlimmer::deleteDyCore()
+{
+
+}
+
diff --git a/components/cism/glimmer-cism/libdycore/dycore_stubs/glimmer_to_dycore_stubs.F90 b/components/cism/glimmer-cism/libdycore/dycore_stubs/glimmer_to_dycore_stubs.F90
new file mode 100644
index 0000000000..72959b92a0
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/dycore_stubs/glimmer_to_dycore_stubs.F90
@@ -0,0 +1,49 @@
+! The glimmer_to_dycore stubs module contains stubs for the Fortran side of the Glimmer-DyCore
+! interface. It uses the routines in dycore_to_glim_extern.cpp to create one
+! or more instances of a dynamic core ice sheet model. The dycore_model_index is
+! the only parameter needed by glimmer_to_dycore subroutines to interact with a
+! specific instance of a dynamic core model. DMR--5/24/10
+
+module glimmer_to_dycore
+ !*FD glimmer_to_dycore contains Fortran routines to couple Glimmer to a
+ ! dynamic core model.
+ use glide_types
+ use simple_forcing
+
+ contains
+
+ subroutine gtd_init_dycore_interface(model,dycore_type,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index, error_code
+ integer*4 dycore_type ! 0=BISICLES, 1=Ymir
+
+! call dycore_init_registry()
+! call dycore_init_model(dycore_type,dycore_model_index,error_code)
+! call gtd_set_geometry_vars(model,dycore_model_index)
+
+! print *,"In init_dycore_interface, dycore_type = ",dycore_type
+! print *,"In init_dycore_interface, dycore1 = ",dycore_model_index
+ end subroutine gtd_init_dycore_interface
+
+ subroutine gtd_run_dycore(dycore_model_index)
+ integer*4 dycore_model_index
+
+! call dycore_run_model(dycore_model_index)
+ end subroutine gtd_run_dycore
+
+ subroutine gtd_set_geometry_vars(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+
+ end subroutine gtd_set_geometry_vars
+
+
+ subroutine gtd_set_velocity_vars(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+
+ end subroutine gtd_set_velocity_vars
+
+
+
+end module glimmer_to_dycore
diff --git a/components/cism/glimmer-cism/libdycore/dycore_to_glimmer_extern.H b/components/cism/glimmer-cism/libdycore/dycore_to_glimmer_extern.H
new file mode 100644
index 0000000000..5a5ffc0177
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/dycore_to_glimmer_extern.H
@@ -0,0 +1,4 @@
+#include "DyCoreModelRegistry.H"
+
+int dycore_registry(int init,int get_model_by_index,int * model_index,
+ DyCoreToGlimmer ** dycore_to_glimmer_ptr,int dycore_type);
diff --git a/components/cism/glimmer-cism/libdycore/dycore_to_glimmer_extern.cpp b/components/cism/glimmer-cism/libdycore/dycore_to_glimmer_extern.cpp
new file mode 100644
index 0000000000..6633f0650b
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/dycore_to_glimmer_extern.cpp
@@ -0,0 +1,264 @@
+// dycore_to_glimmer_extern.cpp contains the extern C routines used to provide an interface
+// between the Fortran based Glimmer and the C++ based BISICLES. These routines access the
+// dycore_registry routine to create and use DyCoreToGlimmer class objects. Since the registry
+// can contain multiple DyCoreToGlimmer objects, it allows multiple DYCORE models to be
+// instantiated and used by Glimmer. DMR--5/24/10
+
+// NOTE(review): the original #include targets were lost in extraction
+// (empty include lines); restored with the standard headers this file's
+// code plausibly requires -- confirm against the upstream source.
+#include <cstdio>
+#include <cstring>
+#include <cstdlib>
+#include <iostream>
+#include "DyCoreModelRegistry.H"
+
+
+// Fortran-callable entry points.  The trailing underscore matches the
+// Fortran compiler's external-name convention; all arguments arrive by
+// reference (Fortran pass-by-reference).
+extern "C" {
+ void dycore_init_registry_();
+ void dycore_reset_registry_();
+ void dycore_get_new_model_(int * dycore_type,int * index,int * error_code);
+ void dycore_init_model_(int * dycore_type,int * index,char * input_fname,int * error_code);
+ void dycore_run_model_(int * model_index, double * cur_time_yr, double * time_inc_yr);
+ void dycore_delete_model_(int * dycore_model_index);
+
+ // "set_ptr" routines hand the dycore a pointer to Glimmer-owned storage:
+ void dycore_set_ptr_double_var_(double *var, char *var_name_in,
+ char *struct_name_in, int *model_index);
+ void dycore_set_ptr_long8_var_(long *var, char *var_name,
+ char *struct_name, int *model_index);
+ void dycore_set_ptr_int4_var_(int *var, char *var_name,
+ char *struct_name, int *model_index);
+
+ // "copy_in" routines copy data; dim_info[0] = rank, dim_info[1..] = extents
+ // (packed on the Fortran side by gtd_set_dim_info):
+ void dycore_copy_in_double_var_(double *var, char *var_name, char *struct_name,
+ long *dim_info, int *model_index);
+ void dycore_copy_in_long_var_(long *var, char *var_name, char *struct_name,
+ long *dim_info, int *model_index);
+
+ void dycore_test_vel_input_(int *model_index,double *vel,char *var_name,int *dim_info);
+ void dycore_test_vel_output_(int *model_index,double *vel,char *var_name,int *dim_info);
+
+}
+
+using namespace std;
+
+
+// dycore_registry uses the DyCoreModelRegistry class to create a registry of DyCoreToGlimmer
+// objects that are interface instances between DYCORE and Glimmer. This is the only
+// routine in this file that accesses a DyCoreModelRegistry object. It is not accessed
+// directly from Fortran, but used by the extern routines in this file.
+//
+// Calling conventions (multiplexed through the flag arguments):
+//   init ==  1                 : construct/announce the registry only
+//   init == -1                 : clear every registry entry
+//   clear_entry > 0            : clear just that entry
+//   get_model_by_index == 1, *model_index == -1 : allocate a new entry and
+//                                return its index through *model_index
+//   get_model_by_index == 1, *model_index >= 0  : look up the entry and set
+//                                *dycore_to_glimmer_ptr
+// Returns 0 on success, -1 when the registry is full, or the stored
+// registry index on a successful lookup.
+int dycore_registry(int init,int get_model_by_index,int * model_index,
+ DyCoreToGlimmer ** dycore_to_glimmer_ptr,int dycore_type,int clear_entry)
+{
+ // this declaration initializes the registry, when dycore_registry
+ // is first called:
+ static DyCoreModelRegistry dmr;
+
+ if (init == 1){
+ cout << "Initializing Dycore Model Registry" << endl;
+ return(0);
+ }
+ if (init == -1){
+ dmr.ClearRegistryEntries();
+ cout << "Cleared Dycore Model Registry" << endl;
+ return(0);
+ }
+ if (clear_entry > 0) {
+ cout << "Calling ClearRegistryEntry, for entry: " << clear_entry << endl;
+ dmr.ClearRegistryEntry(clear_entry);
+ return(0);
+ }
+
+ if (get_model_by_index == 1) {
+ if (*model_index == -1) {
+ // if model_index=-1, initialize a new registry entry and
+ // obtain a new model index:
+ *model_index = dmr.getModelCount() + 1;
+
+ // init a dycore interface object, and add it to the registry:
+ dmr.setDyCoreByType(*model_index,dycore_type);
+
+ dmr.setRegistryIndex(*model_index);
+
+ // NOTE(review): this bounds check runs after setDyCoreByType /
+ // setRegistryIndex have already been called with the out-of-range
+ // index -- confirm those calls are safe when the registry is full.
+ if (*model_index > DYCORE_MODEL_COUNT) {
+ cout << "Error, exceeded DYCORE Registry limit of " <<
+ DYCORE_MODEL_COUNT << endl;
+ return(-1);
+ }
+ dmr.incModelCount();
+ return(0);
+ }
+ // get pointer to DyCoreToGlimmer object from registry:
+ *dycore_to_glimmer_ptr = dmr.getDyCoreToGlimmerByIndex(*model_index);
+
+ return(dmr.getRegistryIndex(*model_index));
+ }
+ return(0);
+}
+
+// Fortran entry point: construct the static registry of dycore model
+// interfaces (init flag = 1 only announces/creates it; the literal 0
+// passed for model_index is an unused null pointer on this path).
+void dycore_init_registry_()
+{
+ DyCoreToGlimmer * dummy_dtg;
+ int init_registry = 1;
+
+ // initialize a registry of dycore model interfaces:
+ dycore_registry(init_registry,0,0,&dummy_dtg,0,0);
+}
+
+// Fortran entry point: clear every entry in the dycore model registry
+// (init flag = -1 selects the clear-all path in dycore_registry).
+void dycore_reset_registry_()
+{
+ DyCoreToGlimmer * dummy_dtg;
+ int init_registry = -1; // set init_registry to clear registry
+
+ // reset the registry of dycore model interfaces:
+ dycore_registry(init_registry,0,0,&dummy_dtg,0,0);
+}
+
+
+// Fortran entry point: allocate a new registry slot for a dycore of the
+// given type.  *index receives the new model index; *error_code receives
+// 0 on success or -1 if the registry is full.
+void dycore_get_new_model_(int * dycore_type,int * index,int * error_code)
+{
+ DyCoreToGlimmer * dtg;
+ int model_index=-1;
+
+ // cout << "In dycore_get_new_model_ , dycore_type = " << *dycore_type << endl;
+
+ // use *model_index=-1 to initialize a new registry entry:
+ *error_code = dycore_registry(0,1,&model_index,&dtg,*dycore_type,0);
+ *index = model_index;
+}
+
+// Fortran entry point: look up the registered interface object for
+// *model_index, record its type and index, and initialize the dycore
+// from the (null-terminated) input file name passed from Fortran.
+void dycore_init_model_(int * dycore_type,int * model_index,char * input_fname,int * error_code)
+{
+ DyCoreToGlimmer * dtg;
+
+ // cout << "In dycore_init_model_ , dycore_type = " << *dycore_type << endl;
+
+ dycore_registry(0,1,model_index,&dtg,-1,0);
+
+ dtg -> setDyCoreType(*dycore_type);
+ dtg -> setDyCoreIndex(*model_index);
+ dtg -> initDyCore(input_fname);
+}
+
+// Fortran entry point: advance the dycore identified by *model_index
+// from *cur_time_yr by *time_inc_yr (both in years).
+void dycore_run_model_(int * model_index, double * cur_time_yr, double * time_inc_yr)
+{
+ DyCoreToGlimmer * dtg;
+
+ dycore_registry(0,1,model_index,&dtg,-1,0);
+
+ //cout << "In dycore_run_model, model_index = " << *model_index << endl;
+ //cout << "In drm, cur_time, time_inc = " << *cur_time_yr << " " << *time_inc_yr << endl;
+
+ dtg -> runDyCore(*cur_time_yr,*time_inc_yr);
+}
+
+// Fortran entry point: clear the registry entry for *model_index.
+// Note the dycore object itself is not destroyed here -- the
+// deleteDyCore call is currently disabled.
+void dycore_delete_model_(int * model_index)
+{
+ DyCoreToGlimmer * dtg;
+ int clear_entry;
+
+ clear_entry = *model_index;
+ dycore_registry(0,1,model_index,&dtg,-1,clear_entry);
+ // reg_index = dycore_registry(0,1,model_index,&dtg,-1);
+ // dtg -> deleteDyCore();
+}
+
+// Hand the dycore a pointer to a Glimmer-owned double array/scalar,
+// filed under var_name within the named struct (e.g. "geometry").
+void dycore_set_ptr_double_var_(double *var, char *var_name,
+ char *struct_name, int *model_index)
+{
+ DyCoreToGlimmer * dtg;
+
+ dycore_registry(0,1,model_index,&dtg,-1,0);
+ dtg -> setDoubleVar(var,var_name,struct_name);
+}
+
+// Hand the dycore a pointer to a Glimmer-owned 8-byte-integer variable,
+// filed under var_name within the named struct.
+void dycore_set_ptr_long8_var_(long *var, char *var_name,
+ char *struct_name, int *model_index)
+{
+ DyCoreToGlimmer * dtg;
+
+ // cout << "var_name::" << var_name << "::" << endl;
+
+ dycore_registry(0,1,model_index,&dtg,-1,0);
+ dtg -> setLongVar(var,var_name,struct_name);
+}
+
+// Hand the dycore a pointer to a Glimmer-owned 4-byte-integer variable,
+// filed under var_name within the named struct.
+void dycore_set_ptr_int4_var_(int *var, char *var_name,
+ char *struct_name, int *model_index)
+{
+ DyCoreToGlimmer * dtg;
+
+ // cout << "var_name::" << var_name << "::" << endl;
+
+ dycore_registry(0,1,model_index,&dtg,-1,0);
+ dtg -> setInt4Var(var,var_name,struct_name);
+}
+
+// Copy a double variable into the dycore interface object for
+// *model_index; dim_info[0] = rank, dim_info[1..] = extents (packed by
+// gtd_set_dim_info on the Fortran side).
+void dycore_copy_in_double_var_(double *var, char *var_name, char *struct_name,
+                                long *dim_info, int *model_index)
+{
+  DyCoreToGlimmer * dtg;
+
+  // Debug output disabled for consistency with the other set/copy
+  // wrappers, which keep their diagnostic prints commented out:
+  // cout << "In copy_in_double_var, var_name::" << var_name << "::" << endl;
+  // std::cout << " dycore_copy_in_double_var_ " << var_name
+  //           << " = " << *var << std::endl;
+  dycore_registry(0,1,model_index,&dtg,-1,0);
+  dtg -> copyInDoubleVar(var,var_name,struct_name,dim_info);
+}
+
+// Copy an 8-byte-integer variable into the dycore interface object for
+// *model_index; dim_info[0] = rank, dim_info[1..] = extents.
+void dycore_copy_in_long_var_(long *var, char *var_name, char *struct_name,
+ long *dim_info, int *model_index)
+{
+ DyCoreToGlimmer * dtg;
+
+ // cout << "In copy_long_var" << endl;
+ //cout << "struct_name::" << struct_name << "::" << endl;
+
+ dycore_registry(0,1,model_index,&dtg,-1,0);
+ dtg -> copyInLongVar(var,var_name,struct_name,dim_info);
+}
+
+
+// Debug aid: look up the interface object for *model_index and echo the
+// first 14 entries of the velocity array passed in from Fortran (14 is a
+// fixed debug sample size, not tied to dim_info).
+void dycore_test_vel_input_(int *model_index,double *vel,char *var_name,
+ int * dim_info)
+{
+ int i, reg_index;
+ // double test_array[10];
+ DyCoreToGlimmer * dtg;
+ // double * var;
+
+ // cout << "test_vel_in, Calling dycore_registry" << endl;
+
+ reg_index = dycore_registry(0,1,model_index,&dtg,-1,0);
+
+ cout << "test_vel_in model_index compare: " << *model_index << " "
+ << reg_index << endl;
+
+ // if (*model_index == 1) (*dtg).set_velocity_data(vel,"uvel",dim_info);
+ // if (*model_index == 2) (*dtg).set_velocity_data(vel,"vvel",dim_info);
+
+ cout << "In vel input, var = " << var_name << ": ";
+ for (i=0;i<14;i++) cout << vel[i] << " ";
+ cout << endl;
+}
+
+// Debug aid: intended to fetch velocity data back from the dycore and
+// echo the first 14 entries.  The get_velocity_data calls are currently
+// disabled, so var stays NULL and the echo loop is skipped.
+void dycore_test_vel_output_(int *model_index,double *vel,char *var_name,
+                             int * dim_info)
+{
+  int i;
+  // double test_array[10];
+  DyCoreToGlimmer * dtg;
+  // BUG FIX: var was previously uninitialized yet dereferenced in the
+  // print loop below (undefined behavior).  Initialize to NULL and guard.
+  double * var = NULL;
+
+  cout << "In test output, Model Index: " << *model_index << endl;
+
+  cout << "output: my_reg_index: " << dycore_registry(0,1,model_index,&dtg,-1,0) << endl;
+
+  if (*model_index == 1) {
+    // (*dtg).get_velocity_data(&var,"uvel",dim_info);
+  }
+
+  if (*model_index == 2) {
+    // (*dtg).get_velocity_data(&var,"vvel",dim_info);
+  }
+  cout << "In vel output, var = " << var_name << ": ";
+  if (var != NULL) {
+    for (i=0;i<14;i++) cout << var[i] << " ";
+  }
+  cout << endl;
+
+}
diff --git a/components/cism/glimmer-cism/libdycore/glimmer_to_dycore.F90 b/components/cism/glimmer-cism/libdycore/glimmer_to_dycore.F90
new file mode 100644
index 0000000000..7a7a2a50fc
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/glimmer_to_dycore.F90
@@ -0,0 +1,384 @@
+! The glimmer_to_dycore module contains the Fortran side of the Glimmer-DyCore
+! interface. It uses the routines in dycore_to_glimmer_extern.cpp to create one
+! or more instances of a dynamic core ice sheet model. The dycore_model_index is
+! the only parameter needed by glimmer_to_dycore subroutines to interact with a
+! specific instance of a dynamic core model. DMR--5/24/10
+
+module glimmer_to_dycore
+ !*FD glimmer_to_dycore contains Fortran routines to couple Glimmer to a
+ ! dynamic core model.
+ use glide_types
+ !use mpi_mod
+ use parallel
+ !use simple_forcing
+
+ contains
+
+ ! Create the C-side registry that tracks dycore interface instances.
+ ! Call once before any gtd_init_dycore call.
+ subroutine gtd_init_dycore_interface()
+ call dycore_init_registry()
+!print *,"Past dycore_init_registry"
+ end subroutine gtd_init_dycore_interface
+
+ ! Tear down the dycore interface registry (clears all entries).
+ subroutine gtd_delete_dycore_interface()
+ call dycore_reset_registry()
+ end subroutine gtd_delete_dycore_interface
+
+ ! Register a new external dycore instance for this model, hand it
+ ! pointers to (or copies of) Glimmer's state via the gtd_set_* routines,
+ ! then initialize it from the dycore config file.  On return,
+ ! dycore_model_index identifies the instance in later gtd_* calls.
+ subroutine gtd_init_dycore(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+
+ integer*4 error_code
+ integer*4 dycore_type ! 0=BISICLES, 1=Ymir
+ ! dycore_names is only used by the commented-out diagnostic print below
+ character(8),DIMENSION(3) :: dycore_names = (/"Native ","BISICLES","FELIX "/)
+
+!print *,'in init -- topg ndims,shape = ',size(shape(model%geometry%topg)),shape(model%geometry%topg)
+ dycore_type = model%options%external_dycore_type
+!print *,"In gtd_init_dycore, calling get_new_model"
+ call dycore_get_new_model(dycore_type,dycore_model_index,error_code)
+!print *,"In gtd_init_dycore, calling get_set_var routines"
+ call gtd_set_geometry_vars(model,dycore_model_index)
+!print *,"In gtd_init_dycore, past set_geometry_vars"
+ call gtd_set_velocity_vars(model,dycore_model_index)
+ call gtd_set_numerics_vars(model,dycore_model_index)
+ call gtd_set_temper_vars(model,dycore_model_index)
+ call gtd_set_climate_vars(model,dycore_model_index)
+ call gtd_set_mpi_vars(model,dycore_model_index)
+ call gtd_set_constants(model,dycore_model_index)
+
+ !print *,"In gtd_init_dycore, dycore_type, dycore_index = " , &
+ ! dycore_names(dycore_type+1),dycore_model_index
+ ! file name is null-terminated for the C side:
+ call dycore_init_model(dycore_type,dycore_model_index, &
+ trim(model%options%dycore_input_file)//char(0),error_code)
+
+ end subroutine gtd_init_dycore
+
+ ! Advance dycore instance dycore_model_index from cur_time by time_inc
+ ! (the C side interprets both as years).
+ subroutine gtd_run_dycore(dycore_model_index,cur_time,time_inc)
+ integer*4 dycore_model_index
+ real(dp) cur_time, time_inc
+
+ call dycore_run_model(dycore_model_index,cur_time,time_inc)
+ end subroutine gtd_run_dycore
+
+ ! Remove dycore instance dycore_model_index from the registry.
+ subroutine gtd_delete_dycore(dycore_model_index)
+ integer*4 dycore_model_index
+
+ call dycore_delete_model(dycore_model_index)
+ end subroutine gtd_delete_dycore
+
+ ! Pack an array shape into dim_info for the C side:
+ ! dim_info(1) = rank, dim_info(2:rank+1) = extents, rest zeroed.
+ ! NOTE(review): the dummy argument named "shape" shadows the intrinsic
+ ! of the same name inside this scope.
+ subroutine gtd_set_dim_info(shape,dim_info)
+ integer, dimension(:), intent(in) :: shape
+ integer*8, dimension(:), intent(inout) :: dim_info
+
+ dim_info = 0
+ dim_info(1) = size(shape)
+ dim_info(2:1+dim_info(1)) = shape
+ end subroutine gtd_set_dim_info
+
+ ! Give the dycore pointers to Glimmer's geometry fields, then copy in
+ ! grid-dimension info (global index space) and the local parallel
+ ! decomposition bounds (ewlb/ewub/nslb/nsub/nhalo) as 8-byte integers.
+ ! All names passed to the C side are null-terminated with char(0).
+ subroutine gtd_set_geometry_vars(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+
+ ! shape2, rank, var_name_len, dtype_name_len are currently unused
+ integer*4 shape2, rank
+ character*20 var_name
+ character*20 dtype_name
+ integer*4 var_name_len, dtype_name_len
+
+ integer*8 dim_info(11)
+ integer*8 dim_info2(2)
+ integer*8 ewlbl, ewubl, nslbl, nsubl, nhalol
+
+! print *,"In gtd_set_geometry_vars, dycore_model_index = ",dycore_model_index
+
+! print *,'thck ndims,shape = ',size(shape(model%geometry%thck)),shape(model%geometry%thck)
+! print *,'topg ndims,shape = ',size(shape(model%geometry%topg)),shape(model%geometry%topg)
+
+! print *,'usrf ndims,shape = ',size(shape(model%geometry%usrf)),shape(model%geometry%usrf)
+
+ dtype_name = 'geometry'//char(0)
+
+ var_name = 'thck'//char(0)
+ !call gtd_set_dim_info(shape(model%geometry%thck),dim_info)
+ call dycore_set_ptr_double_var(model%geometry%thck,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'topg'//char(0)
+ !call gtd_set_dim_info(shape(model%geometry%topg),dim_info)
+ call dycore_set_ptr_double_var(model%geometry%topg,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'usrf'//char(0)
+ !call gtd_set_dim_info(shape(model%geometry%usrf),dim_info)
+ call dycore_set_ptr_double_var(model%geometry%usrf,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'lsrf'//char(0)
+ !call gtd_set_dim_info(shape(model%geometry%lsrf),dim_info)
+ call dycore_set_ptr_double_var(model%geometry%lsrf,var_name,dtype_name,dycore_model_index)
+
+ !* (DFM -- added floating_mask, ice_mask, lower_cell_loc, and lower_cell_temp
+ var_name = 'floating_mask'//char(0)
+ !call gtd_set_dim_info(shape(model%geometry%floating_mask),dim_info)
+ call dycore_set_ptr_double_var(model%geometry%floating_mask,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'ice_mask'//char(0)
+ !call gtd_set_dim_info(shape(model%geometry%ice_mask),dim_info)
+ call dycore_set_ptr_double_var(model%geometry%ice_mask,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'lower_cell_loc'//char(0)
+ !call gtd_set_dim_info(shape(model%geometry%lower_cell_loc),dim_info)
+ call dycore_set_ptr_double_var(model%geometry%lower_cell_loc,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'lower_cell_temp'//char(0)
+ !call gtd_set_dim_info(shape(model%geometry%lower_cell_temp),dim_info)
+ call dycore_set_ptr_double_var(model%geometry%lower_cell_temp,var_name,dtype_name,dycore_model_index)
+
+
+ ! print *,"this_rank, ewlb, ewub, nslb, nsub", this_rank, ewlb, ewub, nslb, nsub
+
+! (DFM -2/12/13) since ewlb, et al contain local grid info, use dim_info to
+! pass in global index space info
+ dim_info(1) = 3
+ dim_info(2) = model%general%upn
+ dim_info(3) = global_ewn
+ dim_info(4) = global_nsn
+
+
+! dtype_name = 'geometry'
+! dtype_name_len = 8
+
+ ! use age to get dim_info for now (only 3d var in geometry derived type)
+! call gtd_set_dim_info(shape(model%geometry%age),dim_info)
+
+ ! print *, "dim_info = ", dim_info(1), dim_info(2), dim_info(3), dim_info(4)
+
+ var_name = 'dimInfo'//char(0)
+ dim_info2(1) = 1
+ dim_info2(2) = dim_info(1) + 1
+ call dycore_copy_in_long_var(dim_info,var_name,dtype_name,dim_info2, dycore_model_index)
+
+ ! widen the (default-integer) parallel-decomposition bounds to
+ ! integer*8 before copying them across the C interface:
+ ewlbl = ewlb
+ ewubl = ewub
+ nslbl = nslb
+ nsubl = nsub
+ nhalol = nhalo
+
+ dim_info2(1) = 1
+ dim_info2(2) = 1
+ var_name = 'ewlb'//char(0)
+ call dycore_copy_in_long_var(ewlbl,var_name,dtype_name,dim_info2, dycore_model_index)
+ var_name = 'ewub'//char(0)
+ call dycore_copy_in_long_var(ewubl,var_name,dtype_name,dim_info2, dycore_model_index)
+ var_name = 'nslb'//char(0)
+ call dycore_copy_in_long_var(nslbl,var_name,dtype_name,dim_info2, dycore_model_index)
+ var_name = 'nsub'//char(0)
+ call dycore_copy_in_long_var(nsubl,var_name,dtype_name,dim_info2, dycore_model_index)
+ var_name = 'nhalo'//char(0)
+ call dycore_copy_in_long_var(nhalol,var_name,dtype_name,dim_info2, dycore_model_index)
+
+
+! print *,"leaving gtd_set_geometry_vars, dim_info = ",dim_info
+ end subroutine gtd_set_geometry_vars
+
+
+ ! Give the dycore pointers to the velocity fields (uvel/vvel/wvel/wgrd,
+ ! plus beta exported under the name 'btrc'), then copy in the dimension
+ ! info derived from uvel's shape.
+ subroutine gtd_set_velocity_vars(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+
+ character*20 var_name
+ character*20 dtype_name
+ integer*4 var_name_len, dtype_name_len
+
+ integer*8 dim_info(11)
+ integer*8 dim_info2(2)
+
+! print *,"In copy_velocity_vars, dycore_model_index = ",dycore_model_index
+
+ dtype_name = 'velocity'//char(0)
+
+ ! print *,'uvel ndims,shape = ',size(shape(model%velocity%uvel)),shape(model%velocity%uvel)
+
+ ! print *,'vvel ndims,shape = ',size(shape(model%velocity%vvel)),shape(model%velocity%vvel)
+
+ ! print *,'wvel ndims,shape = ',size(shape(model%velocity%wvel)),shape(model%velocity%wvel)
+
+
+ var_name = 'uvel'//char(0)
+ call dycore_set_ptr_double_var(model%velocity%uvel,var_name, &
+ dtype_name,dycore_model_index);
+ var_name = 'vvel'//char(0)
+ call dycore_set_ptr_double_var(model%velocity%vvel,var_name, &
+ dtype_name,dycore_model_index);
+ var_name = 'wvel'//char(0)
+ call dycore_set_ptr_double_var(model%velocity%wvel,var_name, &
+ dtype_name,dycore_model_index);
+
+ var_name = 'wgrd'//char(0)
+ call dycore_set_ptr_double_var(model%velocity%wgrd,var_name, &
+ dtype_name,dycore_model_index);
+
+! print *,'beta ndims,shape = ',size(shape(model%velocity%beta)),shape(model%velocity%beta)
+
+ ! note: Glimmer's beta field is exported under the dycore name 'btrc'
+ var_name = 'btrc'//char(0)
+ call dycore_set_ptr_double_var(model%velocity%beta,var_name, &
+ dtype_name,dycore_model_index);
+
+ call gtd_set_dim_info(shape(model%velocity%uvel),dim_info)
+
+ var_name = 'dimInfo'//char(0)
+ dim_info2(1) = 1
+ dim_info2(2) = 4
+ call dycore_copy_in_long_var(dim_info,var_name,dtype_name,dim_info2,dycore_model_index)
+ end subroutine gtd_set_velocity_vars
+
+ ! Give the dycore pointers to the time-control scalars (tstart, tend,
+ ! time) and copy in the grid spacings dew/dns as scalar values.
+ subroutine gtd_set_numerics_vars(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+
+ character*20 var_name
+ character*20 dtype_name
+ integer*4 var_name_len, dtype_name_len
+ integer*8 dim_info2(2)
+
+ dtype_name = 'numerics'//char(0)
+
+ ! scalar dim_info: rank 1, extent 1
+ dim_info2(1) = 1
+ dim_info2(2) = 1
+
+
+ var_name = 'tstart'//char(0)
+ call dycore_set_ptr_double_var(model%numerics%tstart,var_name,dtype_name,dycore_model_index)
+ var_name = 'tend'//char(0)
+ call dycore_set_ptr_double_var(model%numerics%tend,var_name,dtype_name,dycore_model_index)
+ var_name = 'time'//char(0)
+ call dycore_set_ptr_double_var(model%numerics%time,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'dew'//char(0)
+ call dycore_copy_in_double_var(model%numerics%dew,var_name,dtype_name,dim_info2,dycore_model_index)
+ var_name = 'dns'//char(0)
+ call dycore_copy_in_double_var(model%numerics%dns,var_name,dtype_name,dim_info2,dycore_model_index)
+
+ end subroutine gtd_set_numerics_vars
+
+ ! Copy the physical constants the dycore needs (gravity, seconds per
+ ! year, ice and seawater densities) across the interface as scalars.
+ subroutine gtd_set_constants(model,dycore_model_index)
+ use glimmer_physcon, only: grav, scyr, rhoi, rhoo
+
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+
+ character*20 var_name
+ character*20 dtype_name
+ integer*4 var_name_len, dtype_name_len
+ integer*8 dim_info2(2)
+
+ dtype_name = 'constants'//char(0)
+
+ ! scalar dim_info: rank 1, extent 1
+ dim_info2(1) = 1
+ dim_info2(2) = 1
+
+ var_name = 'gravity'//char(0)
+ call dycore_copy_in_double_var(grav,var_name,dtype_name,dim_info2,dycore_model_index)
+
+ var_name = 'seconds_per_year'//char(0)
+ call dycore_copy_in_double_var(scyr,var_name,dtype_name,dim_info2,dycore_model_index)
+
+ var_name = 'rho_ice'//char(0)
+ call dycore_copy_in_double_var(rhoi,var_name,dtype_name,dim_info2,dycore_model_index)
+
+ var_name = 'rho_seawater'//char(0)
+ call dycore_copy_in_double_var(rhoo,var_name,dtype_name,dim_info2,dycore_model_index)
+
+ end subroutine gtd_set_constants
+
+ ! Give the dycore pointers to the temperature fields (temp, bheatflx,
+ ! bmlt) and copy in dimension info derived from temp's shape.
+ subroutine gtd_set_temper_vars(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+ character*20 var_name
+ character*20 dtype_name
+
+ integer*8 dim_info(11), dim_info2(2)
+
+ dtype_name = 'temper'//char(0)
+
+ var_name = 'temp'//char(0)
+ call dycore_set_ptr_double_var(model%temper%temp,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'bheatflx'//char(0)
+ call dycore_set_ptr_double_var(model%temper%bheatflx,var_name,dtype_name,dycore_model_index)
+
+ var_name = 'bmlt'//char(0)
+ call dycore_set_ptr_double_var(model%temper%bmlt,var_name,dtype_name,dycore_model_index)
+
+ ! print *,'temp ndims,shape = ',size(shape(model%temper%temp)),shape(model%temper%temp)
+
+ ! print *,'bheatflx ndims,shape = ',size(shape(model%temper%bheatflx)),shape(model%temper%bheatflx)
+
+ ! print *,'bmlt ndims,shape = ',size(shape(model%temper%bmlt)),shape(model%temper%bmlt)
+
+ call gtd_set_dim_info(shape(model%temper%temp),dim_info)
+
+ var_name = 'dimInfo'//char(0)
+ dim_info2(1) = 1
+ dim_info2(2) = dim_info(1) + 1
+ call dycore_copy_in_long_var(dim_info,var_name,dtype_name,dim_info2,dycore_model_index)
+ end subroutine gtd_set_temper_vars
+
+ ! Give the dycore pointers to the climate fields (acab, acab_tavg,
+ ! calving) and copy in dimension info from acab's shape.  The eus
+ ! (eustatic sea level) copy stays disabled because eus is not yet set
+ ! at initialization time.
+ subroutine gtd_set_climate_vars(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+ character*20 var_name
+ character*20 dtype_name
+
+ integer*8 dim_info(11), dim_info2(2)
+
+ dtype_name = 'climate'//char(0)
+
+ var_name = 'acab'//char(0)
+ call dycore_set_ptr_double_var(model%climate%acab,var_name,dtype_name,dycore_model_index)
+ var_name = 'acab_tavg'//char(0)
+ call dycore_set_ptr_double_var(model%climate%acab_tavg,var_name,dtype_name,dycore_model_index)
+ var_name = 'calving'//char(0)
+ call dycore_set_ptr_double_var(model%climate%calving,var_name,dtype_name,dycore_model_index)
+
+ call gtd_set_dim_info(shape(model%climate%acab),dim_info)
+ ! print *,"In climate set, dim_info: ",dim_info
+ var_name = 'dimInfo'//char(0)
+ dim_info2(1) = 1
+ dim_info2(2) = dim_info(1) + 1
+ call dycore_copy_in_long_var(dim_info,var_name,dtype_name,dim_info2,dycore_model_index)
+
+ ! null-terminated like every other name passed across the C interface
+ ! (the char(0) terminator was previously missing here):
+ var_name = 'eus'//char(0)
+ dim_info2(1) = 1
+ dim_info2(2) = 1
+ ! eus parm isn't being set during initialization, so commented out here:
+ !call dycore_copy_in_double_var(model%climate%eus,var_name,dtype_name,dim_info2,dycore_model_index)
+ !print *,"eus: ",model%climate%eus
+
+ end subroutine gtd_set_climate_vars
+
+ ! Copy the MPI decomposition info (communicator handle, task count,
+ ! this task's rank -- provided by the parallel module) into the dycore
+ ! as 8-byte integers.
+ subroutine gtd_set_mpi_vars(model,dycore_model_index)
+ type(glide_global_type) :: model
+ integer*4 dycore_model_index
+ character*20 var_name
+ character*20 dtype_name
+
+ integer*8 dim_info(11), dim_info2(2)
+
+ ! integer,save :: comm, tasks, this_rank -- from parallel_mpi.F90
+ integer*8 communicator, process_count, my_rank
+
+
+ ! widen default integers to integer*8 for the C interface:
+ communicator = comm
+ process_count = tasks
+ my_rank = this_rank
+
+ dtype_name = 'mpi_vars'//char(0)
+
+ dim_info2(1) = 1
+ dim_info2(2) = 1
+ var_name = 'communicator'//char(0)
+ call dycore_copy_in_long_var(communicator,var_name,dtype_name,dim_info2, dycore_model_index)
+ var_name = 'process_count'//char(0)
+ call dycore_copy_in_long_var(process_count,var_name,dtype_name,dim_info2, dycore_model_index)
+ var_name = 'my_rank'//char(0)
+ call dycore_copy_in_long_var(my_rank,var_name,dtype_name,dim_info2, dycore_model_index)
+
+ end subroutine gtd_set_mpi_vars
+
+end module glimmer_to_dycore
diff --git a/components/cism/glimmer-cism/libdycore/glimmer_to_dycore.info b/components/cism/glimmer-cism/libdycore/glimmer_to_dycore.info
new file mode 100644
index 0000000000..a7821af92f
--- /dev/null
+++ b/components/cism/glimmer-cism/libdycore/glimmer_to_dycore.info
@@ -0,0 +1,89 @@
+Coupling Glimmer to External Dynamic Cores
+Last revised: 8/26/2010 Doug Ranken
+
+The purpose of the Glimmer/Dycore interface is to provide a flexible method for connecting the
+Fortran-based Glimmer package to C/C++ based dynamic cores (though it could be modified to
+connect external Fortran dynamic cores, as well). This document provides instructions for
+building the external dynamic core interface between Glimmer and BISICLES (or Ymir, when it
+is available). With the current design, adding new dynamic cores can be done using small
+modifications to DyCoreModelRegistry.H and DyCoreModelRegistry.cpp. Since the
+DyCoreModelRegistry class is a small code, it would be relatively easy to have a build system
+couple-in a specific set of dynamic cores, based on a few user specified configuration
+parameters.
+
+The interface is designed so that all glimmer calls to external dynamic core routines are
+contained in the Fortran module glimmer_to_dycore.F90. To allow glimmer to be built without
+including any external dynamic cores, there is also a glimmer_to_dycore_stubs.F90 module.
+
+The rest of the interface is written in C/C++. The dycore_to_glimmer_extern.cpp provides a
+bridge between the Fortran and C++. The C++ routines provide a registry class designed to
+handle multiple dynamic core interfaces (which can be of different types), and classes that
+implement the interface. The DycoreToGlimmer class is designed to be a parent class for
+subclasses that handle the interface to specific dynamic cores.
+
+The rest of this document tells how to build an interface connecting Glimmer to the BISICLES
+dynamic core.
+
+There are 3 main steps to the build process, to be done in this order:
+1) Build the interface library
+2) Build BISICLES (or Ymir) library
+3) Compile simple_bisicles
+
+1) Build the interface library
+In directory your_glimmer/src/libdycore
+make BISICLES
+
+2) Build BISICLES and single-file Chombo libraries (libBisicles.a, libChomboLibs.a)
+In the BISICLES installation, code/interface directory:
+ make bisicles
+
+3) Compile simple_bisicles
+rm simple_bisicles
+make -f Makefile.dycore simple_bisicles
+
+To compile with stub routines for the interface:
+1) Build the interface library
+In directory your_glimmer/src/libdycore
+make DYCORE_STUBS
+
+2) Compile simple_bisicles
+rm simple_bisicles
+make -f Makefile.dycore simple_bisicles
+make -f Makefile.dycore_stubs simple_bisicles
+
+DyCoreModelRegistry.cpp + .H
+DyCoreToGlimmer.cpp + .H
+dycore_to_glimmer_extern.cpp
+
+
+Testing simple_bisicles:
+1) simple_bisicles, load hump.config
+2) simple_bisicles_run
+3) gdb simple_bisicles, load hump.config
+
+
+BISICLES Build Notes:
+
+
+Chombo:
+with a fresh checkout, need to set Chombo/lib/mk/Make.defs.local
+with machine-dependent info
+
+latest resolved dependencies:
+HDF5, which needs szip.
+Atlas version of lapack.
+
+Glimmer-CISM:
+
+using LANL parallel branch:
+svn co https://username@svn.berlios.de/svnroot/repos/glimmer-cism/glimmer-cism-lanl/branches/parallel
+
+Dan's configure command:
+
+./configure --with-netcdf=/home/loren/users/dmartin/util/netcdf/netcdf-4.0.1/ --prefix=/home/loren/users/dmartin/cleanCheckout/gc1/parallel --with-blas=-lblas FC=gfortran FCFLAGS="-ffree-line-length-none -g -DNO_RESCALE" FFLAGS="-g -DNO_RESCALE" CFLAGS="-g -DNO_RESCALE"
+
+Doug's configure command:
+
+./configure --with-netcdf=/home/ranken/util/netcdf/netcdf-4.0.1 --with-hdf5-lib=/home/ranken/util/hdf/hdf5-1.8.4/hdf5/lib/ --prefix=$PWD --with-blas=-lblas --with-lapack=-llapack --with-tags=gfortran --with-slap-slap FC=gfortran FCFLAGS="-ffree-line-length-none -g -DNO_RESCALE" FFLAGS="-g -DNO_RESCALE" CFLAGS="-g -DNO_RESCALE"
+
+./configure --with-netcdf=/home/ranken/util/netcdf/netcdf-4.0.1 --with-hdf5-lib=/home/ranken/util/hdf/hdf5-1.8.4/hdf5/lib/ --prefix=$PWD --with-blas=-lblas --with-lapack=-llapack --with-tags=gfortran --with-slap-slap FC=gfortran FCFLAGS="-ffree-line-length-none -g -DNO_RESCALE" FFLAGS="-g -DNO_RESCALE" CFLAGS="-g -DNO_RESCALE" --with-hdf5=/home/ranken/util/hdf/5-1.6.10-linux-x86_64-static --with-szip=/home/ranken/util/hdf/szip-2.1/src/.libs/libsz.a --with-libdycore=/home/ranken/util/BISICLES/code/interface/libdycore --with-bisicles=/home/ranken/util/BISICLES
diff --git a/components/cism/glimmer-cism/libglad/README b/components/cism/glimmer-cism/libglad/README
new file mode 100644
index 0000000000..83566f317a
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/README
@@ -0,0 +1,36 @@
+This directory contains an alternative to glint that can be used when a GCM
+passes already-downscaled fields: glad (where the "a.d." stands for "already
+downscaled"). glad acts as a lightweight layer between the GCM and the rest of
+CISM. It is responsible for:
+
+(1) Handling time stepping and temporal averaging
+
+(2) Providing a simpler interface to the climate model, rather than requiring
+the climate model to make detailed calls to things like glide_tstep_p1, etc.
+
+(3) Translating inputs and outputs into appropriate quantities
+
+Eventually, it is possible that this layer could be removed, moving some of its
+functionality up into the GLC layer of CESM/ACME, and some of its functionality
+down into the rest of the CISM code. However, we may choose to keep this
+lightweight layer in place, because it does have some value.
+
+----
+
+The main differences between the code here and the code in libglint are:
+
+(1) libglad does not do any upscaling / downscaling / interpolation
+
+(2) libglad currently only works with SMB inputs - not PDD, etc.
+
+----
+
+Note that a few modules here are also used by libglint, including:
+
+- glad_constants.F90
+
+- glad_restart_gcm.F90
+
+So those may contain a bit of code that is needed by libglint but not by
+libglad.
+
diff --git a/components/cism/glimmer-cism/libglad/glad_constants.F90 b/components/cism/glimmer-cism/libglad/glad_constants.F90
new file mode 100644
index 0000000000..ece500e48d
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_constants.F90
@@ -0,0 +1,77 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_constants.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glad_constants
+
+ use glimmer_global, only: dp
+ use glimmer_physcon, only: pi
+
+ implicit none
+
+ ! ------------------------------------------------------------
+ ! global parameters
+ ! ------------------------------------------------------------
+
+ real(dp), parameter :: lapse = 0.0065_dp ! atm lapse rate, deg/m
+ real(dp),parameter :: days2hours = 24.d0
+ real(dp),parameter :: hours2seconds = 3600.d0 !> Hours to seconds conversion factor
+
+ real(dp), parameter :: default_diy = 360.d0 !> Default number of days in year
+ real(dp), parameter :: default_y2h = days2hours*default_diy !> Default years to hours conversion
+
+ ! Constants set at run-time
+
+ ! NOTE(review): days_in_year is an integer initialized from the real
+ ! parameter default_diy (360.d0); the implicit conversion yields 360.
+ integer :: days_in_year = default_diy !> The number of days in a year
+ real(dp) :: years2hours = default_y2h !> Years to hours conversion factor
+ real(dp) :: hours2years = 1.d0/default_y2h !> Hours to years conversion factor
+
+ private :: default_diy, default_y2h
+
+ ! Minimum thickness of ice, at or below which a point is considered bare land for upscaling/
+ ! downscaling purposes. Values other than 0 can result in odd behavior - e.g., a value
+ ! greater than 0 means that CLM would consider a point to have become icesheet, and so
+ ! would send positive SMB, but if this SMB didn't reach the min_thck threshold, then
+ ! CISM would effectively tell CLM, "no, it's not actually icesheet yet - it's still
+ ! bare ground".
+ real(dp), parameter :: min_thck = 0.d0
+
+contains
+
+ !> Set the year length (in days) and recompute the derived
+ !> years<->hours conversion factors accordingly.
+ subroutine glad_set_year_length(daysinyear)
+
+ integer, intent(in) :: daysinyear
+
+ days_in_year = daysinyear
+ years2hours = days2hours*days_in_year
+ hours2years = 1.d0/years2hours
+
+ end subroutine glad_set_year_length
+
+end module glad_constants
diff --git a/components/cism/glimmer-cism/libglad/glad_initialise.F90 b/components/cism/glimmer-cism/libglad/glad_initialise.F90
new file mode 100644
index 0000000000..386de60432
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_initialise.F90
@@ -0,0 +1,344 @@
+! WJS (1-30-12): The following (turning optimization off) is needed as a workaround for an
+! xlf compiler bug, at least in IBM XL Fortran for AIX, V12.1 on bluefire
+#ifdef CPRIBM
+@PROCESS OPT(0)
+#endif
+
+#ifdef CPRIBM
+@PROCESS ALIAS_SIZE(107374182)
+#endif
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_initialise.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glad_initialise
+
+ !> Initialise GLAD model instance
+
+ use glad_type
+ use glimmer_global, only: dp
+ implicit none
+
+ private
+ public glad_i_initialise_gcm, glad_i_end
+
+contains
+
+ subroutine glad_i_initialise_gcm(config, instance, &
+ force_start, force_dt, &
+ gcm_restart, gcm_restart_file, &
+ gcm_config_unit)
+
+ ! Initialise a GLAD ice model instance for GCM coupling
+ !
+ ! Reads the config, optionally restores state from a GCM restart file,
+ ! initialises the selected dycore (glide or glissade), sets up netCDF
+ ! output, allocates coupling arrays, initialises mass-balance
+ ! accumulation, validates the coupling timesteps, and writes the
+ ! initial diagnostics/output.
+
+ use glimmer_paramets, only: GLC_DEBUG
+ use glimmer_log
+ use glimmer_config
+ use glimmer_coordinates, only : coordsystem_new
+ use glad_mbal_coupling, only : glad_mbc_init
+ use glad_io , only: glad_io_createall , glad_io_writeall
+ use glad_mbal_io , only: glad_mbal_io_createall, glad_mbal_io_writeall
+ use glimmer_ncio
+ use glide_nc_custom , only: glide_nc_fillall
+ use glide
+ use glissade
+ use glad_constants
+ use glad_restart_gcm
+ use glide_diagnostics
+ use parallel, only: main_task
+
+ implicit none
+
+ ! Arguments
+ type(ConfigSection), pointer :: config ! structure holding sections of configuration file
+ type(glad_instance), intent(inout) :: instance ! The instance being initialised.
+
+ integer, intent(in) :: force_start ! glad forcing start time (hours)
+ integer, intent(in) :: force_dt ! glad forcing time step (hours)
+
+ logical, optional, intent(in) :: gcm_restart ! logical flag to read from a restart file
+ character(*),optional, intent(in) :: gcm_restart_file ! restart filename for restart
+ integer, optional, intent(in) :: gcm_config_unit ! fileunit for reading config files
+
+ ! Internal
+
+ integer :: config_fileunit
+
+ ! Default config file unit if the caller does not supply one
+ config_fileunit = 99
+ if (present(gcm_config_unit)) then
+ config_fileunit = gcm_config_unit
+ endif
+
+ ! initialise model
+
+ call glide_config(instance%model, config, config_fileunit)
+
+ ! if this is a continuation run, then set up to read restart
+ ! (currently assumed to be a CESM restart file)
+
+ if (present(gcm_restart)) then
+
+ if (gcm_restart) then
+
+ if (present(gcm_restart_file)) then
+
+ ! read the restart file
+ call glad_read_restart_gcm(instance%model, gcm_restart_file)
+ instance%model%options%is_restart = 1
+
+ else
+
+ call write_log('Missing gcm_restart_file when gcm_restart is true',&
+ GM_FATAL,__FILE__,__LINE__)
+
+ endif
+
+ endif
+ endif
+
+ if (instance%model%options%whichdycore == DYCORE_GLIDE) then ! SIA dycore
+
+ ! initialise the model
+ call glide_initialise(instance%model)
+
+ ! compute the initial diagnostic state
+ call glide_init_state_diagnostic(instance%model)
+
+ else ! glam/glissade HO dycore
+
+ ! initialise the model
+ call glissade_initialise(instance%model)
+
+ ! compute the initial diagnostic state
+ call glissade_diagnostic_variable_solve(instance%model)
+
+ endif
+
+ ! Ice dynamics timestep, converted from years to hours
+ instance%ice_tstep = get_tinc(instance%model)*nint(years2hours)
+
+ instance%glide_time = instance%model%numerics%tstart
+
+ ! read glad configuration
+
+ call glad_i_readconfig(instance, config)
+ call glad_i_printconfig(instance)
+
+ ! Construct the list of necessary restart variables based on the config options
+ ! selected by the user in the config file (specific to glad - other configs,
+ ! e.g. glide, isos, are handled separately by their setup routines).
+ ! This is done regardless of whether or not a restart ouput file is going
+ ! to be created for this run, but this information is needed before setting up outputs. MJH 1/17/13
+ ! Note: the corresponding call for glide is placed within *_readconfig, which is probably more appropriate,
+ ! but putting this call into glad_i_readconfig creates a circular dependency.
+
+ call define_glad_restart_variables(instance)
+
+ ! create glad variables for the glide output files
+ call glad_io_createall(instance%model, data=instance)
+
+ ! create instantaneous glad variables
+ call openall_out(instance%model, outfiles=instance%out_first)
+ call glad_mbal_io_createall(instance%model, data=instance, outfiles=instance%out_first)
+
+ ! fill dimension variables
+ call glide_nc_fillall(instance%model)
+ call glide_nc_fillall(instance%model, outfiles=instance%out_first)
+
+ ! Check we've used all the config sections
+
+ call CheckSections(config)
+
+ ! New grid (grid on this task)
+
+ ! WJS (1-11-13): I'm not sure if it's correct to set the origin to (0,0) when running
+ ! on multiple tasks, with a decomposed grid. However, as far as I can tell, the
+ ! origin of this variable isn't important, so I'm not trying to fix it right now.
+
+ instance%lgrid = coordsystem_new(0.d0, 0.d0, &
+ get_dew(instance%model), &
+ get_dns(instance%model), &
+ get_ewn(instance%model), &
+ get_nsn(instance%model))
+
+ ! Allocate arrays appropriately
+
+ call glad_i_allocate_gcm(instance, force_start)
+
+ ! Read data and initialise climate
+
+ call glad_i_readdata(instance)
+
+ ! initialise the mass-balance accumulation
+
+ call glad_mbc_init(instance%mbal_accum, instance%lgrid)
+
+ ! If flag set to force frequent coupling (for testing purposes),
+ ! then decrease all coupling timesteps to very short intervals
+ if (instance%test_coupling) then
+ instance%mbal_accum%tstep = 24
+ instance%mbal_accum_time = 24
+ instance%ice_tstep = 24
+ endif
+
+ instance%mbal_tstep = instance%mbal_accum%tstep
+
+ instance%next_time = force_start - force_dt + instance%mbal_tstep
+
+ if (GLC_DEBUG .and. main_task) then
+ write (6,*) 'Called glad_mbc_init'
+ write (6,*) 'mbal tstep =', instance%mbal_tstep
+ write (6,*) 'next_time =', instance%next_time
+ write (6,*) 'start_time =', instance%mbal_accum%start_time
+ end if
+
+ ! Mass-balance accumulation length
+
+ if (instance%mbal_accum_time == -1) then
+ instance%mbal_accum_time = max(instance%ice_tstep,instance%mbal_tstep)
+ end if
+
+ if (instance%mbal_accum_time < instance%mbal_tstep) then
+ call write_log('Mass-balance accumulation timescale must be as '//&
+ 'long as mass-balance time-step',GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ if (mod(instance%mbal_accum_time,instance%mbal_tstep) /= 0) then
+ call write_log('Mass-balance accumulation timescale must be an '// &
+ 'integer multiple of the mass-balance time-step',GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ if (.not. (mod(instance%mbal_accum_time, instance%ice_tstep)==0 .or. &
+ mod(instance%ice_tstep, instance%mbal_accum_time)==0)) then
+ call write_log('Mass-balance accumulation timescale and ice dynamics '//&
+ 'timestep must divide into one another',GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ ! Fix: mod() of two integers yields an integer, so compare against the
+ ! integer literal 0 rather than the real literal 0.d0 (mixed-type
+ ! comparison in the original; same result, but type-consistent now).
+ if (instance%ice_tstep_multiply/=1 .and. mod(instance%mbal_accum_time,nint(years2hours)) /= 0) then
+ call write_log('For ice time-step multiplication, mass-balance accumulation timescale '//&
+ 'must be an integer number of years',GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ ! Initialise some other stuff
+
+ if (instance%mbal_accum_time>instance%ice_tstep) then
+ instance%n_icetstep = instance%ice_tstep_multiply*instance%mbal_accum_time/instance%ice_tstep
+ else
+ instance%n_icetstep = instance%ice_tstep_multiply
+ end if
+
+ ! Write initial ice sheet diagnostics for this instance
+
+ call glide_write_diagnostics(instance%model, &
+ instance%model%numerics%time, &
+ tstep_count = instance%model%numerics%timecounter)
+
+ ! Write netCDF output for this instance
+
+ call glide_io_writeall(instance%model, instance%model)
+ call glad_io_writeall(instance, instance%model)
+ call glad_mbal_io_writeall(instance, instance%model, outfiles=instance%out_first)
+
+ end subroutine glad_i_initialise_gcm
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine glad_i_end(instance)
+
+ !> Finalise a GLAD instance: shut down the ice dynamics model, close
+ !> all instance-owned output files, and drop the output-file pointer.
+
+ use glide
+ use glimmer_ncio
+ implicit none
+ type(glad_instance), intent(inout) :: instance !> The instance being finalised.
+
+ ! Shut down the ice model first, then close the netCDF output files.
+ call glide_finalise(instance%model)
+ call closeall_out(instance%model, outfiles=instance%out_first)
+ nullify(instance%out_first)
+
+ end subroutine glad_i_end
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine glad_i_readdata(instance)
+ !> read data from netCDF file and initialise climate
+ !>
+ !> Reads the instance's input fields, then derives the lower and upper
+ !> ice surfaces from thickness, bedrock topography and eustatic sea level.
+
+ use glad_io
+ use glide_thck, only: glide_calclsrf
+ implicit none
+
+ type(glad_instance),intent(inout) :: instance !> Instance whose elements are to be allocated.
+
+ ! read data
+ call glad_io_readall(instance,instance%model)
+
+ ! Compute the lower surface (lsrf) from thck, topg and eustatic sea level,
+ ! then reconstruct the upper surface as thck + lsrf.
+ call glide_calclsrf(instance%model%geometry%thck,instance%model%geometry%topg, &
+ instance%model%climate%eus,instance%model%geometry%lsrf)
+ instance%model%geometry%usrf = instance%model%geometry%thck + instance%model%geometry%lsrf
+
+ end subroutine glad_i_readdata
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine define_glad_restart_variables(instance)
+
+ ! This subroutine analyzes the glad options input by the user in the config file
+ ! and determines which variables are necessary for an exact restart. MJH 1/11/2013
+
+ ! Please comment thoroughly the reasons why a particular variable needs to be a restart variable for a given config.
+
+ ! NOTE(review): 'instance' is currently unused in the body; presumably it is
+ ! intended for future option-dependent restart lists - confirm before removing.
+
+ use glad_io, only: glad_add_to_restart_variable_list
+ use glad_mbal_io, only: glad_mbal_add_to_restart_variable_list
+ implicit none
+
+ !------------------------------------------------------------------------------------
+ ! Subroutine arguments
+ !------------------------------------------------------------------------------------
+ type(glad_instance), intent (in) :: instance !> Derived type that includes all glad options
+
+ !------------------------------------------------------------------------------------
+ ! Internal variables
+ !------------------------------------------------------------------------------------
+
+ ! lat and lon need to be on the input file. Since a restart run only reads the
+ ! restart file (and not the original input file) we need to write lat and lon back to
+ ! the restart file so they will be available for the following run segment.
+
+ call glad_add_to_restart_variable_list('lat lon')
+
+ ! The variables rofi_tavg, rofl_tavg, and hflx_tavg are time-averaged fluxes on the local grid
+ ! from the previous coupling interval. They are included here so that the coupler can be sent
+ ! the correct fluxes after restart; otherwise these fluxes would have values of zero.
+ !TODO - Add av_count_output so we can restart in the middle of a mass balance timestep?
+
+ call glad_add_to_restart_variable_list('rofi_tavg rofl_tavg hflx_tavg')
+
+ end subroutine define_glad_restart_variables
+
+
+end module glad_initialise
diff --git a/components/cism/glimmer-cism/libglad/glad_input_averages.F90 b/components/cism/glimmer-cism/libglad/glad_input_averages.F90
new file mode 100644
index 0000000000..c890bd2828
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_input_averages.F90
@@ -0,0 +1,175 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_input_averages.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glad_input_averages
+
+ !> This module defines a type and related operations for working with inputs from the
+ !> GCM. Its main purpose is to produce temporal averages of these inputs.
+
+ ! Note that this module has some functionality in common with glad_mbal_coupling, but
+ ! they are used at different stages in the time loops.
+
+ ! NOTE(wjs, 2015-03-17) Most or all of the functionality here could be removed if we
+ ! performed all of the necessary temporal averaging in the climate model, with coupling
+ ! to the CISM code only happening once per mass balance time step. If we do that, we
+ ! should probably add checks to ensure that the model is really just being called when
+ ! it's time for a mass balance time step.
+
+ use glimmer_global, only : dp
+ use glimmer_paramets, only: GLC_DEBUG, stdout
+ use glimmer_log
+ use parallel, only : main_task
+
+ implicit none
+ private
+
+ type, public :: glad_input_averages_type
+ private
+
+ ! Bookkeeping for the current averaging period:
+ integer :: av_start_time = 0 ! Value of time from the last occasion averaging was restarted (hours)
+ integer :: av_steps = 0 ! Number of times glimmer has been called in current round of averaging
+ integer :: next_av_start = 0 ! Time when we expect next averaging to start
+ logical :: new_av = .true. ! Set to true if the next correct call starts a new averaging round
+
+ ! Running totals accumulated by accumulate_averages and divided by
+ ! av_steps in calculate_averages:
+ real(dp),pointer,dimension(:,:) :: tot_qsmb => null() ! running total surface mass balance (kg m-2 s-1)
+ real(dp),pointer,dimension(:,:) :: tot_tsfc => null() ! running total surface temperature (deg C)
+
+ end type glad_input_averages_type
+
+ public :: initialize_glad_input_averages
+ public :: get_av_start_time
+ public :: accumulate_averages
+ public :: calculate_averages
+ public :: reset_glad_input_averages
+
+contains
+
+ subroutine initialize_glad_input_averages(glad_inputs, ewn, nsn, next_av_start)
+ ! Set up a glad_inputs instance: allocate the accumulation arrays on the
+ ! local (ewn x nsn) grid, zero them, and record when averaging will begin.
+
+ type(glad_input_averages_type), intent(inout) :: glad_inputs
+
+ ! dimensions of local grid
+ integer, intent(in) :: ewn
+ integer, intent(in) :: nsn
+
+ ! Starting time of next averaging period (hours)
+ integer, intent(in) :: next_av_start
+
+ allocate(glad_inputs%tot_qsmb(ewn,nsn))
+ allocate(glad_inputs%tot_tsfc(ewn,nsn))
+ glad_inputs%tot_qsmb(:,:) = 0.d0
+ glad_inputs%tot_tsfc(:,:) = 0.d0
+
+ glad_inputs%next_av_start = next_av_start
+ end subroutine initialize_glad_input_averages
+
+ function get_av_start_time(glad_inputs) result(av_start_time)
+ ! Return the time (hours) at which the current averaging period started.
+ type(glad_input_averages_type), intent(in) :: glad_inputs
+ integer :: av_start_time
+
+ av_start_time = glad_inputs%av_start_time
+ end function get_av_start_time
+
+ subroutine accumulate_averages(glad_inputs, qsmb, tsfc, time)
+ ! Fold one set of climate-model inputs into the running totals.
+ !
+ ! Call this every time new inputs arrive from the climate model.
+
+ type(glad_input_averages_type), intent(inout) :: glad_inputs
+ real(dp),dimension(:,:),intent(in) :: qsmb ! flux of glacier ice (kg/m^2/s)
+ real(dp),dimension(:,:),intent(in) :: tsfc ! surface ground temperature (C)
+ integer, intent(in) :: time ! Current model time
+
+ ! First call of a new averaging period: record the start time and
+ ! sanity-check that GLAD was called when expected.
+ if (glad_inputs%new_av) call start_new_averaging_period(glad_inputs, time)
+
+ glad_inputs%av_steps = glad_inputs%av_steps + 1
+
+ glad_inputs%tot_qsmb = glad_inputs%tot_qsmb + qsmb
+ glad_inputs%tot_tsfc = glad_inputs%tot_tsfc + tsfc
+
+ end subroutine accumulate_averages
+
+ subroutine calculate_averages(glad_inputs, qsmb, tsfc)
+ ! Convert the running totals into time averages over the averaging
+ ! period by dividing by the number of accumulated steps.
+ type(glad_input_averages_type), intent(in) :: glad_inputs
+ real(dp), dimension(:,:), intent(out) :: qsmb ! average surface mass balance (kg m-2 s-1)
+ real(dp), dimension(:,:), intent(out) :: tsfc ! average surface temperature (deg C)
+
+ qsmb = glad_inputs%tot_qsmb / real(glad_inputs%av_steps, dp)
+ tsfc = glad_inputs%tot_tsfc / real(glad_inputs%av_steps, dp)
+ end subroutine calculate_averages
+
+ subroutine reset_glad_input_averages(glad_inputs, next_av_start)
+ ! Re-arm this glad_inputs instance for the next averaging period:
+ ! zero the accumulators, reset the step counter, and record when the
+ ! next period is expected to start. Call at the end of each period.
+ type(glad_input_averages_type), intent(inout) :: glad_inputs
+ integer, intent(in) :: next_av_start ! start time for next averaging period (hours)
+
+ glad_inputs%av_steps = 0
+ glad_inputs%new_av = .true.
+ glad_inputs%next_av_start = next_av_start
+
+ glad_inputs%tot_qsmb = 0.d0
+ glad_inputs%tot_tsfc = 0.d0
+ end subroutine reset_glad_input_averages
+
+ subroutine start_new_averaging_period(glad_inputs, time)
+ ! Should be called the first time accumulate_averages is called for a new averaging
+ ! period. Sets some flags appropriately in this case.
+ !
+ ! Also performs some error checking to make sure we're not calling GLAD at an
+ ! unexpected time.
+
+ type(glad_input_averages_type), intent(inout) :: glad_inputs
+ integer, intent(in) :: time ! Current model time
+
+ character(len=100) :: message
+
+ if (GLC_DEBUG .and. main_task) then
+ write (stdout,*) 'Accumulating averages, current time (hr) =', time
+ write (stdout,*) 'av_start_time =', glad_inputs%av_start_time
+ write (stdout,*) 'next_av_start =', glad_inputs%next_av_start
+ write (stdout,*) 'new_av =', glad_inputs%new_av
+ end if
+
+ ! The call is only valid at the scheduled start of the next averaging
+ ! period; anything else means GLAD was invoked at an unexpected time
+ ! and is treated as a fatal error.
+ if (time == glad_inputs%next_av_start) then
+ glad_inputs%av_start_time = time
+ glad_inputs%new_av = .false.
+ else
+ write(message,*) 'Unexpected calling of GLAD at time ', time
+ call write_log(message,GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ end subroutine start_new_averaging_period
+
+end module glad_input_averages
diff --git a/components/cism/glimmer-cism/libglad/glad_io.F90.default b/components/cism/glimmer-cism/libglad/glad_io.F90.default
new file mode 100644
index 0000000000..8501ba1b6c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_io.F90.default
@@ -0,0 +1,866 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+! WARNING: this file was automatically generated on
+! Fri, 03 Apr 2015 18:33:13 +0000
+! from ncdf_template.F90.in
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+! WJS (1-30-12): The following (turning optimization off) is needed as a workaround for an
+! xlf compiler bug, at least in IBM XL Fortran for AIX, V12.1 on bluefire
+#ifdef CPRIBM
+@PROCESS OPT(0)
+#endif
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! ncdf_template.F90.in - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#define NCO outfile%nc
+#define NCI infile%nc
+
+
+module glad_io
+ ! template for creating subsystem specific I/O routines
+ ! written by Magnus Hagdorn, 2004
+
+ use glad_type
+
+ implicit none
+
+ private :: get_xtype, is_enabled, is_enabled_0dint, is_enabled_1dint, is_enabled_2dint, is_enabled_0dreal, is_enabled_1dreal, is_enabled_2dreal, is_enabled_3dreal
+
+ character(310), save :: restart_variable_list='' ! list of variables needed for a restart
+!TODO change 310 to a variable - see glimmer_ncdf.F90 in the definition for type glimmer_nc_stat for other instances of this value.
+
+ interface is_enabled ! MJH 10/21/13: Interface needed for determining if arrays have been enabled. See notes below in glad_io_create.
+ module procedure is_enabled_0dint
+ module procedure is_enabled_1dint
+ module procedure is_enabled_2dint
+ module procedure is_enabled_0dreal
+ module procedure is_enabled_1dreal
+ module procedure is_enabled_2dreal
+ module procedure is_enabled_3dreal
+ end interface is_enabled
+
+contains
+
+ !*****************************************************************************
+ ! netCDF output
+ !*****************************************************************************
+ subroutine glad_io_createall(model,data,outfiles)
+ ! open all netCDF files for output
+ !
+ ! Walks the linked list of output files - either the caller-supplied
+ ! 'outfiles' list or the model's own out_first list - and creates the
+ ! glad output variables in each file via glad_io_create.
+ ! NOTE: this file is auto-generated from ncdf_template.F90.in; edit the
+ ! template, not this file.
+ use glad_type
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(glide_global_type) :: model
+ type(glad_instance) :: data ! MJH 10/21/13: Making 'data' mandatory. See notes below in glad_io_create
+ type(glimmer_nc_output),optional,pointer :: outfiles
+
+ ! local variables
+ type(glimmer_nc_output), pointer :: oc
+
+ if (present(outfiles)) then
+ oc => outfiles
+ else
+ oc=>model%funits%out_first
+ end if
+
+ do while(associated(oc))
+ call glad_io_create(oc,model,data)
+ oc=>oc%next
+ end do
+ end subroutine glad_io_createall
+
+ subroutine glad_io_writeall(data,model,atend,outfiles,time)
+ ! if necessary write to netCDF files
+ !
+ ! Walks the linked list of output files, accumulates time averages where
+ ! requested, and writes the glad variables for any file that is due.
+ ! NOTE(review): this file is auto-generated from ncdf_template.F90.in;
+ ! the forcewrite fix below should also be applied to the template.
+ use glad_type
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(glad_instance) :: data
+ type(glide_global_type) :: model
+ logical, optional :: atend ! force a write at end of run if .true.
+ type(glimmer_nc_output),optional,pointer :: outfiles
+ real(dp),optional :: time
+
+ ! local variables
+ type(glimmer_nc_output), pointer :: oc
+ ! Fix: the original declaration 'logical :: forcewrite=.false.' gave the
+ ! variable the implicit SAVE attribute, so a call with atend=.true. left
+ ! forcewrite set to .true. for subsequent calls that omit atend. Assign
+ ! the default at run time instead of in the declaration.
+ logical :: forcewrite
+
+ forcewrite = .false.
+
+ if (present(outfiles)) then
+ oc => outfiles
+ else
+ oc=>model%funits%out_first
+ end if
+
+ if (present(atend)) then
+ forcewrite = atend
+ end if
+
+ do while(associated(oc))
+#ifdef HAVE_AVG
+ if (oc%do_averages) then
+ call glad_avg_accumulate(oc,data,model)
+ end if
+#endif
+ call glimmer_nc_checkwrite(oc,model,forcewrite,time)
+ if (oc%nc%just_processed) then
+ ! write standard variables
+ call glad_io_write(oc,data)
+#ifdef HAVE_AVG
+ if (oc%do_averages) then
+ call glad_avg_reset(oc,data)
+ end if
+#endif
+ end if
+ oc=>oc%next
+ end do
+ end subroutine glad_io_writeall
+
+ subroutine glad_io_create(outfile,model,data)
+ ! Define the glad output variables (hflx_tavg, lat, lon, rofi_tavg,
+ ! rofl_tavg) in one netCDF output file, expanding 'restart'/'hot'
+ ! keywords in the variable list first.
+ ! NOTE: this file is auto-generated from ncdf_template.F90.in; edit the
+ ! template, not this file.
+ use parallel
+ use glide_types
+ use glad_type
+ use glimmer_ncdf
+ use glimmer_ncio
+ use glimmer_map_types
+ use glimmer_log
+ use glimmer_paramets
+ use glimmer_scales
+ use glimmer_log
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ type(glide_global_type) :: model
+ type(glad_instance) :: data ! MJH 10/21/13: Making 'data' mandatory. See note below
+
+ integer status,varid,pos
+
+ ! MJH 10/21/13: Local variables needed for checking if a variable is enabled.
+ real(dp) :: tavgf
+ integer :: up
+
+ integer :: time_dimid
+ integer :: x1_dimid
+ integer :: y1_dimid
+
+ ! defining dimensions
+ status = parallel_inq_dimid(NCO%id,'time',time_dimid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_inq_dimid(NCO%id,'x1',x1_dimid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_inq_dimid(NCO%id,'y1',y1_dimid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+
+ ! Expanding restart variables: if 'restart' or 'hot' is present, we remove that
+ ! word from the variable list, and flip the restartfile flag.
+ ! In CISM 2.0, 'restart' is the preferred name to represent restart variables,
+ ! but 'hot' is supported for backward compatibility. Thus, we check for both.
+ NCO%vars = ' '//trim(adjustl(NCO%vars))//' ' ! Need to maintain a space at beginning and end of list
+ ! expanding restart variables
+ pos = index(NCO%vars,' restart ')
+ if (pos.ne.0) then
+ NCO%vars = NCO%vars(:pos)//NCO%vars(pos+8:)
+ NCO%restartfile = .true.
+ end if
+ pos = index(NCO%vars,' hot ')
+ if (pos.ne.0) then
+ NCO%vars = NCO%vars(:pos)//NCO%vars(pos+4:)
+ NCO%restartfile = .true.
+ end if
+ ! Now apply necessary changes if the file is a restart file.
+ if (NCO%restartfile) then
+ if ((len_trim(NCO%vars) + len_trim(restart_variable_list) + 2) >= len(NCO%vars) ) then
+ call write_log('Adding restart variables has made the list of output variables too long for file ' // NCO%filename, GM_FATAL)
+ else
+ ! Expand the restart variable list
+ ! Need to maintain a space at beginning and end of list
+ NCO%vars = trim(NCO%vars) // ' ' // trim(restart_variable_list) // ' ' ! (a module variable)
+ ! Set the xtype to be double (required for an exact restart)
+ outfile%default_xtype = NF90_DOUBLE
+ endif
+ end if
+
+ ! Convert temp and flwa to versions on stag grid, if needed
+ ! Note: this check must occur after restart variables are expanded which happens in glimmer_nc_readparams
+ call check_for_tempstag(model%options%whichdycore,NCO)
+
+ ! checking if we need to handle time averages
+ pos = index(NCO%vars,"_tavg")
+ if (pos.ne.0) then
+ outfile%do_averages = .True.
+ end if
+
+ ! Now that the output variable list is finalized, make sure we aren't truncating what the user intends to be output.
+ ! Note: this only checks that the text in the variable list does not extend to within one character of the end of the variable.
+ ! It does not handle the case where the user exactly fills the allowable length with variables or has a too-long list with more than one space between variable names.
+ if ((len_trim(NCO%vars) + 1 ) >= len(NCO%vars)) then
+ call write_log('The list of output variables is too long for file ' // NCO%filename, GM_FATAL)
+ endif
+
+
+ ! MJH, 10/21/13: In the auto-generated code below, the creation of each output variable is wrapped by a check if the data for that
+ ! variable has a size greater than 0. This is because of recently added checks in glide_types.F90 that don't fully allocate
+ ! some variables if certain model options are disabled. This is to lower memory requirements while running the model.
+ ! The reason they have to be allocated with size zero rather than left unallocated is because the data for
+ ! some netCDF output variables is defined with math, which causes an error if the operands are unallocated.
+ ! Note that if a variable is not created, then it will not be subsequently written to.
+ ! Also note that this change requires that data be a mandatory argument to this subroutine.
+
+ ! Some output variables will need tavgf. The value does not matter, but it must exist.
+ ! Nonetheless, for completeness give it the proper value that it has in glad_io_write.
+ tavgf = outfile%total_time
+ if (tavgf.ne.0.d0) then
+ tavgf = 1.d0/tavgf
+ end if
+ ! Similarly, some output variables use the variable up. Give it value of 0 here.
+ up = 0
+
+ ! For each variable below: it is defined only if it was requested in the
+ ! variable list (pos.ne.0), does not already exist in the file
+ ! (status.eq.nf90_enotvar), and its data array is enabled for the
+ ! current config (is_enabled).
+
+ ! hflx_tavg -- heat flux to ice surface
+ pos = index(NCO%vars,' hflx_tavg ')
+ status = parallel_inq_varid(NCO%id,'hflx_tavg',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+9) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%hflx_tavg)) then
+ call write_log('Creating variable hflx_tavg')
+ status = parallel_def_var(NCO%id,'hflx_tavg',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'heat flux to ice surface')
+ status = parallel_put_att(NCO%id, varid, 'units', 'W m-2')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable hflx_tavg was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! lat -- latitude
+ pos = index(NCO%vars,' lat ')
+ status = parallel_inq_varid(NCO%id,'lat',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+3) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%lat)) then
+ call write_log('Creating variable lat')
+ status = parallel_def_var(NCO%id,'lat',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'latitude')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'latitude')
+ status = parallel_put_att(NCO%id, varid, 'units', 'degreeN')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable lat was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! lon -- longitude
+ pos = index(NCO%vars,' lon ')
+ status = parallel_inq_varid(NCO%id,'lon',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+3) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%lon)) then
+ call write_log('Creating variable lon')
+ status = parallel_def_var(NCO%id,'lon',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'longitude')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'longitude')
+ status = parallel_put_att(NCO%id, varid, 'units', 'degreeE')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable lon was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! rofi_tavg -- solid calving flux
+ pos = index(NCO%vars,' rofi_tavg ')
+ status = parallel_inq_varid(NCO%id,'rofi_tavg',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+9) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%rofi_tavg)) then
+ call write_log('Creating variable rofi_tavg')
+ status = parallel_def_var(NCO%id,'rofi_tavg',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'solid calving flux')
+ status = parallel_put_att(NCO%id, varid, 'units', 'kg m-2 s-1')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable rofi_tavg was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! rofl_tavg -- liquid runoff flux
+ pos = index(NCO%vars,' rofl_tavg ')
+ status = parallel_inq_varid(NCO%id,'rofl_tavg',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+9) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%rofl_tavg)) then
+ call write_log('Creating variable rofl_tavg')
+ status = parallel_def_var(NCO%id,'rofl_tavg',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'liquid runoff flux')
+ status = parallel_put_att(NCO%id, varid, 'units', 'kg m-2 s-1')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable rofl_tavg was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ end subroutine glad_io_create
+
+ subroutine glad_io_write(outfile,data)
+ use parallel
+ use glad_type
+ use glimmer_ncdf
+ use glimmer_paramets
+ use glimmer_scales
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ ! structure containing output netCDF descriptor
+ type(glad_instance) :: data
+ ! the model instance
+
+ ! local variables
+ real(dp) :: tavgf
+ integer status, varid
+ integer up
+
+ tavgf = outfile%total_time
+ if (tavgf.ne.0.d0) then
+ tavgf = 1.d0/tavgf
+ end if
+
+ ! write variables
+ status = parallel_inq_varid(NCO%id,'hflx_tavg',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%hflx_tavg, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'lat',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%lat, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'lon',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%lon, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'rofi_tavg',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%rofi_tavg, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'rofl_tavg',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%rofl_tavg, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ end subroutine glad_io_write
+
+
+ subroutine glad_add_to_restart_variable_list(vars_to_add)
+ ! This subroutine adds variables to the list of variables needed for a restart.
+ ! It is a public subroutine that allows other parts of the model to modify the list,
+ ! which is a module level variable. MJH 1/17/2013
+
+ use glimmer_log
+ implicit none
+
+ !------------------------------------------------------------------------------------
+ ! Subroutine arguments
+ !------------------------------------------------------------------------------------
+ character(len=*), intent (in) :: vars_to_add ! list of variable(s) to be added to the list of restart variables
+ !character(*), intent (inout) :: restart_variable_list ! list of variables needed to perform an exact restart - module variable
+
+ !------------------------------------------------------------------------------------
+ ! Internal variables
+ !------------------------------------------------------------------------------------
+
+ !------------------------------------------------------------------------------------
+
+ ! Add the variables to the list so long as they don't make the list too long.
+ if ( (len_trim(restart_variable_list) + 1 + len_trim(vars_to_add)) > len(restart_variable_list)) then
+ call write_log('Adding restart variables has made the restart variable list too long.',GM_FATAL)
+ else
+ restart_variable_list = trim(adjustl(restart_variable_list)) // ' ' // trim(vars_to_add)
+ !call write_log('Adding to glad restart variable list: ' // trim(vars_to_add) )
+ endif
+
+ end subroutine glad_add_to_restart_variable_list
+
+
+ ! Functions for the interface 'is_enabled'. These are needed by the auto-generated code in glad_io_create
+ ! to determine if a variable is 'turned on', and should be written.
+
+ function is_enabled_0dint(var)
+ integer, intent(in) :: var
+ logical :: is_enabled_0dint
+ is_enabled_0dint = .true. ! scalars are always enabled
+ return
+ end function is_enabled_0dint
+
+ function is_enabled_1dint(var)
+ integer, dimension(:), pointer, intent(in) :: var
+ logical :: is_enabled_1dint
+ if (associated(var)) then
+ is_enabled_1dint = .true.
+ else
+ is_enabled_1dint = .false.
+ endif
+ return
+ end function is_enabled_1dint
+
+ function is_enabled_2dint(var)
+ integer, dimension(:,:), pointer, intent(in) :: var
+ logical :: is_enabled_2dint
+ if (associated(var)) then
+ is_enabled_2dint = .true.
+ else
+ is_enabled_2dint = .false.
+ endif
+ return
+ end function is_enabled_2dint
+
+ function is_enabled_0dreal(var)
+ real(dp), intent(in) :: var
+ logical :: is_enabled_0dreal
+ is_enabled_0dreal = .true. ! scalars are always enabled
+ return
+ end function is_enabled_0dreal
+
+ function is_enabled_1dreal(var)
+ real(dp), dimension(:), pointer, intent(in) :: var
+ logical :: is_enabled_1dreal
+ if (associated(var)) then
+ is_enabled_1dreal = .true.
+ else
+ is_enabled_1dreal = .false.
+ endif
+ return
+ end function is_enabled_1dreal
+
+ function is_enabled_2dreal(var)
+ real(dp), dimension(:,:), pointer, intent(in) :: var
+ logical :: is_enabled_2dreal
+ if (associated(var)) then
+ is_enabled_2dreal = .true.
+ else
+ is_enabled_2dreal = .false.
+ endif
+ return
+ end function is_enabled_2dreal
+
+ function is_enabled_3dreal(var)
+ real(dp), dimension(:,:,:), pointer, intent(in) :: var
+ logical :: is_enabled_3dreal
+ if (associated(var)) then
+ is_enabled_3dreal = .true.
+ else
+ is_enabled_3dreal = .false.
+ endif
+ return
+ end function is_enabled_3dreal
+
+
+ !*****************************************************************************
+ ! netCDF input
+ !*****************************************************************************
+ subroutine glad_io_readall(data, model, filetype)
+ ! read from netCDF file
+ use glad_type
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(glad_instance) :: data
+ type(glide_global_type) :: model
+ integer, intent(in), optional :: filetype ! 0 for input, 1 for forcing; defaults to input
+
+ ! local variables
+ type(glimmer_nc_input), pointer :: ic
+ integer :: filetype_local
+
+ if (present(filetype)) then
+ filetype_local = filetype
+ else
+ filetype_local = 0 ! default to input type
+ end if
+
+ if (filetype_local == 0) then
+ ic=>model%funits%in_first
+ else
+ ic=>model%funits%frc_first
+ endif
+ do while(associated(ic))
+ call glimmer_nc_checkread(ic,model)
+ if (ic%nc%just_processed) then
+ call glad_io_read(ic,data)
+ end if
+ ic=>ic%next
+ end do
+ end subroutine glad_io_readall
+
+
+ subroutine glad_read_forcing(data, model)
+ ! Read data from forcing files
+ use glimmer_log
+ use glide_types
+ use glimmer_ncdf
+
+ implicit none
+ type(glad_instance) :: data
+ type(glide_global_type), intent(inout) :: model
+
+ ! Locals
+ type(glimmer_nc_input), pointer :: ic
+ integer :: t
+ real(dp) :: eps ! a tolerance to use for stepwise constant forcing
+
+ ! Make eps a fraction of the time step.
+ eps = model%numerics%tinc * 1.0d-4
+
+ ! read forcing files
+ ic=>model%funits%frc_first
+ do while(associated(ic))
+
+ !print *, 'possible forcing times', ic%times
+
+ ! Find the current time in the file
+ do t = ic%nt, 1, -1 ! look through the time array backwards
+ if ( ic%times(t) <= model%numerics%time + eps) then
+ ! use the largest time that is smaller or equal to the current time (stepwise forcing)
+
+ ! Set the desired time to be read
+ ic%current_time = t
+ !print *, 'time, forcing index, forcing time', model%numerics%time, ic%current_time, ic%times(ic%current_time)
+ exit ! once we find the time, exit the loop
+ endif
+ end do
+
+ ! read all forcing fields present in this file for the time specified above
+ ic%nc%just_processed = .false. ! set this to false so it will be re-processed every time through - this ensures info gets written to the log, and that time levels don't get skipped.
+ call glad_io_readall(data, model, filetype=1)
+
+ ! move on to the next forcing file
+ ic=>ic%next
+ end do
+
+ end subroutine glad_read_forcing
+
+
+!------------------------------------------------------------------------------
+
+
+ subroutine glad_io_read(infile,data)
+ ! read variables from a netCDF file
+ use parallel
+ use glimmer_log
+ use glimmer_ncdf
+ use glad_type
+ use glimmer_paramets
+ use glimmer_scales
+ implicit none
+ type(glimmer_nc_input), pointer :: infile
+ ! structure containing input netCDF descriptor
+ type(glad_instance) :: data
+ ! the model instance
+
+ ! local variables
+ integer status,varid
+ integer up
+ real(dp) :: scaling_factor
+
+ ! read variables
+ status = parallel_inq_varid(NCI%id,'hflx_tavg',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%hflx_tavg)) then
+ call write_log(' Loading hflx_tavg')
+ status = distributed_get_var(NCI%id, varid, &
+ data%hflx_tavg, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling hflx_tavg",GM_DIAGNOSTIC)
+ data%hflx_tavg = data%hflx_tavg*scaling_factor
+ end if
+ else
+ call write_log('Variable hflx_tavg was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'lat',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%lat)) then
+ call write_log(' Loading lat')
+ status = distributed_get_var(NCI%id, varid, &
+ data%lat, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling lat",GM_DIAGNOSTIC)
+ data%lat = data%lat*scaling_factor
+ end if
+ else
+ call write_log('Variable lat was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'lon',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%lon)) then
+ call write_log(' Loading lon')
+ status = distributed_get_var(NCI%id, varid, &
+ data%lon, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling lon",GM_DIAGNOSTIC)
+ data%lon = data%lon*scaling_factor
+ end if
+ else
+ call write_log('Variable lon was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'rofi_tavg',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%rofi_tavg)) then
+ call write_log(' Loading rofi_tavg')
+ status = distributed_get_var(NCI%id, varid, &
+ data%rofi_tavg, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling rofi_tavg",GM_DIAGNOSTIC)
+ data%rofi_tavg = data%rofi_tavg*scaling_factor
+ end if
+ else
+ call write_log('Variable rofi_tavg was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'rofl_tavg',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%rofl_tavg)) then
+ call write_log(' Loading rofl_tavg')
+ status = distributed_get_var(NCI%id, varid, &
+ data%rofl_tavg, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling rofl_tavg",GM_DIAGNOSTIC)
+ data%rofl_tavg = data%rofl_tavg*scaling_factor
+ end if
+ else
+ call write_log('Variable rofl_tavg was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ end subroutine glad_io_read
+
+ subroutine glad_io_checkdim(infile,model,data)
+ ! check if dimension sizes in file match dims of model
+ use parallel
+ use glimmer_log
+ use glimmer_ncdf
+ use glide_types
+ use glad_type
+ implicit none
+ type(glimmer_nc_input), pointer :: infile
+ ! structure containing input netCDF descriptor
+ type(glide_global_type) :: model
+ type(glad_instance), optional :: data
+
+ integer status,dimid,dimsize
+ character(len=150) message
+
+ ! check dimensions
+ end subroutine glad_io_checkdim
+
+ !*****************************************************************************
+ ! calculating time averages
+ !*****************************************************************************
+#ifdef HAVE_AVG
+ subroutine glad_avg_accumulate(outfile,data,model)
+ use parallel
+ use glide_types
+ use glad_type
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ ! structure containing output netCDF descriptor
+ type(glide_global_type) :: model
+ type(glad_instance) :: data
+
+ ! local variables
+ real(dp) :: factor
+ integer status, varid
+
+ ! increase total time
+ outfile%total_time = outfile%total_time + model%numerics%tinc
+ factor = model%numerics%tinc
+
+ end subroutine glad_avg_accumulate
+
+ subroutine glad_avg_reset(outfile,data)
+ use parallel
+ use glad_type
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ ! structure containing output netCDF descriptor
+ type(glad_instance) :: data
+
+ ! local variables
+ integer status, varid
+
+ ! reset total time
+ outfile%total_time = 0.d0
+
+ end subroutine glad_avg_reset
+#endif
+
+ !*********************************************************************
+ ! some private procedures
+ !*********************************************************************
+
+ !> apply default type to be used in netCDF file
+ integer function get_xtype(outfile,xtype)
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile !< derived type holding information about output file
+ integer, intent(in) :: xtype !< the external netCDF type
+
+ get_xtype = xtype
+
+ if (xtype.eq.NF90_REAL .and. outfile%default_xtype.eq.NF90_DOUBLE) then
+ get_xtype = NF90_DOUBLE
+ end if
+ if (xtype.eq.NF90_DOUBLE .and. outfile%default_xtype.eq.NF90_REAL) then
+ get_xtype = NF90_REAL
+ end if
+ end function get_xtype
+
+ !*********************************************************************
+ ! lots of accessor subroutines follow
+ !*********************************************************************
+ subroutine glad_get_lat(data,outarray)
+ use glimmer_scales
+ use glimmer_paramets
+ use glad_type
+ implicit none
+ type(glad_instance) :: data
+ real(dp), dimension(:,:), intent(out) :: outarray
+
+ outarray = data%lat
+ end subroutine glad_get_lat
+
+ subroutine glad_set_lat(data,inarray)
+ use glimmer_scales
+ use glimmer_paramets
+ use glad_type
+ implicit none
+ type(glad_instance) :: data
+ real(dp), dimension(:,:), intent(in) :: inarray
+
+ data%lat = inarray
+ end subroutine glad_set_lat
+
+ subroutine glad_get_lon(data,outarray)
+ use glimmer_scales
+ use glimmer_paramets
+ use glad_type
+ implicit none
+ type(glad_instance) :: data
+ real(dp), dimension(:,:), intent(out) :: outarray
+
+ outarray = data%lon
+ end subroutine glad_get_lon
+
+ subroutine glad_set_lon(data,inarray)
+ use glimmer_scales
+ use glimmer_paramets
+ use glad_type
+ implicit none
+ type(glad_instance) :: data
+ real(dp), dimension(:,:), intent(in) :: inarray
+
+ data%lon = inarray
+ end subroutine glad_set_lon
+
+
+end module glad_io
diff --git a/components/cism/glimmer-cism/libglad/glad_main.F90 b/components/cism/glimmer-cism/libglad/glad_main.F90
new file mode 100644
index 0000000000..cced320b86
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_main.F90
@@ -0,0 +1,921 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_main.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glad_main
+
+ ! This module provides an interface to GCMs in the case where fields have already been
+ ! downscaled to the ice sheet grid (and the GCM does its own upscaling from the ice
+ ! sheet grid to the land grid).
+ !
+ ! This only provides code for the SMB case, not for the PDD case.
+
+ use glimmer_global, only: dp, fname_length
+ use glad_type
+ use glad_constants
+ use glimmer_config
+ use glimmer_filenames, only : process_path
+ use parallel, only: main_task
+ use glad_input_averages, only : get_av_start_time, accumulate_averages, &
+ calculate_averages, reset_glad_input_averages
+
+ use glimmer_paramets, only: stdout, GLC_DEBUG
+
+ implicit none
+ private
+
+ ! ------------------------------------------------------------
+ ! glad_params derived type definition
+ ! This is where default values are set.
+ ! ------------------------------------------------------------
+
+ type, public :: glad_params
+
+ !> Derived type containing parameters relevant to all instances of
+ !> the model - i.e. those parameters which pertain to the global model.
+
+ ! Ice model instances --------------------------------------
+
+ integer :: ninstances = 1 !> Number of ice model instances
+ character(fname_length),pointer,dimension(:) :: config_fnames => null() ! array of config filenames
+ type(glad_instance),pointer,dimension(:) :: instances => null() !> Array of glimmer\_instances
+
+ ! Global model parameters ----------------------------------
+
+ integer :: tstep_mbal = 1 !> Mass-balance timestep (hours)
+ integer :: start_time !> Time of first call to glad (hours)
+ integer :: time_step !> Calling timestep of global model (hours)
+
+ ! Parameters that can be set by the GCM calling Glad
+
+ logical :: gcm_restart = .false. !> If true, restart the model from a GCM restart file
+ character(fname_length) :: gcm_restart_file !> Name of restart file
+ integer :: gcm_fileunit = 99 !> Fileunit specified by GCM for reading config files
+
+ end type glad_params
+
+ !---------------------------------------------------------------------------------------
+ ! Use of the routines here:
+ !
+ ! NOTE(wjs, 2015-03-24) I think this is going to need some rework in order to handle
+ ! multiple instances the way I'm planning to do it in CESM, with the coupler managing
+ ! these multiple instances: I think we're going to want a totally separate glad
+ ! instance for each ice sheet instance. Then some of these initialization routines
+ ! could be combined.
+ !
+ ! In model initialization:
+ ! - Call glad_initialize once
+ ! - Call glad_initialize_instance once per instance
+ ! - Call glad_get_grid_size once per instance
+ ! (this is needed so that the caller can allocate arrays appropriately)
+ ! - Call glad_get_initial_outputs once per instance
+ ! - Call glad_initialization_wrapup once
+ !
+ ! In the model run loop:
+ ! - Call glad_gcm once per instance
+ !---------------------------------------------------------------------------------------
+
+ public :: glad_initialize
+ public :: glad_initialize_instance
+ public :: glad_get_grid_size
+ public :: glad_get_initial_outputs
+ public :: glad_initialization_wrapup
+
+ public :: glad_get_grid_indices
+ public :: glad_get_lat_lon
+ public :: glad_get_areas
+
+ public :: glad_gcm
+
+ public :: end_glad
+
+ !---------------------------------------------------------------------------------------
+ ! Some notes on coupling to the Community Earth System Model (CESM). These may be applicable
+ ! for coupling to other GCMs:
+ !
+ ! When coupled to CESM, Glad receives two fields from the coupler on the ice sheet grid:
+ ! qsmb = surface mass balance (kg/m^2/s)
+ ! tsfc = surface ground temperature (deg C)
+ ! Both qsmb and tsfc are computed in the CESM land model.
+ ! Seven fields are returned to CESM on the ice sheet grid:
+ ! ice_covered = whether a grid cell is ice-covered [0,1]
+ ! topo = surface elevation (m)
+ ! hflx = heat flux from the ice interior to the surface (W/m^2)
+ ! rofi = ice runoff (i.e., calving) (kg/m^2/s)
+ ! rofl = liquid runoff (i.e., basal melting; the land model handles sfc runoff) (kg/m^2/s)
+ ! ice_sheet_grid_mask = mask of ice sheet grid coverage
+ ! icemask_coupled_fluxes = mask of ice sheet grid coverage where we are potentially
+ ! sending non-zero fluxes
+ !
+ ! Note about ice_sheet_grid_mask and icemask_coupled_fluxes: ice_sheet_grid_mask is
+ ! non-zero wherever CISM is operating - i.e., grid cells with icesheet or bare land (but
+ ! not ocean). icemask_coupled_fluxes is similar, but is 0 for icesheet instances that
+ ! have zero_gcm_fluxes = .true. Thus, icemask_coupled_fluxes can be used to determine
+ ! the regions of the world in which CISM is operating and potentially sending non-zero
+ ! fluxes to the climate model.
+ !
+ ! The land model has the option to update its ice coverage and surface elevation, given
+ ! the fields returned from Glad.
+ !
+ !---------------------------------------------------------------------------------------
+
+contains
+
+ subroutine glad_initialize(params, time_step, paramfile, daysinyear, start_time, &
+ gcm_restart, gcm_restart_file, gcm_debug, gcm_fileunit)
+
+ ! Initialize the model for runs coupled to a GCM. This routine initializes variables
+ ! shared between instances. See above for documentation of the full initialization
+ ! sequence.
+
+ ! Subroutine argument declarations --------------------------------------------------------
+
+ type(glad_params), intent(inout) :: params !> parameters to be set
+ integer, intent(in) :: time_step !> Timestep of calling model (hours)
+ character(*),dimension(:), intent(in) :: paramfile !> array of configuration filenames.
+ integer, optional,intent(in) :: daysinyear !> Number of days in the year
+ integer, optional,intent(in) :: start_time !> Time of first call to glad (hours)
+ logical, optional,intent(in) :: gcm_restart ! logical flag to restart from a GCM restart file
+ character(*), optional,intent(in) :: gcm_restart_file ! restart filename for a GCM restart
+ ! (currently assumed to be CESM)
+ logical, optional,intent(in) :: gcm_debug ! logical flag from GCM to output debug information
+ integer, optional,intent(in) :: gcm_fileunit! fileunit for reading config files
+
+ ! Internal variables -----------------------------------------------------------------------
+
+ type(ConfigSection), pointer :: global_config
+
+ ! Begin subroutine code --------------------------------------------------------------------
+
+
+ if (present(gcm_debug)) then
+ GLC_DEBUG = gcm_debug
+ endif
+
+ if (GLC_DEBUG .and. main_task) then
+ write(stdout,*) 'Initializing glad'
+ end if
+
+ ! Initialise start time and calling model time-step (time_step = integer number of hours)
+ ! We ignore t=0 by default
+
+ params%time_step = time_step
+
+ ! Note: start_time = nhour_glad = 0 for an initial run.
+ ! Does this create problems given that Glad convention is to ignore t = 0?
+
+ if (present(start_time)) then
+ params%start_time = start_time
+ else
+ params%start_time = time_step
+ end if
+
+ params%gcm_restart = .false.
+ if (present(gcm_restart)) then
+ params%gcm_restart = gcm_restart
+ endif
+
+ params%gcm_restart_file = ''
+ if (present(gcm_restart_file)) then
+ params%gcm_restart_file = gcm_restart_file
+ endif
+
+ params%gcm_fileunit = 99
+ if (present(gcm_fileunit)) then
+ params%gcm_fileunit = gcm_fileunit
+ endif
+
+ if (GLC_DEBUG .and. main_task) then
+ write(stdout,*) 'time_step =', params%time_step
+ write(stdout,*) 'start_time =', params%start_time
+ end if
+
+ ! Initialise year-length -------------------------------------------------------------------
+
+ if (present(daysinyear)) then
+ call glad_set_year_length(daysinyear)
+ end if
+
+ ! ---------------------------------------------------------------
+ ! Determine how many instances there are, according to what
+ ! configuration files we've been provided with
+ ! ---------------------------------------------------------------
+
+ if (GLC_DEBUG .and. main_task) then
+ write(stdout,*) 'Read paramfile'
+ write(stdout,*) 'paramfile =', paramfile
+ end if
+
+ if (size(paramfile) == 1) then
+ ! Load the configuration file into the linked list
+ call ConfigRead(process_path(paramfile(1)), global_config, params%gcm_fileunit)
+ ! Parse the list
+ call glad_readconfig(global_config, params%ninstances, params%config_fnames, paramfile)
+ else
+ params%ninstances = size(paramfile)
+ allocate(params%config_fnames(params%ninstances))
+ params%config_fnames(:) = paramfile(:)
+ end if
+
+ allocate(params%instances(params%ninstances))
+
+ if (GLC_DEBUG .and. main_task) then
+ write(stdout,*) 'Number of instances =', params%ninstances
+ end if
+
+ end subroutine glad_initialize
+
+ !===================================================================
+
+ subroutine glad_initialize_instance(params, instance_index)
+
+ ! Initialize one instance in the params structure. See above for documentation of
+ ! the full initialization sequence.
+
+ use glad_initialise, only : glad_i_initialise_gcm
+
+ ! Subroutine argument declarations --------------------------------------------------------
+
+ type(glad_params), intent(inout) :: params !> parameters to be set
+ integer, intent(in) :: instance_index !> index of current ice sheet instance
+
+ ! Internal variables -----------------------------------------------------------------------
+
+ type(ConfigSection), pointer :: instance_config
+
+ ! Begin subroutine code --------------------------------------------------------------------
+
+ if (GLC_DEBUG .and. main_task) then
+ write(stdout,*) 'Read config file and initialize instance #', instance_index
+ end if
+
+ call ConfigRead(process_path(params%config_fnames(instance_index)),&
+ instance_config, params%gcm_fileunit)
+
+ call glad_i_initialise_gcm(instance_config, params%instances(instance_index), &
+ params%start_time, params%time_step, &
+ params%gcm_restart, params%gcm_restart_file, &
+ params%gcm_fileunit )
+
+ end subroutine glad_initialize_instance
+
+ !===================================================================
+
+ subroutine glad_get_grid_size(params, instance_index, &
+ ewn, nsn, npts, &
+ ewn_tot, nsn_tot, npts_tot)
+
+ ! Get the size of a grid corresponding to this instance.
+ !
+ ! Returns both the size of local arrays (ewn, nsn, npts) and the size of global arrays
+ ! (ewn_tot, nsn_tot, npts_tot).
+ !
+ ! The size is returned withOUT halo cells - note that the other routines here assume
+ ! that inputs and outputs do not have halo cells.
+ !
+ ! The caller can then allocate arrays (inputs to and outputs from glad) with size
+ ! (ewn, nsn).
+
+ use parallel, only : own_ewn, own_nsn, global_ewn, global_nsn
+
+ type(glad_params), intent(in) :: params
+ integer, intent(in) :: instance_index ! index of current ice sheet instance
+ integer, intent(out) :: ewn ! number of east-west points owned by this proc (first dimension of arrays)
+ integer, intent(out) :: nsn ! number of north-south points owned by this proc (second dimension of arrays)
+ integer, intent(out) :: npts ! total number of points owned by this proc
+ integer, intent(out) :: ewn_tot ! total number of east-west points in grid
+ integer, intent(out) :: nsn_tot ! total number of north-south points in grid
+ integer, intent(out) :: npts_tot ! total number of points in grid
+
+ ewn = own_ewn
+ nsn = own_nsn
+ npts = ewn * nsn
+
+ ewn_tot = global_ewn
+ nsn_tot = global_nsn
+ npts_tot = ewn_tot * nsn_tot
+
+ end subroutine glad_get_grid_size
+
+ !===================================================================
+
+ subroutine glad_get_initial_outputs(params, instance_index, &
+ ice_covered, topo, &
+ rofi, rofl, hflx, &
+ ice_sheet_grid_mask, &
+ icemask_coupled_fluxes, &
+ output_flag)
+
+ ! Get initial outputs for one instance. See above for documentation of the full
+ ! initialization sequence.
+ !
+ ! Output arrays are assumed to NOT have halo cells.
+
+ ! Subroutine argument declarations --------------------------------------------------------
+
+ type(glad_params), intent(in) :: params
+ integer, intent(in) :: instance_index !> index of current ice sheet instance
+
+ real(dp),dimension(:,:),intent(out) :: ice_covered ! whether each grid cell is ice-covered [0,1]
+ real(dp),dimension(:,:),intent(out) :: topo ! output surface elevation (m)
+ real(dp),dimension(:,:),intent(out) :: hflx ! output heat flux (W/m^2, positive down)
+ real(dp),dimension(:,:),intent(out) :: rofi ! output ice runoff (kg/m^2/s = mm H2O/s)
+ real(dp),dimension(:,:),intent(out) :: rofl ! output liquid runoff (kg/m^2/s = mm H2O/s)
+ real(dp),dimension(:,:),intent(out) :: ice_sheet_grid_mask !mask of ice sheet grid coverage
+ real(dp),dimension(:,:),intent(out) :: icemask_coupled_fluxes !mask of ice sheet grid coverage where we are potentially sending non-zero fluxes
+
+ logical, optional,intent(out) :: output_flag !> Flag to show output set (provided for consistency)
+
+ ! Begin subroutine code --------------------------------------------------------------------
+
+ call glad_set_output_fields(params%instances(instance_index), &
+ ice_covered, topo, rofi, rofl, hflx, &
+ ice_sheet_grid_mask, icemask_coupled_fluxes)
+
+ if (present(output_flag)) output_flag = .true.
+
+ end subroutine glad_get_initial_outputs
+
+ !===================================================================
+
+  subroutine glad_initialization_wrapup(params, ice_dt)
+
+    type(glad_params), intent(inout) :: params !> parameters to be set
+    integer, optional,intent(out) :: ice_dt !> Ice dynamics time-step in hours
+
+    ! Wrapup glad initialization - perform error checks, etc. See above for documentation
+    ! of the full initialization sequence
+
+    ! Check that all mass-balance time-steps are the same length and
+    ! assign that value to the top-level variable
+    ! (check_mbts flags a fatal error if the instances disagree)
+
+    params%tstep_mbal = check_mbts(params%instances(:)%mbal_tstep)
+
+    ! Similarly require all instances to share one ice dynamics time-step;
+    ! only computed if the caller asked for it
+    if (present(ice_dt)) then
+       ice_dt = check_mbts(params%instances(:)%ice_tstep)
+    end if
+
+    if (GLC_DEBUG .and. main_task) then
+       write(stdout,*) 'tstep_mbal =', params%tstep_mbal
+       write(stdout,*) 'start_time =', params%start_time
+       write(stdout,*) 'time_step =', params%time_step
+       if (present(ice_dt)) write(stdout,*) 'ice_dt =', ice_dt
+    end if
+
+    ! Check time-steps divide into one another appropriately.
+    ! tstep_mbal and time_step are integer hour counts, so an exact mod test is safe.
+
+    if (.not. (mod (params%tstep_mbal, params%time_step) == 0)) then
+       call write_log('The mass-balance timestep must be an integer multiple of the forcing time-step', &
+            GM_FATAL,__FILE__,__LINE__)
+    end if
+
+
+  end subroutine glad_initialization_wrapup
+
+ !===================================================================
+
+  subroutine glad_get_grid_indices(params, instance_index, &
+       global_indices, local_indices)
+
+    ! Get 1-d indices for each grid cell.
+    !
+    ! The global indices are unique across all tasks (i.e., the global grid). The local
+    ! indices go from 1 .. ncells on each task. The global indices increase going from
+    ! left to right, and then from bottom to top. So the indices for the bottom
+    ! (southernmost) row go 1 .. (# east-west points), etc. The local indices go in the
+    ! same order.
+    !
+    ! The global_indices and local_indices arrays should NOT include halo cells. The
+    ! returned indices also ignore halo cells.
+
+    ! global_row_offset / global_col_offset give this task's position within the
+    ! global (non-haloed) grid; own_ewn / own_nsn are the counts of locally-owned cells
+    use parallel, only : own_ewn, own_nsn, global_row_offset, global_col_offset, global_ewn
+
+    ! Subroutine argument declarations --------------------------------------------------------
+
+    ! NOTE(review): params and instance_index are not referenced in this routine;
+    ! the indices depend only on the parallel decomposition - confirm whether the
+    ! arguments are kept for interface symmetry with the other glad_get_* routines
+    type(glad_params), intent(in) :: params
+    integer, intent(in) :: instance_index ! index of current ice sheet instance
+    integer, intent(out) :: global_indices(:,:)
+    integer, intent(out) :: local_indices(:,:)
+
+    ! Internal variables -----------------------------------------------------------------------
+
+    integer :: own_points ! number of points this proc is responsible for
+    integer, allocatable :: counts(:) ! count number of times each local index has been set
+    integer :: local_row, local_col
+    integer :: global_row, global_col
+    integer :: local_index, global_index
+    character(len=*), parameter :: subname = 'glad_get_grid_indices'
+
+    ! Begin subroutine code --------------------------------------------------------------------
+
+    ! Perform error checking on inputs
+
+    if (size(global_indices, 1) /= own_ewn .or. size(global_indices, 2) /= own_nsn) then
+       call write_log(subname // ' ERROR: Wrong size for global_indices', &
+            GM_FATAL, __FILE__, __LINE__)
+    end if
+
+    if (size(local_indices, 1) /= own_ewn .or. size(local_indices, 2) /= own_nsn) then
+       call write_log(subname // ' ERROR: Wrong size for local_indices', &
+            GM_FATAL, __FILE__, __LINE__)
+    end if
+
+    ! Set global and local indices
+
+    own_points = own_ewn * own_nsn
+    allocate(counts(own_points))
+    counts(:) = 0
+
+    ! Row-major-in-rows ordering: index increases across a row (west to east),
+    ! then row by row (south to north), matching the documented convention above
+    do local_row = 1, own_nsn
+       do local_col = 1, own_ewn
+          local_index = (local_row - 1)*own_ewn + local_col
+          if (local_index < 1 .or. local_index > own_points) then
+             write(stdout,*) subname//' ERROR: local_index out of bounds: ', &
+                  local_index, own_points
+             call write_log(subname // ' ERROR: local_index out of bounds', &
+                  GM_FATAL, __FILE__, __LINE__)
+          end if
+          local_indices(local_col,local_row) = local_index
+          counts(local_index) = counts(local_index) + 1
+
+          ! Translate the local (col,row) to global coordinates, then linearize
+          ! using the full global east-west extent
+          global_row = local_row + global_row_offset
+          global_col = local_col + global_col_offset
+          global_index = (global_row - 1)*global_ewn + global_col
+          global_indices(local_col,local_row) = global_index
+       end do
+    end do
+
+    ! Make sure that each local index has been assigned exactly once
+    if (any(counts /= 1)) then
+       call write_log(subname // ' ERROR: not all local indices have been assigned exactly once', &
+            GM_FATAL, __FILE__, __LINE__)
+    end if
+
+  end subroutine glad_get_grid_indices
+
+ !===================================================================
+
+  subroutine glad_get_lat_lon(params, instance_index, &
+       lats, lons)
+
+    ! Get latitude and longitude for each grid cell
+
+    ! Output arrays do NOT have halo cells; the instance's internal lat/lon fields
+    ! are haloed, and the conversion below strips the halo cells
+
+    use parallel, only : own_ewn, own_nsn, parallel_convert_haloed_to_nonhaloed
+
+    ! Subroutine argument declarations --------------------------------------------------------
+
+    type(glad_params), intent(in) :: params
+    integer, intent(in) :: instance_index ! index of current ice sheet instance
+    real(dp), intent(out) :: lats(:,:) ! latitudes (degrees)
+    real(dp), intent(out) :: lons(:,:) ! longitudes (degrees)
+
+    ! Internal variables -----------------------------------------------------------------------
+    character(len=*), parameter :: subname = 'glad_get_lat_lon'
+
+    ! Begin subroutine code --------------------------------------------------------------------
+
+    ! Perform error checking on inputs: output arrays must match the locally-owned
+    ! (non-haloed) grid extents
+
+    if (size(lats, 1) /= own_ewn .or. size(lats, 2) /= own_nsn) then
+       call write_log(subname // ' ERROR: Wrong size for lats', &
+            GM_FATAL, __FILE__, __LINE__)
+    end if
+
+    if (size(lons, 1) /= own_ewn .or. size(lons, 2) /= own_nsn) then
+       call write_log(subname // ' ERROR: Wrong size for lons', &
+            GM_FATAL, __FILE__, __LINE__)
+    end if
+
+    call parallel_convert_haloed_to_nonhaloed(params%instances(instance_index)%lat, lats)
+    call parallel_convert_haloed_to_nonhaloed(params%instances(instance_index)%lon, lons)
+
+  end subroutine glad_get_lat_lon
+
+ !===================================================================
+
+  subroutine glad_get_areas(params, instance_index, areas)
+
+    ! Get area of each grid cell
+
+    ! Every cell is assigned the same area, dew * dns (m^2): the local grid is
+    ! assumed uniform and rectangular.
+    ! NOTE(review): this ignores any map-projection area distortion - confirm
+    ! that is acceptable for the coupling use case.
+
+    ! Subroutine argument declarations --------------------------------------------------------
+
+    type(glad_params), intent(in) :: params
+    integer, intent(in) :: instance_index ! index of current ice sheet instance
+    real(dp), intent(out) :: areas(:,:) ! areas (m^2)
+
+    areas(:,:) = get_dns(params%instances(instance_index)%model) * &
+                 get_dew(params%instances(instance_index)%model)
+
+  end subroutine glad_get_areas
+
+
+ !===================================================================
+
+  subroutine glad_gcm(params, instance_index, time, &
+                      qsmb, tsfc, &
+                      ice_covered, topo, &
+                      rofi, rofl, hflx, &
+                      ice_sheet_grid_mask, &
+                      icemask_coupled_fluxes, &
+                      output_flag, ice_tstep)
+
+    ! Main Glad subroutine for GCM coupling.
+    !
+    ! It does all necessary temporal averaging,
+    ! and calls the dynamic ice sheet model when required.
+    !
+    ! Input fields should be taken as means over the period since the last call.
+    ! See the user documentation for more information.
+    !
+    ! Input fields are assumed to NOT have halo cells
+
+    use glimmer_utils
+    use glad_timestep, only: glad_i_tstep_gcm
+    use glimmer_log
+    use glimmer_paramets, only: scyr
+    use parallel, only : parallel_convert_nonhaloed_to_haloed
+    use glide_types, only : get_ewn, get_nsn
+    use glad_output_fluxes, only : calculate_average_output_fluxes
+
+    implicit none
+
+    ! Subroutine argument declarations -------------------------------------------------------------
+
+    type(glad_params), intent(inout) :: params !> parameters for this run
+    integer, intent(in) :: instance_index !> index of current ice sheet instance
+    integer, intent(in) :: time !> Current model time (hours)
+
+    real(dp),dimension(:,:),intent(in) :: qsmb ! input surface mass balance of glacier ice (kg/m^2/s)
+    real(dp),dimension(:,:),intent(in) :: tsfc ! input surface ground temperature (deg C)
+
+    ! Output fields are intent(inout) rather than intent(out): they are only
+    ! refreshed when a mass-balance timestep completes, and retain their previous
+    ! values on all other calls
+    real(dp),dimension(:,:),intent(inout) :: ice_covered ! whether each grid cell is ice-covered [0,1]
+    real(dp),dimension(:,:),intent(inout) :: topo ! output surface elevation (m)
+    real(dp),dimension(:,:),intent(inout) :: hflx ! output heat flux (W/m^2, positive down)
+    real(dp),dimension(:,:),intent(inout) :: rofi ! output ice runoff (kg/m^2/s = mm H2O/s)
+    real(dp),dimension(:,:),intent(inout) :: rofl ! output liquid runoff (kg/m^2/s = mm H2O/s)
+    real(dp),dimension(:,:),intent(inout) :: ice_sheet_grid_mask !mask of ice sheet grid coverage
+    real(dp),dimension(:,:),intent(inout) :: icemask_coupled_fluxes !mask of ice sheet grid coverage where we are potentially sending non-zero fluxes
+
+    logical,optional,intent(out) :: output_flag ! Set true if outputs are set
+    logical,optional,intent(out) :: ice_tstep ! Set when an ice dynamic timestep has been done
+                                              ! and new output is available
+
+    ! Internal variables ----------------------------------------------------------------------------
+
+    integer :: ewn,nsn ! dimensions of local grid
+
+    ! version of input fields with halo cells
+    real(dp),dimension(:,:),allocatable :: qsmb_haloed
+    real(dp),dimension(:,:),allocatable :: tsfc_haloed
+
+    logical :: icets ! set by glad_i_tstep_gcm: whether an ice dynamics step was taken
+    character(250) :: message
+
+    integer :: av_start_time ! value of time from the last occasion averaging was restarted (hours)
+
+    ! Begin subroutine code --------------------------------------------------------------------
+
+    ! Reset output flag
+
+    if (present(output_flag)) output_flag = .false.
+    if (present(ice_tstep)) ice_tstep = .false.
+
+    ! Accumulate input fields for later averaging.
+    ! Inputs arrive without halo cells; glad_inputs stores haloed fields, so
+    ! convert before accumulating.
+
+    ewn = get_ewn(params%instances(instance_index)%model)
+    nsn = get_nsn(params%instances(instance_index)%model)
+    allocate(qsmb_haloed(ewn,nsn))
+    allocate(tsfc_haloed(ewn,nsn))
+    call parallel_convert_nonhaloed_to_haloed(qsmb, qsmb_haloed)
+    call parallel_convert_nonhaloed_to_haloed(tsfc, tsfc_haloed)
+
+    call accumulate_averages(params%instances(instance_index)%glad_inputs, &
+         qsmb = qsmb_haloed, tsfc = tsfc_haloed, time = time)
+
+    ! ---------------------------------------------------------
+    ! If this is a mass balance timestep, prepare global fields, and do a timestep
+    ! for each model instance
+    ! ---------------------------------------------------------
+
+    av_start_time = get_av_start_time(params%instances(instance_index)%glad_inputs)
+
+    ! Three cases, based on the elapsed time since averaging was restarted:
+    ! (1) not on a forcing-step boundary -> fatal (caller is out of sync);
+    ! (2) this forcing step would overshoot the mass-balance step -> fatal;
+    ! (3) this forcing step exactly completes the mass-balance step -> do the step.
+    ! Otherwise (mid-accumulation) nothing more to do this call.
+    if (mod (time - av_start_time, params%time_step) /= 0) then
+
+       write(message,*) 'Unexpected calling of GLAD at time ', time
+       call write_log(message,GM_FATAL,__FILE__,__LINE__)
+
+    else if (time - av_start_time + params%time_step > params%tstep_mbal) then
+
+       write(message,*) &
+            'Incomplete forcing of GLAD mass-balance time-step detected at time ', time
+       call write_log(message,GM_FATAL,__FILE__,__LINE__)
+
+    else if (time - av_start_time + params%time_step == params%tstep_mbal) then
+
+       ! Set output_flag
+
+       ! At present, outputs are done for each mass-balance timestep, since
+       ! that involved least change to the code. However, it might be good
+       ! to change the output to occur with user-specified frequency.
+
+       if (present(output_flag)) output_flag = .true.
+
+       ! Do a timestep for this instance
+
+       if (time == params%instances(instance_index)%next_time) then
+
+          params%instances(instance_index)%next_time = &
+               params%instances(instance_index)%next_time + &
+               params%instances(instance_index)%mbal_tstep
+
+          ! Calculate averages by dividing by number of steps elapsed
+          ! since last model timestep.
+
+          call calculate_averages(params%instances(instance_index)%glad_inputs, &
+               qsmb = params%instances(instance_index)%acab, &
+               tsfc = params%instances(instance_index)%artm)
+
+          ! Calculate total surface mass balance - multiply by time since last model timestep
+          ! Note on units: We want acab to have units of meters w.e. (accumulated over mass balance time step)
+          ! Initial units are kg m-2 s-1 = mm s-1
+          ! Divide by 1000 to convert from mm to m
+          ! Multiply by hours2seconds = 3600 to convert from 1/s to 1/hr. (tstep_mbal has units of hours)
+
+          !TODO - Modify code so that qsmb and acab are always in kg m-2 s-1 water equivalent?
+          params%instances(instance_index)%acab(:,:) = &
+               params%instances(instance_index)%acab(:,:) * &
+               params%tstep_mbal * hours2seconds / 1000.d0
+
+          if (GLC_DEBUG .and. main_task) write(stdout,*) 'Take a glad time step, instance', instance_index
+          call glad_i_tstep_gcm(time, &
+               params%instances(instance_index), &
+               icets)
+
+          ! Time-average the accumulated output fluxes before copying them out
+          call calculate_average_output_fluxes( &
+               params%instances(instance_index)%glad_output_fluxes, &
+               rofi_tavg = params%instances(instance_index)%rofi_tavg, &
+               rofl_tavg = params%instances(instance_index)%rofl_tavg, &
+               hflx_tavg = params%instances(instance_index)%hflx_tavg)
+
+          call glad_set_output_fields(params%instances(instance_index), &
+               ice_covered, topo, rofi, rofl, hflx, &
+               ice_sheet_grid_mask, icemask_coupled_fluxes)
+
+
+          ! Set flag
+          ! (ice_tstep was initialized to .false. above; OR-in this instance's result)
+          if (present(ice_tstep)) then
+             ice_tstep = (ice_tstep .or. icets)
+          end if
+
+       endif ! time = next_time
+
+       ! ---------------------------------------------------------
+       ! Reset averaging fields, flags and counters
+       ! ---------------------------------------------------------
+
+       call reset_glad_input_averages(params%instances(instance_index)%glad_inputs, &
+            next_av_start = time + params%time_step)
+
+       if (GLC_DEBUG .and. main_task) then
+          write(stdout,*) 'Done in glad_gcm'
+       endif
+
+    endif ! time - av_start_time + params%time_step > params%tstep_mbal
+
+  end subroutine glad_gcm
+
+ !===================================================================
+
+  subroutine end_glad(params,close_logfile)
+
+    !> tidy-up operations for Glad
+    use glad_initialise
+    use glimmer_log
+    implicit none
+
+    type(glad_params),intent(inout) :: params ! parameters for this run
+    logical, intent(in), optional :: close_logfile ! if true, then close the log file
+                                                   ! (GCM may do this elsewhere)
+    integer :: i ! instance loop index
+
+    ! end individual instances
+
+    do i = 1, params%ninstances
+       call glad_i_end(params%instances(i))
+    enddo
+
+    ! Close the log file unless the caller explicitly asked us not to
+    ! (default behavior when close_logfile is absent is to close it)
+    if (present(close_logfile)) then
+       if (close_logfile) call close_log
+    else
+       call close_log
+    endif
+
+    ! Release top-level storage owned by params
+    deallocate(params%config_fnames)
+    deallocate(params%instances)
+
+  end subroutine end_glad
+
+ !----------------------------------------------------------------------
+ ! PRIVATE INTERNAL GLIMMER SUBROUTINES FOLLOW.............
+ !----------------------------------------------------------------------
+
+  subroutine glad_set_output_fields(instance, &
+       ice_covered, topo, &
+       rofi, rofl, hflx, &
+       ice_sheet_grid_mask, &
+       icemask_coupled_fluxes)
+
+    ! Sets output fields for this instance.
+    !
+    ! The output arguments do NOT have halo cells. The instance's internal fields DO
+    ! have halo cells, so this routine strips the halo cells when copying to the
+    ! output arguments.
+
+    use glad_output_states, only : set_output_states
+    use parallel, only : parallel_convert_haloed_to_nonhaloed
+    use glide_types, only : get_ewn, get_nsn
+
+    ! Subroutine argument declarations --------------------------------------------------------
+
+    type(glad_instance), intent(in) :: instance
+
+    real(dp),dimension(:,:),intent(out) :: ice_covered ! whether each grid cell is ice-covered [0,1]
+    real(dp),dimension(:,:),intent(out) :: topo ! output surface elevation (m)
+    real(dp),dimension(:,:),intent(out) :: hflx ! output heat flux (W/m^2, positive down)
+    real(dp),dimension(:,:),intent(out) :: rofi ! output ice runoff (kg/m^2/s = mm H2O/s)
+    real(dp),dimension(:,:),intent(out) :: rofl ! output liquid runoff (kg/m^2/s = mm H2O/s)
+    real(dp),dimension(:,:),intent(out) :: ice_sheet_grid_mask !mask of ice sheet grid coverage
+    real(dp),dimension(:,:),intent(out) :: icemask_coupled_fluxes !mask of ice sheet grid coverage where we are potentially sending non-zero fluxes
+
+    ! Internal variables -----------------------------------------------------------------------
+
+    integer :: ewn,nsn ! dimensions of local grid
+
+    ! temporary versions of output fields with halo cells
+    real(dp),dimension(:,:),allocatable :: ice_covered_haloed
+    real(dp),dimension(:,:),allocatable :: topo_haloed
+    real(dp),dimension(:,:),allocatable :: hflx_haloed
+    real(dp),dimension(:,:),allocatable :: rofi_haloed
+    real(dp),dimension(:,:),allocatable :: rofl_haloed
+    real(dp),dimension(:,:),allocatable :: ice_sheet_grid_mask_haloed
+    real(dp),dimension(:,:),allocatable :: icemask_coupled_fluxes_haloed
+
+    ! Begin subroutine code --------------------------------------------------------------------
+
+    ewn = get_ewn(instance%model)
+    nsn = get_nsn(instance%model)
+
+    allocate(ice_covered_haloed(ewn,nsn))
+    allocate(topo_haloed(ewn,nsn))
+    allocate(hflx_haloed(ewn,nsn))
+    allocate(rofi_haloed(ewn,nsn))
+    allocate(rofl_haloed(ewn,nsn))
+    allocate(ice_sheet_grid_mask_haloed(ewn,nsn))
+    allocate(icemask_coupled_fluxes_haloed(ewn,nsn))
+
+    call set_output_states(instance, &
+         ice_covered_haloed, topo_haloed, ice_sheet_grid_mask_haloed)
+
+    ! When zero_gcm_fluxes is enabled, suppress all fluxes to the GCM (and the
+    ! coupled-fluxes mask); otherwise pass along the time-averaged fluxes and use
+    ! the full ice sheet grid mask as the coupled-fluxes mask
+    if (instance%zero_gcm_fluxes == ZERO_GCM_FLUXES_TRUE) then
+       icemask_coupled_fluxes_haloed(:,:) = 0.d0
+       hflx_haloed(:,:) = 0.d0
+       rofi_haloed(:,:) = 0.d0
+       rofl_haloed(:,:) = 0.d0
+    else
+       icemask_coupled_fluxes_haloed(:,:) = ice_sheet_grid_mask_haloed(:,:)
+       hflx_haloed(:,:) = instance%hflx_tavg(:,:)
+       rofi_haloed(:,:) = instance%rofi_tavg(:,:)
+       rofl_haloed(:,:) = instance%rofl_tavg(:,:)
+    end if
+
+    ! Strip halo cells while copying to the caller's non-haloed output arrays
+    call parallel_convert_haloed_to_nonhaloed(ice_covered_haloed, ice_covered)
+    call parallel_convert_haloed_to_nonhaloed(topo_haloed, topo)
+    call parallel_convert_haloed_to_nonhaloed(hflx_haloed, hflx)
+    call parallel_convert_haloed_to_nonhaloed(rofi_haloed, rofi)
+    call parallel_convert_haloed_to_nonhaloed(rofl_haloed, rofl)
+    call parallel_convert_haloed_to_nonhaloed(ice_sheet_grid_mask_haloed, ice_sheet_grid_mask)
+    call parallel_convert_haloed_to_nonhaloed(icemask_coupled_fluxes_haloed, icemask_coupled_fluxes)
+
+  end subroutine glad_set_output_fields
+
+ !TODO - Move subroutine glad_readconfig to a glad_setup module, in analogy to glide_setup?
+
+  subroutine glad_readconfig(config, ninstances, fnames, infnames)
+
+    !> Determine whether a given config file is a
+    !> top-level glad config file, and return parameters
+    !> accordingly.
+
+    use glimmer_config
+    use glimmer_log
+    implicit none
+
+    ! Arguments -------------------------------------------
+
+    type(ConfigSection), pointer :: config !> structure holding sections of configuration file
+    integer, intent(out) :: ninstances !> Number of instances to create
+    character(fname_length),dimension(:),pointer :: fnames !> list of filenames (output)
+    character(fname_length),dimension(:) :: infnames !> list of filenames (input)
+
+    ! Internal variables ----------------------------------
+
+    type(ConfigSection), pointer :: section
+    character(len=100) :: message
+    integer :: i
+
+    ! NOTE(review): calling associated() on fnames assumes the caller passes a
+    ! defined (e.g. nullified) pointer - undefined pointers make this call illegal.
+    ! Also, nullifying without deallocating leaks any prior allocation - confirm
+    ! callers never pass an already-allocated fnames.
+    if (associated(fnames)) nullify(fnames)
+
+    call GetSection(config,section,'GLAD')
+    if (associated(section)) then
+       ! Top-level GLAD config: read the instance count, then expect one
+       ! 'GLAD instance' section per instance, each naming its config file
+       call GetValue(section,'n_instance',ninstances)
+       allocate(fnames(ninstances))
+       do i=1,ninstances
+          call GetSection(section%next,section,'GLAD instance')
+          if (.not.associated(section)) then
+             write(message,*) 'Must specify ',ninstances,' instance config files'
+             call write_log(message,GM_FATAL,__FILE__,__LINE__)
+          end if
+          call GetValue(section,'name',fnames(i))
+       end do
+    else
+       ! No GLAD section: treat the input as a single-instance config
+       ! NOTE(review): fnames=infnames requires infnames to have exactly one
+       ! element here (fnames is allocated with size 1) - TODO confirm callers
+       ninstances=1
+       allocate(fnames(1))
+       fnames=infnames
+    end if
+
+    ! Print some configuration information
+
+!!$    call write_log('GLAD global')
+!!$    call write_log('------------')
+!!$    write(message,*) 'number of instances :',params%ninstances
+!!$    call write_log(message)
+!!$    call write_log('')
+
+  end subroutine glad_readconfig
+
+
+ !========================================================
+
+  integer function check_mbts(timesteps)
+
+    !> Checks to see that all mass-balance time-steps are
+    !> the same. Flags a fatal error if not, else assigns that
+    !> value to the output
+
+    use glimmer_log
+
+    implicit none
+
+    ! NOTE(review): timesteps is only read here and could be declared intent(in)
+    integer,dimension(:) :: timesteps !> Array of mass-balance timesteps
+
+    integer :: n,i
+
+    n = size(timesteps)
+    ! Degenerate case: no instances - return 0 rather than indexing an empty array
+    if (n==0) then
+       check_mbts = 0
+       return
+    endif
+
+    check_mbts = timesteps(1)
+
+    ! Any instance whose timestep differs from the first is a fatal error
+    do i = 2,n
+       if (timesteps(i) /= check_mbts) then
+          call write_log('All instances must have the same mass-balance and ice timesteps', &
+               GM_FATAL,__FILE__,__LINE__)
+       endif
+    enddo
+
+  end function check_mbts
+
+!++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+end module glad_main
+
+!++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
diff --git a/components/cism/glimmer-cism/libglad/glad_mbal_coupling.F90 b/components/cism/glimmer-cism/libglad/glad_mbal_coupling.F90
new file mode 100644
index 0000000000..033573809c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_mbal_coupling.F90
@@ -0,0 +1,164 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_mbal_coupling.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <https://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glad_mbal_coupling
+
+  ! NOTE(review): glimmer_config does not appear to be referenced in this module -
+  ! confirm whether this use statement can be dropped
+  use glimmer_config
+  use glimmer_global, only : dp
+
+  implicit none
+
+  ! Module to handle the accumulation of inputs.
+
+  ! Note that this module has some functionality in common with glad_input_averages, but
+  ! they are used at different stages in the time loops.
+
+  ! Accumulator state for mass-balance coupling. Component initializers below are
+  ! type-definition defaults, applied to every newly created glad_mbc.
+  type glad_mbc
+     real(dp),dimension(:,:),pointer :: acab_save => null() ! used to accumulate mass-balance
+     real(dp),dimension(:,:),pointer :: artm_save => null() ! used to average air-temperature
+     real(dp),dimension(:,:),pointer :: acab => null() ! Instantaneous mass-balance
+     real(dp),dimension(:,:),pointer :: artm => null() ! Instantaneous air temperature
+     integer :: av_count = 0 ! Counter for averaging inputs
+     logical :: new_accum = .true. ! true when the next accumulate call starts a fresh averaging period
+     integer :: start_time ! the time we started averaging (hours)
+     integer :: tstep ! Timestep of mass-balance scheme in hours
+  end type glad_mbc
+
+contains
+
+  subroutine glad_mbc_init(params,lgrid)
+
+    ! Initialize the glad_mbc structure ('params').
+
+    ! NOTE(wjs, 2015-03-19) In glint, when using SMB coupling, this was done in
+    ! glint_downscale.F90: glint_init_input_gcm (rather than in glint_mbc_init). However,
+    ! for simplicity and modularity, I am moving operations like this that act on glad_mbc
+    ! into this glad_mbal_coupling module.
+
+    use glimmer_coordinates
+    use glad_constants, only : years2hours
+
+    ! NOTE(review): params and lgrid lack intent declarations (params is
+    ! effectively inout, lgrid in) - consider adding them
+    type(glad_mbc) :: params
+    type(coordsystem_type) :: lgrid
+
+    ! Deallocate if necessary (safe for re-initialization)
+
+    if (associated(params%acab_save)) deallocate(params%acab_save)
+    if (associated(params%artm_save)) deallocate(params%artm_save)
+    if (associated(params%acab)) deallocate(params%acab)
+    if (associated(params%artm)) deallocate(params%artm)
+
+    ! Allocate arrays and zero
+
+    call coordsystem_allocate(lgrid,params%acab_save); params%acab_save = 0.d0
+    call coordsystem_allocate(lgrid,params%artm_save); params%artm_save = 0.d0
+    call coordsystem_allocate(lgrid,params%acab); params%acab = 0.d0
+    call coordsystem_allocate(lgrid,params%artm); params%artm = 0.d0
+
+    ! Set default mass balance time step
+    !
+    ! This is the default value that was being used in glint for the MASS_BALANCE_GCM
+    ! scheme (some other schemes used different defaults)
+    params%tstep = nint(years2hours) ! mbal tstep = 1 year
+
+  end subroutine glad_mbc_init
+
+!++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  subroutine glad_accumulate_input_gcm(params, time, acab, artm)
+
+    ! Accumulate one forcing interval's mass balance and temperature into the
+    ! running sums, and record the instantaneous fields.
+    ! In glint, this was done in glint_downscale.F90
+
+    ! NOTE(review): params and time lack intent declarations - consider adding them
+    type(glad_mbc) :: params
+    integer :: time
+
+    real(dp),dimension(:,:),intent(in) :: acab ! Surface mass balance (m)
+    real(dp),dimension(:,:),intent(in) :: artm ! Mean air temperature (degC)
+
+    ! Things to do the first time (i.e., at the start of a new averaging period)
+
+    if (params%new_accum) then
+
+       params%new_accum = .false.
+       params%av_count = 0
+
+       ! Initialise
+
+       params%acab_save = 0.d0
+       params%artm_save = 0.d0
+       params%start_time = time
+
+    end if
+
+    params%av_count = params%av_count + 1
+
+    ! Accumulate
+
+    params%acab_save = params%acab_save + acab
+    params%artm_save = params%artm_save + artm
+
+    ! Copy instantaneous fields
+
+    params%acab = acab
+    params%artm = artm
+
+  end subroutine glad_accumulate_input_gcm
+
+  !+++++++++++++++++++++++++++++++++++++++++++++++++
+
+  subroutine glad_average_input_gcm(params, dt, acab, artm)
+
+    ! Finish an averaging period: convert the accumulated sums into a mean
+    ! temperature and a mass-balance rate, and reset for the next period.
+    ! In glint, this was done in glint_downscale.F90
+
+    use glad_constants, only: hours2years
+
+    ! NOTE(review): params lacks an intent declaration (effectively inout)
+    type(glad_mbc) :: params
+    integer, intent(in) :: dt !> mbal accumulation time (hours)
+    real(dp),dimension(:,:),intent(out) :: artm !> Mean air temperature (degC)
+    real(dp),dimension(:,:),intent(out) :: acab !> Mass-balance (m/yr)
+
+    ! The new_accum guard ensures artm_save is divided by av_count at most once
+    ! per accumulation period, even if this routine is called repeatedly
+    if (.not. params%new_accum) then
+       params%artm_save = params%artm_save / real(params%av_count,dp)
+    end if
+    artm = params%artm_save
+
+    ! Note: acab_save has units of m, but acab has units of m/yr
+    ! (dt [hours] * hours2years gives the accumulation time in years)
+    acab = params%acab_save / real(dt*hours2years,dp)
+
+    ! Mark the period finished so the next accumulate call starts afresh
+    params%new_accum = .true.
+
+  end subroutine glad_average_input_gcm
+
+  !+++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+end module glad_mbal_coupling
+
+!++++++++++++++++++++++++++++++++++++++++++++++++++++++
diff --git a/components/cism/glimmer-cism/libglad/glad_mbal_io.F90.default b/components/cism/glimmer-cism/libglad/glad_mbal_io.F90.default
new file mode 100644
index 0000000000..39c0759f0a
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_mbal_io.F90.default
@@ -0,0 +1,878 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+! WARNING: this file was automatically generated on
+! Fri, 03 Apr 2015 18:33:13 +0000
+! from ncdf_template.F90.in
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+! WJS (1-30-12): The following (turning optimization off) is needed as a workaround for an
+! xlf compiler bug, at least in IBM XL Fortran for AIX, V12.1 on bluefire
+#ifdef CPRIBM
+@PROCESS OPT(0)
+#endif
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! ncdf_template.F90.in - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <https://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#define NCO outfile%nc
+#define NCI infile%nc
+
+
+module glad_mbal_io
+ ! template for creating subsystem specific I/O routines
+ ! written by Magnus Hagdorn, 2004
+
+ use glad_type
+
+ implicit none
+
+ private :: get_xtype, is_enabled, is_enabled_0dint, is_enabled_1dint, is_enabled_2dint, is_enabled_0dreal, is_enabled_1dreal, is_enabled_2dreal, is_enabled_3dreal
+
+ character(310), save :: restart_variable_list='' ! list of variables needed for a restart
+!TODO change 310 to a variable - see glimmer_ncdf.F90 in the definition for type glimmer_nc_stat for other instances of this value.
+
+ interface is_enabled ! MJH 10/21/13: Interface needed for determining if arrays have been enabled. See notes below in glad_mbal_io_create.
+ module procedure is_enabled_0dint
+ module procedure is_enabled_1dint
+ module procedure is_enabled_2dint
+ module procedure is_enabled_0dreal
+ module procedure is_enabled_1dreal
+ module procedure is_enabled_2dreal
+ module procedure is_enabled_3dreal
+ end interface is_enabled
+
+contains
+
+ !*****************************************************************************
+ ! netCDF output
+ !*****************************************************************************
+  subroutine glad_mbal_io_createall(model,data,outfiles)
+    ! open all netCDF files for output
+    use glad_type
+    use glide_types
+    use glimmer_ncdf
+    use glimmer_ncio
+    implicit none
+    type(glide_global_type) :: model
+    type(glad_instance) :: data ! MJH 10/21/13: Making 'data' mandatory. See notes below in glad_mbal_io_create
+    type(glimmer_nc_output),optional,pointer :: outfiles
+
+    ! local variables
+    type(glimmer_nc_output), pointer :: oc
+
+    ! Start from the caller-supplied list if given, otherwise from the model's
+    ! own list of output files
+    if (present(outfiles)) then
+       oc => outfiles
+    else
+       oc=>model%funits%out_first
+    end if
+
+    ! Walk the linked list of output files, creating each one
+    do while(associated(oc))
+       call glad_mbal_io_create(oc,model,data)
+       oc=>oc%next
+    end do
+  end subroutine glad_mbal_io_createall
+
+  subroutine glad_mbal_io_writeall(data,model,atend,outfiles,time)
+    ! if necessary write to netCDF files
+    use glad_type
+    use glide_types
+    use glimmer_ncdf
+    use glimmer_ncio
+    implicit none
+    type(glad_instance) :: data
+    type(glide_global_type) :: model
+    logical, optional :: atend ! force a write (e.g. at end of run)
+    type(glimmer_nc_output),optional,pointer :: outfiles
+    real(dp),optional :: time
+
+    ! local variables
+    type(glimmer_nc_output), pointer :: oc
+    ! NOTE(review): initializing at declaration gives forcewrite an implicit SAVE,
+    ! so its value persists across calls; it is only reset when atend is present.
+    ! A call after one with atend=.true. but without atend would still force a
+    ! write - confirm intended, or assign forcewrite = .false. at entry instead.
+    logical :: forcewrite=.false.
+
+    ! Start from the caller-supplied list if given, otherwise from the model's
+    ! own list of output files
+    if (present(outfiles)) then
+       oc => outfiles
+    else
+       oc=>model%funits%out_first
+    end if
+
+    if (present(atend)) then
+       forcewrite = atend
+    end if
+
+    ! Walk the linked list of output files; for each, accumulate averages (if
+    ! enabled), check whether a write is due, and write/reset as needed
+    do while(associated(oc))
+#ifdef HAVE_AVG
+       if (oc%do_averages) then
+          call glad_mbal_avg_accumulate(oc,data,model)
+       end if
+#endif
+       call glimmer_nc_checkwrite(oc,model,forcewrite,time)
+       if (oc%nc%just_processed) then
+          ! write standard variables
+          call glad_mbal_io_write(oc,data)
+#ifdef HAVE_AVG
+          if (oc%do_averages) then
+             call glad_mbal_avg_reset(oc,data)
+          end if
+#endif
+       end if
+       oc=>oc%next
+    end do
+  end subroutine glad_mbal_io_writeall
+
+ subroutine glad_mbal_io_create(outfile,model,data)
+ use parallel
+ use glide_types
+ use glad_type
+ use glimmer_ncdf
+ use glimmer_ncio
+ use glimmer_map_types
+ use glimmer_log
+ use glimmer_paramets
+ use glimmer_scales
+ use glimmer_log
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ type(glide_global_type) :: model
+ type(glad_instance) :: data ! MJH 10/21/13: Making 'data' mandatory. See note below
+
+ integer status,varid,pos
+
+ ! MJH 10/21/13: Local variables needed for checking if a variable is enabled.
+ real(dp) :: tavgf
+ integer :: up
+
+ integer :: level_dimid
+ integer :: lithoz_dimid
+ integer :: staglevel_dimid
+ integer :: stagwbndlevel_dimid
+ integer :: time_dimid
+ integer :: x0_dimid
+ integer :: x1_dimid
+ integer :: y0_dimid
+ integer :: y1_dimid
+
+ ! defining dimensions
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'level',model%general%upn,level_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'level',level_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'lithoz',model%lithot%nlayer,lithoz_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'lithoz',lithoz_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'staglevel',model%general%upn-1,staglevel_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'staglevel',staglevel_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'stagwbndlevel',model%general%upn+1,stagwbndlevel_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'stagwbndlevel',stagwbndlevel_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_inq_dimid(NCO%id,'time',time_dimid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'x0',global_ewn-1,x0_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'x0',x0_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'x1',global_ewn,x1_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'x1',x1_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'y0',global_nsn-1,y0_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'y0',y0_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'y1',global_nsn,y1_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'y1',y1_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+
+ ! Expanding restart variables: if 'restart' or 'hot' is present, we remove that
+ ! word from the variable list, and flip the restartfile flag.
+ ! In CISM 2.0, 'restart' is the preferred name to represent restart variables,
+ ! but 'hot' is supported for backward compatibility. Thus, we check for both.
+ NCO%vars = ' '//trim(adjustl(NCO%vars))//' ' ! Need to maintain a space at beginning and end of list
+ ! expanding restart variables
+ pos = index(NCO%vars,' restart ')
+ if (pos.ne.0) then
+ NCO%vars = NCO%vars(:pos)//NCO%vars(pos+8:)
+ NCO%restartfile = .true.
+ end if
+ pos = index(NCO%vars,' hot ')
+ if (pos.ne.0) then
+ NCO%vars = NCO%vars(:pos)//NCO%vars(pos+4:)
+ NCO%restartfile = .true.
+ end if
+ ! Now apply necessary changes if the file is a restart file.
+ if (NCO%restartfile) then
+ if ((len_trim(NCO%vars) + len_trim(restart_variable_list) + 2) >= len(NCO%vars) ) then
+ call write_log('Adding restart variables has made the list of output variables too long for file ' // NCO%filename, GM_FATAL)
+ else
+ ! Expand the restart variable list
+ ! Need to maintain a space at beginning and end of list
+ NCO%vars = trim(NCO%vars) // ' ' // trim(restart_variable_list) // ' ' ! (a module variable)
+ ! Set the xtype to be double (required for an exact restart)
+ outfile%default_xtype = NF90_DOUBLE
+ endif
+ end if
+
+ ! Convert temp and flwa to versions on stag grid, if needed
+ ! Note: this check must occur after restart variables are expanded which happens in glimmer_nc_readparams
+ call check_for_tempstag(model%options%whichdycore,NCO)
+
+ ! checking if we need to handle time averages
+ pos = index(NCO%vars,"_tavg")
+ if (pos.ne.0) then
+ outfile%do_averages = .True.
+ end if
+
+ ! Now that the output variable list is finalized, make sure we aren't truncating what the user intends to be output.
+ ! Note: this only checks that the text in the variable list does not extend to within one character of the end of the variable.
+ ! It does not handle the case where the user exactly fills the allowable length with variables or has a too-long list with more than one space between variable names.
+ if ((len_trim(NCO%vars) + 1 ) >= len(NCO%vars)) then
+ call write_log('The list of output variables is too long for file ' // NCO%filename, GM_FATAL)
+ endif
+
+
+ ! MJH, 10/21/13: In the auto-generated code below, the creation of each output variable is wrapped by a check if the data for that
+ ! variable has a size greater than 0. This is because of recently added checks in glide_types.F90 that don't fully allocate
+ ! some variables if certain model options are disabled. This is to lower memory requirements while running the model.
+ ! The reason they have to be allocated with size zero rather than left unallocated is because the data for
+ ! some netCDF output variables is defined with math, which causes an error if the operands are unallocated.
+ ! Note that if a variable is not created, then it will not be subsequently written to.
+ ! Also note that this change requires that data be a mandatory argument to this subroutine.
+
+ ! Some output variables will need tavgf. The value does not matter, but it must exist.
+ ! Nonetheless, for completeness give it the proper value that it has in glad_mbal_io_write.
+ tavgf = outfile%total_time
+ if (tavgf.ne.0.d0) then
+ tavgf = 1.d0/tavgf
+ end if
+ ! Similarly, some output variables use the variable up. Give it value of 0 here.
+ up = 0
+
+ ! level -- sigma layers
+ if (.not.outfile%append) then
+ call write_log('Creating variable level')
+ status = parallel_def_var(NCO%id,'level',get_xtype(outfile,NF90_FLOAT),(/level_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'formula_terms', 'sigma: level topo: topg thick: thk')
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'sigma layers')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_sigma_coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ end if
+
+ ! lithoz -- vertical coordinate of lithosphere layer
+ if (.not.outfile%append) then
+ call write_log('Creating variable lithoz')
+ status = parallel_def_var(NCO%id,'lithoz',get_xtype(outfile,NF90_FLOAT),(/lithoz_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'vertical coordinate of lithosphere layer')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ end if
+
+ ! staglevel -- stag sigma layers
+ if (.not.outfile%append) then
+ call write_log('Creating variable staglevel')
+ status = parallel_def_var(NCO%id,'staglevel',get_xtype(outfile,NF90_FLOAT),(/staglevel_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'positive', 'down')
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'stag sigma layers')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_stag_sigma_coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ end if
+
+ ! stagwbndlevel -- stag sigma layers with boundaries
+ if (.not.outfile%append) then
+ call write_log('Creating variable stagwbndlevel')
+ status = parallel_def_var(NCO%id,'stagwbndlevel',get_xtype(outfile,NF90_FLOAT),(/stagwbndlevel_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'positive', 'down')
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'stag sigma layers with boundaries')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_stag_sigma_coordinate_with_bnd')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ end if
+
+ ! x0 -- Cartesian x-coordinate, velocity grid
+ if (.not.outfile%append) then
+ call write_log('Creating variable x0')
+ status = parallel_def_var(NCO%id,'x0',get_xtype(outfile,NF90_FLOAT),(/x0_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Cartesian x-coordinate, velocity grid')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'projection_x_coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ end if
+
+ ! x1 -- Cartesian x-coordinate
+ if (.not.outfile%append) then
+ call write_log('Creating variable x1')
+ status = parallel_def_var(NCO%id,'x1',get_xtype(outfile,NF90_FLOAT),(/x1_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Cartesian x-coordinate')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'projection_x_coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ end if
+
+ ! y0 -- Cartesian y-coordinate, velocity grid
+ if (.not.outfile%append) then
+ call write_log('Creating variable y0')
+ status = parallel_def_var(NCO%id,'y0',get_xtype(outfile,NF90_FLOAT),(/y0_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Cartesian y-coordinate, velocity grid')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'projection_y_coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ end if
+
+ ! y1 -- Cartesian y-coordinate
+ if (.not.outfile%append) then
+ call write_log('Creating variable y1')
+ status = parallel_def_var(NCO%id,'y1',get_xtype(outfile,NF90_FLOAT),(/y1_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Cartesian y-coordinate')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'projection_y_coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ end if
+
+ ! instant_acab -- instantaneous mass-balance
+ pos = index(NCO%vars,' instant_acab ')
+ status = parallel_inq_varid(NCO%id,'instant_acab',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+12) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%mbal_accum%acab)) then
+ call write_log('Creating variable instant_acab')
+ status = parallel_def_var(NCO%id,'instant_acab',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'instantaneous mass-balance')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable instant_acab was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! instant_artm -- instantaneous air temperature
+ pos = index(NCO%vars,' instant_artm ')
+ status = parallel_inq_varid(NCO%id,'instant_artm',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+12) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%mbal_accum%artm)) then
+ call write_log('Creating variable instant_artm')
+ status = parallel_def_var(NCO%id,'instant_artm',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'instantaneous air temperature')
+ status = parallel_put_att(NCO%id, varid, 'units', 'degC')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable instant_artm was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ end subroutine glad_mbal_io_create
+
+ subroutine glad_mbal_io_write(outfile,data)
+ use parallel
+ use glad_type
+ use glimmer_ncdf
+ use glimmer_paramets
+ use glimmer_scales
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+    ! structure containing output netCDF descriptor
+ type(glad_instance) :: data
+ ! the model instance
+
+ ! local variables
+ real(dp) :: tavgf
+ integer status, varid
+ integer up
+
+ tavgf = outfile%total_time
+ if (tavgf.ne.0.d0) then
+ tavgf = 1.d0/tavgf
+ end if
+
+ ! write variables
+ status = parallel_inq_varid(NCO%id,'instant_acab',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%mbal_accum%acab, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'instant_artm',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%mbal_accum%artm, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ end subroutine glad_mbal_io_write
+
+
+ subroutine glad_mbal_add_to_restart_variable_list(vars_to_add)
+ ! This subroutine adds variables to the list of variables needed for a restart.
+ ! It is a public subroutine that allows other parts of the model to modify the list,
+ ! which is a module level variable. MJH 1/17/2013
+
+ use glimmer_log
+ implicit none
+
+ !------------------------------------------------------------------------------------
+ ! Subroutine arguments
+ !------------------------------------------------------------------------------------
+ character(len=*), intent (in) :: vars_to_add ! list of variable(s) to be added to the list of restart variables
+ !character(*), intent (inout) :: restart_variable_list ! list of variables needed to perform an exact restart - module variable
+
+ !------------------------------------------------------------------------------------
+ ! Internal variables
+ !------------------------------------------------------------------------------------
+
+ !------------------------------------------------------------------------------------
+
+ ! Add the variables to the list so long as they don't make the list too long.
+ if ( (len_trim(restart_variable_list) + 1 + len_trim(vars_to_add)) > len(restart_variable_list)) then
+ call write_log('Adding restart variables has made the restart variable list too long.',GM_FATAL)
+ else
+ restart_variable_list = trim(adjustl(restart_variable_list)) // ' ' // trim(vars_to_add)
+ !call write_log('Adding to glad_mbal restart variable list: ' // trim(vars_to_add) )
+ endif
+
+ end subroutine glad_mbal_add_to_restart_variable_list
+
+
+ ! Functions for the interface 'is_enabled'. These are needed by the auto-generated code in glad_mbal_io_create
+ ! to determine if a variable is 'turned on', and should be written.
+
+ function is_enabled_0dint(var)
+ integer, intent(in) :: var
+ logical :: is_enabled_0dint
+ is_enabled_0dint = .true. ! scalars are always enabled
+ return
+ end function is_enabled_0dint
+
+ function is_enabled_1dint(var)
+ integer, dimension(:), pointer, intent(in) :: var
+ logical :: is_enabled_1dint
+ if (associated(var)) then
+ is_enabled_1dint = .true.
+ else
+ is_enabled_1dint = .false.
+ endif
+ return
+ end function is_enabled_1dint
+
+ function is_enabled_2dint(var)
+ integer, dimension(:,:), pointer, intent(in) :: var
+ logical :: is_enabled_2dint
+ if (associated(var)) then
+ is_enabled_2dint = .true.
+ else
+ is_enabled_2dint = .false.
+ endif
+ return
+ end function is_enabled_2dint
+
+ function is_enabled_0dreal(var)
+ real(dp), intent(in) :: var
+ logical :: is_enabled_0dreal
+ is_enabled_0dreal = .true. ! scalars are always enabled
+ return
+ end function is_enabled_0dreal
+
+ function is_enabled_1dreal(var)
+ real(dp), dimension(:), pointer, intent(in) :: var
+ logical :: is_enabled_1dreal
+ if (associated(var)) then
+ is_enabled_1dreal = .true.
+ else
+ is_enabled_1dreal = .false.
+ endif
+ return
+ end function is_enabled_1dreal
+
+ function is_enabled_2dreal(var)
+ real(dp), dimension(:,:), pointer, intent(in) :: var
+ logical :: is_enabled_2dreal
+ if (associated(var)) then
+ is_enabled_2dreal = .true.
+ else
+ is_enabled_2dreal = .false.
+ endif
+ return
+ end function is_enabled_2dreal
+
+ function is_enabled_3dreal(var)
+ real(dp), dimension(:,:,:), pointer, intent(in) :: var
+ logical :: is_enabled_3dreal
+ if (associated(var)) then
+ is_enabled_3dreal = .true.
+ else
+ is_enabled_3dreal = .false.
+ endif
+ return
+ end function is_enabled_3dreal
+
+
+ !*****************************************************************************
+ ! netCDF input
+ !*****************************************************************************
+ subroutine glad_mbal_io_readall(data, model, filetype)
+ ! read from netCDF file
+ use glad_type
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(glad_instance) :: data
+ type(glide_global_type) :: model
+ integer, intent(in), optional :: filetype ! 0 for input, 1 for forcing; defaults to input
+
+ ! local variables
+ type(glimmer_nc_input), pointer :: ic
+ integer :: filetype_local
+
+ if (present(filetype)) then
+ filetype_local = filetype
+ else
+ filetype_local = 0 ! default to input type
+ end if
+
+ if (filetype_local == 0) then
+ ic=>model%funits%in_first
+ else
+ ic=>model%funits%frc_first
+ endif
+ do while(associated(ic))
+ call glimmer_nc_checkread(ic,model)
+ if (ic%nc%just_processed) then
+ call glad_mbal_io_read(ic,data)
+ end if
+ ic=>ic%next
+ end do
+ end subroutine glad_mbal_io_readall
+
+
+ subroutine glad_mbal_read_forcing(data, model)
+ ! Read data from forcing files
+ use glimmer_log
+ use glide_types
+ use glimmer_ncdf
+
+ implicit none
+ type(glad_instance) :: data
+ type(glide_global_type), intent(inout) :: model
+
+ ! Locals
+ type(glimmer_nc_input), pointer :: ic
+ integer :: t
+ real(dp) :: eps ! a tolerance to use for stepwise constant forcing
+
+ ! Make eps a fraction of the time step.
+ eps = model%numerics%tinc * 1.0d-4
+
+ ! read forcing files
+ ic=>model%funits%frc_first
+ do while(associated(ic))
+
+ !print *, 'possible forcing times', ic%times
+
+ ! Find the current time in the file
+ do t = ic%nt, 1, -1 ! look through the time array backwards
+ if ( ic%times(t) <= model%numerics%time + eps) then
+ ! use the largest time that is smaller or equal to the current time (stepwise forcing)
+
+ ! Set the desired time to be read
+ ic%current_time = t
+ !print *, 'time, forcing index, forcing time', model%numerics%time, ic%current_time, ic%times(ic%current_time)
+ exit ! once we find the time, exit the loop
+ endif
+ end do
+
+ ! read all forcing fields present in this file for the time specified above
+ ic%nc%just_processed = .false. ! set this to false so it will be re-processed every time through - this ensures info gets written to the log, and that time levels don't get skipped.
+ call glad_mbal_io_readall(data, model, filetype=1)
+
+ ! move on to the next forcing file
+ ic=>ic%next
+ end do
+
+ end subroutine glad_mbal_read_forcing
+
+
+!------------------------------------------------------------------------------
+
+
+ subroutine glad_mbal_io_read(infile,data)
+ ! read variables from a netCDF file
+ use parallel
+ use glimmer_log
+ use glimmer_ncdf
+ use glad_type
+ use glimmer_paramets
+ use glimmer_scales
+ implicit none
+ type(glimmer_nc_input), pointer :: infile
+    ! structure containing output netCDF descriptor
+ type(glad_instance) :: data
+ ! the model instance
+
+ ! local variables
+ integer status,varid
+ integer up
+ real(dp) :: scaling_factor
+
+ ! read variables
+ end subroutine glad_mbal_io_read
+
+ subroutine glad_mbal_io_checkdim(infile,model,data)
+ ! check if dimension sizes in file match dims of model
+ use parallel
+ use glimmer_log
+ use glimmer_ncdf
+ use glide_types
+ use glad_type
+ implicit none
+ type(glimmer_nc_input), pointer :: infile
+    ! structure containing output netCDF descriptor
+ type(glide_global_type) :: model
+ type(glad_instance), optional :: data
+
+ integer status,dimid,dimsize
+ character(len=150) message
+
+ ! check dimensions
+ status = parallel_inq_dimid(NCI%id,'level',dimid)
+ if (dimid.gt.0) then
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ if (dimsize.ne.model%general%upn) then
+ write(message,*) 'Error, reading file ',trim(NCI%filename),' size level does not match: ', &
+ model%general%upn
+ call write_log(message,GM_FATAL)
+ end if
+ end if
+ status = parallel_inq_dimid(NCI%id,'lithoz',dimid)
+ if (dimid.gt.0) then
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ if (dimsize.ne.model%lithot%nlayer) then
+ write(message,*) 'Error, reading file ',trim(NCI%filename),' size lithoz does not match: ', &
+ model%lithot%nlayer
+ call write_log(message,GM_FATAL)
+ end if
+ end if
+ status = parallel_inq_dimid(NCI%id,'staglevel',dimid)
+ if (dimid.gt.0) then
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ if (dimsize.ne.model%general%upn-1) then
+ write(message,*) 'Error, reading file ',trim(NCI%filename),' size staglevel does not match: ', &
+ model%general%upn-1
+ call write_log(message,GM_FATAL)
+ end if
+ end if
+ status = parallel_inq_dimid(NCI%id,'stagwbndlevel',dimid)
+ if (dimid.gt.0) then
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ if (dimsize.ne.model%general%upn+1) then
+ write(message,*) 'Error, reading file ',trim(NCI%filename),' size stagwbndlevel does not match: ', &
+ model%general%upn+1
+ call write_log(message,GM_FATAL)
+ end if
+ end if
+ status = parallel_inq_dimid(NCI%id,'x0',dimid)
+ if (dimid.gt.0) then
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ if (dimsize.ne.global_ewn-1) then
+ write(message,*) 'Error, reading file ',trim(NCI%filename),' size x0 does not match: ', &
+ global_ewn-1
+ call write_log(message,GM_FATAL)
+ end if
+ end if
+ status = parallel_inq_dimid(NCI%id,'x1',dimid)
+ if (dimid.gt.0) then
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ if (dimsize.ne.global_ewn) then
+ write(message,*) 'Error, reading file ',trim(NCI%filename),' size x1 does not match: ', &
+ global_ewn
+ call write_log(message,GM_FATAL)
+ end if
+ end if
+ status = parallel_inq_dimid(NCI%id,'y0',dimid)
+ if (dimid.gt.0) then
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ if (dimsize.ne.global_nsn-1) then
+ write(message,*) 'Error, reading file ',trim(NCI%filename),' size y0 does not match: ', &
+ global_nsn-1
+ call write_log(message,GM_FATAL)
+ end if
+ end if
+ status = parallel_inq_dimid(NCI%id,'y1',dimid)
+ if (dimid.gt.0) then
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ if (dimsize.ne.global_nsn) then
+ write(message,*) 'Error, reading file ',trim(NCI%filename),' size y1 does not match: ', &
+ global_nsn
+ call write_log(message,GM_FATAL)
+ end if
+ end if
+ end subroutine glad_mbal_io_checkdim
+
+ !*****************************************************************************
+ ! calculating time averages
+ !*****************************************************************************
+#ifdef HAVE_AVG
+ subroutine glad_mbal_avg_accumulate(outfile,data,model)
+ use parallel
+ use glide_types
+ use glad_type
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+    ! structure containing output netCDF descriptor
+ type(glide_global_type) :: model
+ type(glad_instance) :: data
+
+ ! local variables
+ real(dp) :: factor
+ integer status, varid
+
+ ! increase total time
+ outfile%total_time = outfile%total_time + model%numerics%tinc
+ factor = model%numerics%tinc
+
+ end subroutine glad_mbal_avg_accumulate
+
+ subroutine glad_mbal_avg_reset(outfile,data)
+ use parallel
+ use glad_type
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+    ! structure containing output netCDF descriptor
+ type(glad_instance) :: data
+
+ ! local variables
+ integer status, varid
+
+ ! reset total time
+ outfile%total_time = 0.d0
+
+ end subroutine glad_mbal_avg_reset
+#endif
+
+ !*********************************************************************
+ ! some private procedures
+ !*********************************************************************
+
+ !> apply default type to be used in netCDF file
+ integer function get_xtype(outfile,xtype)
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile !< derived type holding information about output file
+ integer, intent(in) :: xtype !< the external netCDF type
+
+ get_xtype = xtype
+
+ if (xtype.eq.NF90_REAL .and. outfile%default_xtype.eq.NF90_DOUBLE) then
+ get_xtype = NF90_DOUBLE
+ end if
+ if (xtype.eq.NF90_DOUBLE .and. outfile%default_xtype.eq.NF90_REAL) then
+ get_xtype = NF90_REAL
+ end if
+ end function get_xtype
+
+ !*********************************************************************
+ ! lots of accessor subroutines follow
+ !*********************************************************************
+ subroutine glad_mbal_get_instant_acab(data,outarray)
+ use glimmer_scales
+ use glimmer_paramets
+ use glad_type
+ implicit none
+ type(glad_instance) :: data
+ real(dp), dimension(:,:), intent(out) :: outarray
+
+ outarray = data%mbal_accum%acab
+ end subroutine glad_mbal_get_instant_acab
+
+ subroutine glad_mbal_set_instant_acab(data,inarray)
+ use glimmer_scales
+ use glimmer_paramets
+ use glad_type
+ implicit none
+ type(glad_instance) :: data
+ real(dp), dimension(:,:), intent(in) :: inarray
+
+ data%mbal_accum%acab = inarray
+ end subroutine glad_mbal_set_instant_acab
+
+ subroutine glad_mbal_get_instant_artm(data,outarray)
+ use glimmer_scales
+ use glimmer_paramets
+ use glad_type
+ implicit none
+ type(glad_instance) :: data
+ real(dp), dimension(:,:), intent(out) :: outarray
+
+ outarray = data%mbal_accum%artm
+ end subroutine glad_mbal_get_instant_artm
+
+ subroutine glad_mbal_set_instant_artm(data,inarray)
+ use glimmer_scales
+ use glimmer_paramets
+ use glad_type
+ implicit none
+ type(glad_instance) :: data
+ real(dp), dimension(:,:), intent(in) :: inarray
+
+ data%mbal_accum%artm = inarray
+ end subroutine glad_mbal_set_instant_artm
+
+
+end module glad_mbal_io
diff --git a/components/cism/glimmer-cism/libglad/glad_mbal_vars.def b/components/cism/glimmer-cism/libglad/glad_mbal_vars.def
new file mode 100644
index 0000000000..0b7c5fcd5c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_mbal_vars.def
@@ -0,0 +1,95 @@
+#[]
+#dimensions: time, y1, x1
+#units:
+#long_name:
+#data:
+#factor:
+
+# setup for code generator
+[VARSET]
+# prefix of the generated module
+name: glad_mbal
+# f90 type containing all necessary data
+datatype: glad_instance
+# module where type is defined
+datamod: glad_type
+
+[x0]
+dimensions: x0
+units: meter
+long_name: Cartesian x-coordinate, velocity grid
+standard_name: projection_x_coordinate
+dimlen: global_ewn-1
+
+[y0]
+dimensions: y0
+units: meter
+long_name: Cartesian y-coordinate, velocity grid
+standard_name: projection_y_coordinate
+dimlen: global_nsn-1
+
+[x1]
+dimensions: x1
+units: meter
+long_name: Cartesian x-coordinate
+standard_name: projection_x_coordinate
+dimlen: global_ewn
+
+[y1]
+dimensions: y1
+units: meter
+long_name: Cartesian y-coordinate
+standard_name: projection_y_coordinate
+dimlen: global_nsn
+
+# --- MJH 8/29/2014 -----------------------------------------------
+# Because glad is calling glide_nc_fillall() these glide dimension variables
+# need to be included here even though they are not used by glad, otherwise
+# a fatal error occurs (at least on some builds).
+# A more appropriate fix might be to create a glad_nc_fillall() that would not
+# try to write these variables to the output file.
+[level]
+dimensions: level
+units: 1
+long_name: sigma layers
+standard_name: land_ice_sigma_coordinate
+formula_terms: sigma: level topo: topg thick: thk
+dimlen: model%general%upn
+
+[staglevel]
+dimensions: staglevel
+units: 1
+long_name: stag sigma layers
+standard_name: land_ice_stag_sigma_coordinate
+positive: down
+dimlen: model%general%upn-1
+
+[stagwbndlevel]
+dimensions: stagwbndlevel
+units: 1
+long_name: stag sigma layers with boundaries
+standard_name: land_ice_stag_sigma_coordinate_with_bnd
+positive: down
+dimlen: model%general%upn+1
+
+[lithoz]
+dimensions: lithoz
+units: meter
+long_name: vertical coordinate of lithosphere layer
+dimlen: model%lithot%nlayer
+# ------------------------------------------------------------------
+
+[instant_acab]
+dimensions: time, y1, x1
+units: meter
+long_name: instantaneous mass-balance
+data: data%mbal_accum%acab
+coordinates: lon lat
+
+[instant_artm]
+dimensions: time, y1, x1
+units: degC
+long_name: instantaneous air temperature
+data: data%mbal_accum%artm
+coordinates: lon lat
+
diff --git a/components/cism/glimmer-cism/libglad/glad_output_fluxes.F90 b/components/cism/glimmer-cism/libglad/glad_output_fluxes.F90
new file mode 100644
index 0000000000..e6b626e82e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_output_fluxes.F90
@@ -0,0 +1,159 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_output_fluxes.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glad_output_fluxes
+
+ !> This module defines a type and related operations for working with fluxes output to
+ !> the GCM. Its main purpose is to produce temporal averages of these outputs.
+
+ ! (Most of the code here used to be in glint_upscale.F90)
+
+ use glimmer_global, only: dp
+
+ implicit none
+ private
+
+ type, public :: glad_output_fluxes_type
+ private
+
+ integer :: av_count_output ! step counter
+
+ real(dp), dimension(:,:), pointer :: hflx_sum => null() ! conductive heat flux at top surface (W m-2)
+ real(dp), dimension(:,:), pointer :: rofi_sum => null() ! solid ice runoff (kg m-2 s-1)
+ real(dp), dimension(:,:), pointer :: rofl_sum => null() ! liquid runoff from basal/interior melting (kg m-2 s-1)
+ end type glad_output_fluxes_type
+
+ public :: initialize_glad_output_fluxes ! Initialize a glad_output_fluxes instance
+ public :: accumulate_output_fluxes ! Accumulate one time step's contribution to fluxes
+ public :: calculate_average_output_fluxes ! Compute and return time-average fluxes
+ public :: reset_output_fluxes ! Reset output_fluxes state to start a new averaging period
+
+contains
+
+ subroutine initialize_glad_output_fluxes(output_fluxes, ewn, nsn)
+ ! Initialize a glad_output_fluxes instance
+
+ type(glad_output_fluxes_type), intent(inout) :: output_fluxes
+
+ ! dimensions of local grid
+ integer, intent(in) :: ewn
+ integer, intent(in) :: nsn
+
+ allocate(output_fluxes%rofi_sum(ewn,nsn))
+ allocate(output_fluxes%rofl_sum(ewn,nsn))
+ allocate(output_fluxes%hflx_sum(ewn,nsn))
+
+ call reset_output_fluxes(output_fluxes)
+
+ end subroutine initialize_glad_output_fluxes
+
+ subroutine accumulate_output_fluxes(output_fluxes, model)
+ ! Given the calving, basal melting, and conductive heat flux fields from the dycore,
+ ! accumulate contributions to the rofi, rofl, and hflx fields to be sent to the coupler.
+
+ use glimmer_paramets, only: thk0, tim0
+ use glimmer_physcon, only : rhoi
+ use glide_types, only : glide_global_type
+
+ type(glad_output_fluxes_type), intent(inout) :: output_fluxes
+ type(glide_global_type), intent(in) :: model
+
+ output_fluxes%av_count_output = output_fluxes%av_count_output + 1
+
+ !--------------------------------------------------------------------
+ ! Accumulate solid runoff (calving)
+ !--------------------------------------------------------------------
+
+ ! Note on units: model%climate%calving has dimensionless ice thickness units
+ ! Multiply by thk0 to convert to meters of ice
+ ! Multiply by rhoi to convert to kg/m^2 water equiv.
+ ! Divide by (dt*tim0) to convert to kg/m^2/s
+
+ ! Convert to kg/m^2/s
+ output_fluxes%rofi_sum(:,:) = output_fluxes%rofi_sum(:,:) &
+ + model%climate%calving(:,:) * thk0 * rhoi / (model%numerics%dt * tim0)
+
+ !--------------------------------------------------------------------
+ ! Accumulate liquid runoff (basal melting)
+ !--------------------------------------------------------------------
+ !TODO - Add internal melting for enthalpy case
+
+ ! Note on units: model%temper%bmlt has dimensionless units of ice thickness per unit time
+ ! Multiply by thk0/tim0 to convert to meters ice per second
+ ! Multiply by rhoi to convert to kg/m^2/s water equiv.
+
+ ! Convert to kg/m^2/s
+ output_fluxes%rofl_sum(:,:) = output_fluxes%rofl_sum(:,:) &
+ + model%temper%bmlt(:,:) * thk0/tim0 * rhoi
+
+ !--------------------------------------------------------------------
+ ! Accumulate basal heat flux
+ !--------------------------------------------------------------------
+
+ ! Note on units: model%temper%ucondflx has units of W/m^2, positive down
+ ! Flip the sign so that hflx is positive up.
+
+ output_fluxes%hflx_sum(:,:) = output_fluxes%hflx_sum(:,:) &
+ - model%temper%ucondflx(:,:)
+
+ end subroutine accumulate_output_fluxes
+
+ subroutine calculate_average_output_fluxes(output_fluxes, rofi_tavg, rofl_tavg, hflx_tavg)
+ ! Compute and return time-average fluxes
+
+ type(glad_output_fluxes_type), intent(in) :: output_fluxes
+ real(dp), dimension(:,:), intent(out) :: rofi_tavg ! average solid ice runoff (kg m-2 s-1)
+ real(dp), dimension(:,:), intent(out) :: rofl_tavg ! average liquid runoff from basal/interior melting (kg m-2 s-1)
+ real(dp), dimension(:,:), intent(out) :: hflx_tavg ! average conductive heat flux at top surface (W m-2)
+
+ if (output_fluxes%av_count_output > 0) then
+ rofi_tavg(:,:) = output_fluxes%rofi_sum(:,:) / real(output_fluxes%av_count_output,dp)
+ rofl_tavg(:,:) = output_fluxes%rofl_sum(:,:) / real(output_fluxes%av_count_output,dp)
+ hflx_tavg(:,:) = output_fluxes%hflx_sum(:,:) / real(output_fluxes%av_count_output,dp)
+ else
+ rofi_tavg(:,:) = 0.d0
+ rofl_tavg(:,:) = 0.d0
+ hflx_tavg(:,:) = 0.d0
+ end if
+
+ end subroutine calculate_average_output_fluxes
+
+ subroutine reset_output_fluxes(output_fluxes)
+ ! Reset output_fluxes state to start a new averaging period
+
+ type(glad_output_fluxes_type), intent(inout) :: output_fluxes
+
+ output_fluxes%av_count_output = 0
+ output_fluxes%rofi_sum(:,:) = 0.d0
+ output_fluxes%rofl_sum(:,:) = 0.d0
+ output_fluxes%hflx_sum(:,:) = 0.d0
+ end subroutine reset_output_fluxes
+
+end module glad_output_fluxes
diff --git a/components/cism/glimmer-cism/libglad/glad_output_states.F90 b/components/cism/glimmer-cism/libglad/glad_output_states.F90
new file mode 100644
index 0000000000..9e135b75d2
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_output_states.F90
@@ -0,0 +1,159 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_output_states.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glad_output_states
+
+  ! Routines that derive the state fields (ice cover, surface topography,
+  ! grid mask) which CISM reports back to a climate model.
+
+  use glimmer_global, only : dp
+  use glimmer_paramets, only : thk0
+  use glide_types, only : glide_global_type, glide_geometry
+
+  implicit none
+  private
+
+  public :: set_output_states ! set state fields output to a climate model
+
+contains
+
+  !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  subroutine set_output_states(instance, &
+                               ice_covered, topo, ice_sheet_grid_mask)
+
+    ! Fill the three state fields sent to the climate model, point by point:
+    !  * ice_sheet_grid_mask: 1 where the point belongs to the "active grid"
+    !    (land or ice sheet), 0 elsewhere
+    !  * ice_covered: 1 where the point carries ice, 0 elsewhere; only points
+    !    inside the active grid can be flagged as ice-covered
+    !  * topo: surface elevation (m); 0 outside the active grid
+
+    use glad_type, only : glad_instance
+
+    ! Arguments ----------------------------------------------------------------------------
+
+    type(glad_instance), intent(in) :: instance
+    real(dp),dimension(:,:),intent(out) :: ice_covered ! whether each grid cell is ice-covered [0,1]
+    real(dp),dimension(:,:),intent(out) :: topo ! output surface elevation (m)
+    real(dp),dimension(:,:),intent(out) :: ice_sheet_grid_mask !mask of ice sheet grid coverage
+
+    ! Internal variables ----------------------------------------------------------------------
+
+    integer :: local_nx, local_ny ! extent of the local grid
+    integer :: ii, jj             ! grid indices
+
+    ! Begin subroutine code -------------------------------------------------------------------
+
+    ! Zero everything up front. The loop nest below assigns every point
+    ! explicitly, so this is belt-and-braces protection against future bugs.
+    ice_covered(:,:) = 0.d0
+    topo(:,:) = 0.d0
+    ice_sheet_grid_mask(:,:) = 0.d0
+
+    local_nx = instance%lgrid%size%pt(1)
+    local_ny = instance%lgrid%size%pt(2)
+
+    do jj = 1, local_ny
+       do ii = 1, local_nx
+          if (is_in_active_grid(instance%model%geometry, ii, jj)) then
+             ice_sheet_grid_mask(ii,jj) = 1.d0
+
+             if (is_ice_covered(instance%model%geometry, ii, jj)) then
+                ice_covered(ii,jj) = 1.d0
+             else
+                ice_covered(ii,jj) = 0.d0
+             end if
+
+             ! topo is computed the same way whether or not this point is
+             ! ice-covered. This is in contrast to the method for computing
+             ! ice-free topo in glint_upscaling_gcm.
+             topo(ii,jj) = thk0 * instance%model%geometry%usrf(ii,jj)
+
+          else
+             ! Note that this logic implies that if (in theory) we had an
+             ! ice-covered point outside the "active grid", it would be
+             ! classified as ice-free for these purposes. This mimics the
+             ! logic currently in glint_upscaling_gcm.
+             ice_sheet_grid_mask(ii,jj) = 0.d0
+             ice_covered(ii,jj) = 0.d0
+             topo(ii,jj) = 0.d0
+          end if
+
+       end do
+    end do
+
+  end subroutine set_output_states
+
+
+  !===================================================================
+
+  logical function is_in_active_grid(geometry, i, j)
+
+    ! True if point (i,j) lies inside the "active grid": any point that can
+    ! receive a positive surface mass balance, i.e. any point classified as
+    ! land or ice sheet.
+    !
+    ! TODO(wjs, 2015-03-18) Could the logic here be replaced by the use of some existing
+    ! mask? For now I am simply re-implementing the logic that was in glint.
+
+    type(glide_geometry), intent(in) :: geometry
+    integer, intent(in) :: i, j ! point of interest
+
+    ! Points above sea level are assumed to be land or ice sheet.
+    is_in_active_grid = (thk0 * geometry%usrf(i,j) > 0.d0)
+
+  end function is_in_active_grid
+
+  !===================================================================
+
+  logical function is_ice_covered(geometry, i, j)
+
+    ! True if point (i,j) carries more than the minimum ice thickness.
+    !
+    ! TODO(wjs, 2015-03-18) The logic here should probably be replaced by the use of some
+    ! existing mask. For now I am simply re-implementing the logic that was in glint.
+
+    use glad_constants, only : min_thck
+
+    type(glide_geometry), intent(in) :: geometry
+    integer, intent(in) :: i, j ! point of interest
+
+    is_ice_covered = (thk0 * geometry%thck(i,j) > min_thck)
+
+  end function is_ice_covered
+
+end module glad_output_states
diff --git a/components/cism/glimmer-cism/libglad/glad_restart_gcm.F90 b/components/cism/glimmer-cism/libglad/glad_restart_gcm.F90
new file mode 100644
index 0000000000..d40597c0c0
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_restart_gcm.F90
@@ -0,0 +1,95 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_restart_gcm.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+
+ module glad_restart_gcm
+
+!BOP
+! !MODULE: glad_restart_gcm
+
+! !DESCRIPTION:
+! Contains routines for specialized restarts called by GCMs
+!
+! !REVISION HISTORY:
+!
+! !USES:
+
+   implicit none
+   private
+   save
+
+! !PUBLIC MEMBER FUNCTIONS:
+
+   public :: glad_read_restart_gcm
+
+!EOP
+!***********************************************************************
+
+ contains
+
+!***********************************************************************
+!BOP
+! !IROUTINE: glad_read_restart_gcm
+! !INTERFACE:
+
+   subroutine glad_read_restart_gcm(model, restart_filename)
+
+     ! Prepare 'model' to read its state from the given restart file by
+     ! prepending a netCDF input unit to the model's input list. The model
+     ! itself performs the actual reading of the data later.
+
+     use glide_types
+
+     type(glide_global_type), intent(inout) :: model
+     character(*), intent(in ) :: restart_filename
+
+     ! local variables
+     ! Note: deliberately NOT initialized in the declaration; doing so
+     ! (ic => null()) would give the local pointer an implicit SAVE
+     ! attribute. It is nullified on entry and allocated immediately below.
+     type(glimmer_nc_input), pointer :: ic
+
+     nullify(ic)
+
+     ! create the input unit
+     allocate(ic)
+     ic%get_time_slice = 1
+     ic%nc%filename = trim(restart_filename)
+     ic%nc%vars = ' restart '
+     ic%nc%restartfile = .true.
+     ic%nc%vars_copy = ic%nc%vars
+
+     ! add the input unit to the model
+     ! note that the model will do the actual reading of data
+     model%funits%in_first => ic
+
+   end subroutine glad_read_restart_gcm
+
+!-----------------------------------------------------------------------
+
+end module glad_restart_gcm
+
+!-----------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglad/glad_timestep.F90 b/components/cism/glimmer-cism/libglad/glad_timestep.F90
new file mode 100644
index 0000000000..abdaf23c79
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_timestep.F90
@@ -0,0 +1,348 @@
+#ifdef CPRIBM
+@PROCESS ALIAS_SIZE(107374182)
+#endif
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_timestep.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+
+module glad_timestep
+ !> timestep of a GLAD instance
+
+ use glad_type
+ use glad_constants
+ use glimmer_global, only: dp
+ implicit none
+
+ private
+ public glad_i_tstep_gcm
+
+contains
+
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  subroutine glad_i_tstep_gcm(time, instance, &
+                              ice_tstep)
+
+    ! Performs time-step of an ice model instance.
+    ! Input quantities here are accumulated/average totals since the last call.
+    ! Global output arrays are only valid on the main task.
+    !
+    !   time      - current time in hours
+    !   instance  - model instance to advance (state updated in place)
+    !   ice_tstep - set to .true. if at least one dynamic ice step was taken
+
+    use glimmer_paramets
+    use glimmer_physcon, only: rhow, rhoi
+    use glimmer_log
+    use glimmer_coordinates, only: coordsystem_allocate
+    use glide
+    use glissade
+    use glide_io
+    use glad_mbal_coupling, only : glad_accumulate_input_gcm, glad_average_input_gcm
+    use glad_io
+    use glad_mbal_io
+    use glide_diagnostics
+    use parallel, only: tasks, main_task, this_rank
+    use glad_output_fluxes, only : accumulate_output_fluxes, reset_output_fluxes
+
+    implicit none
+
+    ! ------------------------------------------------------------------------
+    ! Arguments
+    ! ------------------------------------------------------------------------
+
+    integer, intent(in) :: time ! Current time in hours
+    type(glad_instance), intent(inout) :: instance ! Model instance
+    logical, intent(out) :: ice_tstep ! Set if we have done an ice time step
+
+    ! ------------------------------------------------------------------------
+    ! Internal variables
+    ! ------------------------------------------------------------------------
+
+    ! Note: no "=> null()" initializer here -- that would give this local
+    ! pointer an implicit SAVE attribute; it is nullified on entry instead.
+    real(dp),dimension(:,:),pointer :: thck_temp ! temporary array for volume calcs
+
+    integer :: i, il, jl
+
+    nullify(thck_temp)
+
+    if (GLC_DEBUG .and. main_task) then
+       print*, 'In glad_i_tstep_gcm'
+    endif
+
+    ice_tstep = .false.
+
+    call coordsystem_allocate(instance%lgrid, thck_temp)
+
+    ! ------------------------------------------------------------------------
+    ! Sort out some local orography and remove bathymetry. This relies on the
+    ! point 1,1 being underwater. However, it's a better method than just
+    ! setting all points < 0.0 to zero
+    ! ------------------------------------------------------------------------
+
+    !Note: Call to glad_remove_bath is commented out for now. Not sure if it is needed in GCM runs.
+!!    call glide_get_usurf(instance%model, instance%local_orog)
+!!    call glad_remove_bath(instance%local_orog,1,1)
+
+    ! Get ice thickness ----------------------------------------
+
+    call glide_get_thk(instance%model,thck_temp)
+
+    ! Accumulate Glide input fields, acab and artm
+    ! Note: At this point, instance%acab has units of m
+    ! Upon averaging (in glad_average_input_gcm), units are converted to m/yr
+
+    call glad_accumulate_input_gcm(instance%mbal_accum, time, &
+                                   instance%acab, instance%artm)
+
+    if (GLC_DEBUG .and. main_task) then
+       write(stdout,*) ' '
+       write(stdout,*) 'In glad_i_tstep_gcm, time =', time
+       write(stdout,*) 'next_time =', instance%next_time
+       write(stdout,*) 'Check for ice dynamics timestep'
+       write(stdout,*) 'time =', time
+       write(stdout,*) 'start_time =', instance%mbal_accum%start_time
+       write(stdout,*) 'mbal_step =', instance%mbal_tstep
+       write(stdout,*) 'mbal_accum_time =', instance%mbal_accum_time
+       write(stdout,*) 'time-start_time+mbal_tstep =', time - instance%mbal_accum%start_time + instance%mbal_tstep
+       write(stdout,*) 'ice_tstep =', instance%ice_tstep
+       write(stdout,*) 'n_icetstep =', instance%n_icetstep
+    end if
+
+    ! ------------------------------------------------------------------------
+    ! ICE TIMESTEP begins HERE ***********************************************
+    ! ------------------------------------------------------------------------
+
+    if (time - instance%mbal_accum%start_time + instance%mbal_tstep == instance%mbal_accum_time) then
+
+       if (instance%mbal_accum_time < instance%ice_tstep) then
+          instance%next_time = instance%next_time + instance%ice_tstep - instance%mbal_tstep
+       end if
+
+       ice_tstep = .true.
+
+       call reset_output_fluxes(instance%glad_output_fluxes)
+
+       ! ---------------------------------------------------------------------
+       ! Timestepping for ice sheet model
+       ! ---------------------------------------------------------------------
+
+       do i = 1, instance%n_icetstep
+
+          if (GLC_DEBUG .and. main_task) then
+             write (stdout,*) 'Ice sheet timestep, iteration =', i
+          end if
+
+          ! Get average values of acab and artm during mbal_accum_time
+          ! instance%acab has units of m/yr w.e. after averaging
+
+          call glad_average_input_gcm(instance%mbal_accum, instance%mbal_accum_time, &
+                                      instance%acab, instance%artm)
+
+          ! Calculate the initial ice volume (scaled and converted to water equivalent)
+          call glide_get_thk(instance%model,thck_temp)
+          thck_temp = thck_temp * rhoi/rhow
+
+          !Note: Call to glad_remove_bath is commented out for now. Not sure if it is needed in GCM runs.
+          ! Get latest upper-surface elevation (needed for masking)
+!!        call glide_get_usurf(instance%model, instance%local_orog)
+!!        call glad_remove_bath(instance%local_orog,1,1)
+
+          ! Mask out non-accumulation in ice-free areas
+
+          where(thck_temp <= 0.d0 .and. instance%acab < 0.d0)
+             instance%acab = 0.d0
+          end where
+
+          ! Set acab to zero for ocean cells (bed below sea level, no ice present)
+
+          where (GLIDE_IS_OCEAN(instance%model%geometry%thkmask))
+             instance%acab = 0.d0
+          endwhere
+
+          ! Put climate inputs in the appropriate places, with conversion ----------
+
+          ! Note on units:
+          ! For subroutine glide_set_acab, input acab is in m/yr ice; this value is multiplied
+          ! by tim0/(scyr*thk0) and copied to data%climate%acab.
+          ! Input artm is in deg C; this value is copied to data%climate%artm (no unit conversion).
+
+          !TODO - It is confusing to have units of m/yr w.e. for instance%acab, compared to units m/yr ice for Glide.
+          !       Change to use the same units consistently? E.g., switch to w.e. in Glide
+
+          call glide_set_acab(instance%model, instance%acab * rhow/rhoi)
+          call glide_set_artm(instance%model, instance%artm)
+
+          ! This will work only for single-processor runs
+          if (GLC_DEBUG .and. tasks==1) then
+             il = instance%model%numerics%idiag
+             jl = instance%model%numerics%jdiag
+             write (stdout,*) ' '
+             write (stdout,*) 'After glide_set_acab, glide_set_artm: i, j =', il, jl
+             write (stdout,*) 'acab (m/y), artm (C) =', instance%acab(il,jl)*rhow/rhoi, instance%artm(il,jl)
+          end if
+
+          ! Adjust glad acab for output
+
+          where (instance%acab < -thck_temp .and. thck_temp > 0.d0)
+             instance%acab = -thck_temp
+          end where
+
+          instance%glide_time = instance%glide_time + instance%model%numerics%tinc
+
+          ! call the dynamic ice sheet model (provided the ice is allowed to evolve)
+
+          if (instance%evolve_ice == EVOLVE_ICE_TRUE) then
+
+             if (instance%model%options%whichdycore == DYCORE_GLIDE) then
+
+                call glide_tstep_p1(instance%model, instance%glide_time)
+
+                call glide_tstep_p2(instance%model)
+
+                call glide_tstep_p3(instance%model)
+
+             else ! glam/glissade dycore
+
+                ! Leftover debug output, now guarded so it only appears in
+                ! debug runs on the main task (was an unconditional print).
+                if (GLC_DEBUG .and. main_task) then
+                   print*, 'call glissade_tstep'
+                end if
+
+                call glissade_tstep(instance%model, instance%glide_time)
+
+             endif
+
+          endif ! evolve_ice
+
+          ! Leftover debug output, guarded (was an unconditional print).
+          if (GLC_DEBUG .and. main_task) then
+             print*, 'write diagnostics'
+          end if
+
+          ! write ice sheet diagnostics at specified interval (model%numerics%dt_diag)
+
+          call glide_write_diagnostics(instance%model, &
+                                       instance%model%numerics%time, &
+                                       tstep_count = instance%model%numerics%timecounter)
+
+          ! write netCDF output
+
+          call glide_io_writeall(instance%model,instance%model)
+          call glad_io_writeall(instance,instance%model)
+
+          ! Accumulate Glide output fields to be sent to GCM
+
+          call accumulate_output_fluxes(instance%glad_output_fluxes, instance%model)
+
+       end do ! instance%n_icetstep
+
+    end if ! time - instance%mbal_accum%start_time + instance%mbal_tstep == instance%mbal_accum_time
+
+    ! Leftover debug output, guarded (was an unconditional print).
+    if (GLC_DEBUG .and. main_task) then
+       print*, 'output instantaneous values'
+    end if
+
+    ! Output instantaneous values
+
+    call glad_mbal_io_writeall(instance, instance%model, &
+                               outfiles = instance%out_first, &
+                               time = time*hours2years)
+
+    ! Deallocate
+
+    if (associated(thck_temp)) then
+       deallocate(thck_temp)
+       thck_temp => null()
+    endif
+
+    if (GLC_DEBUG .and. main_task) then
+       write(stdout,*) 'Done in glad_i_tstep_gcm'
+    endif
+
+  end subroutine glad_i_tstep_gcm
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !TODO - Rewrite glad_remove_bath to support multiple tasks?
+ ! Calls to this subroutine are currently commented out.
+
+  subroutine glad_remove_bath(orog,x,y)
+
+    ! Set ocean areas (negative orography) to zero height, flood-filling
+    ! recursively outward from a known ocean point (x,y).
+
+    use glimmer_log
+    use parallel, only : tasks
+
+    real(dp),dimension(:,:),intent(inout) :: orog !> Orography --- used for input and output
+    integer, intent(in) :: x,y !> Location of starting point (index)
+
+    integer :: nx,ny  ! extents of the orography array
+
+    ! Callers currently assume point 1,1 is ocean, which does not hold for a
+    ! distributed grid on multiple processors. This cannot be made a fatal
+    ! error, because the routine may still be reached with more than one
+    ! task; the hope is that the returned data are not needed in CESM then.
+    if (tasks > 1) then
+       call write_log('Use of glad_remove_bath currently assumes the use of only one task', &
+                      GM_WARNING, __FILE__, __LINE__)
+    end if
+
+    nx = size(orog,1)
+    ny = size(orog,2)
+
+    ! Zero the seed point itself, then fill outward from it.
+    if (orog(x,y) < 0.d0) orog(x,y) = 0.d0
+    call glad_find_bath(orog,x,y,nx,ny)
+
+  end subroutine glad_remove_bath
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  recursive subroutine glad_find_bath(orog,x,y,nx,ny)
+
+    !> Recursive helper called by glad_remove_bath: zero any negative
+    !> (below-sea-level) neighbours of point (x,y) and recurse into them,
+    !> flood-filling the connected ocean region.
+    !> (Previous doc comment referred to "glimmer_remove_bath"; the actual
+    !> caller in this module is glad_remove_bath.)
+
+    real(dp),dimension(:,:),intent(inout) :: orog !> Orography --- used for input and output
+    integer, intent(in) :: x,y !> Starting point
+    integer, intent(in) :: nx,ny !> Size of array {\tt orography}
+
+    ! Neighbour offsets (W, E, S, N). Declared as parameters: initializing
+    ! ordinary locals in their declarations would give them an implicit
+    ! SAVE attribute, which is best avoided even where harmless.
+    integer,dimension(4),parameter :: xi = (/ -1,1,0,0 /)
+    integer,dimension(4),parameter :: yi = (/ 0,0,-1,1 /)
+    integer, parameter :: ns = 4
+    integer :: i
+
+    do i=1,ns
+       ! Only visit neighbours that lie inside the array bounds
+       if (x+xi(i) <= nx .and. x+xi(i) > 0 .and. &
+           y+yi(i) <= ny .and. y+yi(i) > 0) then
+          if (orog(x+xi(i),y+yi(i)) < 0.d0) then
+             orog(x+xi(i),y+yi(i)) = 0.d0
+             call glad_find_bath(orog,x+xi(i),y+yi(i),nx,ny)
+          endif
+       endif
+    enddo
+
+  end subroutine glad_find_bath
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+end module glad_timestep
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
diff --git a/components/cism/glimmer-cism/libglad/glad_type.F90 b/components/cism/glimmer-cism/libglad/glad_type.F90
new file mode 100644
index 0000000000..9e3941d07f
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_type.F90
@@ -0,0 +1,316 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glad_type.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#define NCO outfile%nc
+#define NCI infile%nc
+
+module glad_type
+
+ !> contains type definitions for GLAD
+
+ use glimmer_global, only: dp
+ use glide_types
+ use glad_input_averages, only : glad_input_averages_type, initialize_glad_input_averages
+ use glad_mbal_coupling, only : glad_mbc
+ use glad_output_fluxes, only : glad_output_fluxes_type, initialize_glad_output_fluxes
+
+ implicit none
+
+ ! Constants that describe the options available
+
+ ! basic Glad options
+
+ integer, parameter :: EVOLVE_ICE_FALSE = 0 ! do not let the ice sheet evolve
+ ! (hold the ice state fixed at initial condition)
+ integer, parameter :: EVOLVE_ICE_TRUE = 1 ! let the ice sheet evolve
+
+ integer, parameter :: ZERO_GCM_FLUXES_FALSE = 0 ! send true fluxes to the GCM
+ integer, parameter :: ZERO_GCM_FLUXES_TRUE = 1 ! zero out all fluxes sent to the GCM
+
+ !TODO - Add other Glad options here to avoid hardwiring of case numbers?
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  type glad_instance
+
+     !> Derived type holding information about ice model instance.
+     !> Note that variables used for downscaling & upscaling are only valid on the main task,
+     !> since all downscaling and upscaling is done there.
+
+     type(coordsystem_type) :: lgrid !> Local grid for interfacing with glide (grid on this task)
+                                     !> (WJS: Note that origin may be incorrect with multiple tasks;
+                                     !> as far as I can tell, this isn't currently a problem)
+     type(glad_input_averages_type) :: glad_inputs !> Time-averaged inputs from the climate model
+     type(glide_global_type) :: model !> The instance and all its arrays.
+     character(fname_length) :: paramfile !> The name of the configuration file.
+     integer :: ice_tstep !> Ice timestep in hours
+     integer :: mbal_tstep !> Mass-balance timestep in hours
+     integer :: mbal_accum_time !> Accumulation time for mass-balance (hours)
+                                !> (defaults to ice time-step)
+     integer :: ice_tstep_multiply=1 !> Ice time multiplier (non-dimensional)
+     integer :: n_icetstep !> Number of ice time-steps per mass-balance accumulation
+     real(dp) :: glide_time !> Time as seen by glide (years)
+     integer :: next_time !> The next time we expect to be called (hours)
+
+     ! Climate inputs, on the local grid -------------------------
+
+     real(dp),dimension(:,:),pointer :: artm => null() !> Annual mean air temperature
+     real(dp),dimension(:,:),pointer :: acab => null() !> Annual mass balance (m/y water equiv)
+
+     ! Arrays to accumulate mass-balance quantities --------------
+
+     type(glad_mbc) :: mbal_accum
+
+     ! Climate options -------------------------------------------
+
+     integer :: evolve_ice = 1
+
+     !> Whether the ice sheet can evolve:
+     !> \begin{description}
+     !> \item[0] The ice sheet cannot evolve; hold fixed at initial state
+     !> \item[1] The ice sheet can evolve
+     !> \end{description}
+
+     logical :: test_coupling = .false.
+
+     integer :: zero_gcm_fluxes = ZERO_GCM_FLUXES_FALSE
+
+     !> Whether to zero out the fluxes (e.g., calving flux) sent to the GCM
+     !> \begin{description}
+     !> \item[0] send true fluxes to the GCM
+     !> \item[1] zero out all fluxes sent to the GCM
+     !> \end{description}
+
+     ! Latitude & longitude of model grid points
+     ! (the dimension(:,:) attribute alone declares these as rank-2 arrays;
+     ! the previous declarations redundantly repeated "(:,:)" on the entity)
+     real(dp), dimension(:,:), pointer :: lat => null()
+     real(dp), dimension(:,:), pointer :: lon => null()
+
+     ! Fields for averaging dycore output
+     type(glad_output_fluxes_type) :: glad_output_fluxes
+     real(dp), dimension(:,:), pointer :: hflx_tavg => null() ! conductive heat flux at top surface (W m-2)
+     real(dp), dimension(:,:), pointer :: rofi_tavg => null() ! solid ice runoff (kg m-2 s-1)
+     real(dp), dimension(:,:), pointer :: rofl_tavg => null() ! liquid runoff from basal/interior melting (kg m-2 s-1)
+
+     ! Pointers to file input and output
+
+     type(glimmer_nc_output),pointer :: out_first => null() !> first element of linked list defining netCDF outputs
+     type(glimmer_nc_input), pointer :: in_first => null() !> first element of linked list defining netCDF inputs
+
+  end type glad_instance
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+contains
+
+  subroutine glad_i_allocate_gcm(instance, force_start)
+
+    ! (Re)allocate the top-level arrays of a model instance, zero them, and
+    ! initialize the input-averaging and output-flux accumulators.
+    ! Safe to call repeatedly: previously allocated arrays are freed first.
+
+    implicit none
+
+    type(glad_instance),intent(inout) :: instance !> Instance whose elements are to be allocated.
+    integer, intent(in) :: force_start !> glad forcing start time (hours)
+
+    integer :: ewn,nsn ! dimensions of local grid
+
+    ewn = get_ewn(instance%model)
+    nsn = get_nsn(instance%model)
+
+    ! Climate inputs: deallocate if necessary, then reallocate and zero
+    if (associated(instance%artm)) deallocate(instance%artm)
+    allocate(instance%artm(ewn,nsn)); instance%artm = 0.d0
+    if (associated(instance%acab)) deallocate(instance%acab)
+    allocate(instance%acab(ewn,nsn)); instance%acab = 0.d0
+
+    ! Grid coordinates
+    if (associated(instance%lat)) deallocate(instance%lat)
+    allocate(instance%lat(ewn,nsn)); instance%lat = 0.d0
+    if (associated(instance%lon)) deallocate(instance%lon)
+    allocate(instance%lon(ewn,nsn)); instance%lon = 0.d0
+
+    ! Time-averaged dycore outputs
+    if (associated(instance%rofi_tavg)) deallocate(instance%rofi_tavg)
+    allocate(instance%rofi_tavg(ewn,nsn)); instance%rofi_tavg = 0.d0
+    if (associated(instance%rofl_tavg)) deallocate(instance%rofl_tavg)
+    allocate(instance%rofl_tavg(ewn,nsn)); instance%rofl_tavg = 0.d0
+    if (associated(instance%hflx_tavg)) deallocate(instance%hflx_tavg)
+    allocate(instance%hflx_tavg(ewn,nsn)); instance%hflx_tavg = 0.d0
+
+    ! Accumulators for inputs from, and outputs to, the climate model
+    call initialize_glad_input_averages(instance%glad_inputs, ewn=ewn, nsn=nsn, &
+                                        next_av_start=force_start)
+
+    call initialize_glad_output_fluxes(instance%glad_output_fluxes, ewn=ewn, nsn=nsn)
+
+  end subroutine glad_i_allocate_gcm
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !TODO - Move the next two subroutines to a new module called glad_setup?
+ ! This would be analogous to the organization of Glide.
+
+  subroutine glad_i_readconfig(instance,config)
+
+    !> Read the 'GLAD climate' section of the configuration file into the
+    !> instance, then read the netCDF I/O configuration.
+    !> A non-positive (or absent) mbal_accum_time is stored as -1, meaning
+    !> "choose a default later" (max of ice and mbal timesteps).
+
+    use glimmer_config
+    use glimmer_log
+    use glad_constants, only: years2hours
+
+    implicit none
+
+    ! Arguments
+
+    type(ConfigSection), pointer :: config !> structure holding sections of configuration file
+    type(glad_instance),intent(inout) :: instance !> The instance being initialised.
+
+    ! Internals
+
+    type(ConfigSection), pointer :: section
+    real(dp) :: mbal_time_temp ! Accumulation time in years
+
+    mbal_time_temp = -1.d0
+
+    call GetSection(config,section,'GLAD climate')
+    if (associated(section)) then
+       call GetValue(section,'evolve_ice',instance%evolve_ice)
+       call GetValue(section,'test_coupling',instance%test_coupling)
+       call GetValue(section,'mbal_accum_time',mbal_time_temp)
+       call GetValue(section,'ice_tstep_multiply',instance%ice_tstep_multiply)
+       call GetValue(section,'zero_gcm_fluxes',instance%zero_gcm_fluxes)
+    end if
+
+    ! Convert years -> hours. Use a double-precision literal in the
+    ! comparison for consistency with mbal_time_temp's kind (previously
+    ! compared against the default-real literal 0.0).
+    if (mbal_time_temp > 0.d0) then
+       instance%mbal_accum_time = mbal_time_temp * years2hours
+    else
+       instance%mbal_accum_time = -1
+    end if
+
+    call glad_nc_readparams(instance,config)
+
+  end subroutine glad_i_readconfig
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  subroutine glad_nc_readparams(instance,config)
+
+    !> Read netCDF I/O related configuration sections ('GLAD output' and
+    !> 'GLAD input'), building the instance's linked lists of output and
+    !> input units. Based on glimmer_ncparams.
+
+    use glimmer_config
+    use glimmer_ncparams, only: handle_output, handle_input, configstring
+    implicit none
+
+    type(glad_instance) :: instance !> GLAD instance
+    type(ConfigSection), pointer :: config !> structure holding sections of configuration file
+
+    ! local variables
+    type(ConfigSection), pointer :: section
+    type(glimmer_nc_output), pointer :: outnode
+    type(glimmer_nc_input), pointer :: innode
+
+    outnode => null()
+    innode => null()
+
+    ! Build the chain of output units, remembering the head of the list
+    call GetSection(config,section,'GLAD output')
+    do while(associated(section))
+       outnode => handle_output(section,outnode,0.d0,configstring)
+       if (.not.associated(instance%out_first)) instance%out_first => outnode
+       call GetSection(section%next,section,'GLAD output')
+    end do
+
+    ! Build the chain of input units, remembering the head of the list
+    call GetSection(config,section,'GLAD input')
+    do while(associated(section))
+       innode => handle_input(section,innode)
+       if (.not.associated(instance%in_first)) instance%in_first => innode
+       call GetSection(section%next,section,'GLAD input')
+    end do
+
+    ! Drop the working pointers; the lists remain reachable via the instance
+    outnode => null()
+    innode => null()
+
+  end subroutine glad_nc_readparams
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  subroutine glad_i_printconfig(instance)
+
+    !> Write the GLAD climate configuration of this instance to the log.
+
+    use glimmer_log
+    use glad_constants, only: hours2years
+    use parallel, only: tasks
+
+    implicit none
+
+    ! Argument
+
+    type(glad_instance),intent(inout) :: instance !> The instance to be printed
+
+    ! Internal
+
+    character(len=100) :: message ! scratch buffer for formatted log lines
+
+    call write_log(' ')
+    call write_log('GLAD climate')
+    call write_log('-------------')
+
+    write(message,*) 'evolve_ice (0=fixed, 1=evolve): ',instance%evolve_ice
+    call write_log(message)
+    write(message,*) 'test_coupling: ',instance%test_coupling
+    call write_log(message)
+
+    if (instance%evolve_ice == EVOLVE_ICE_FALSE) then
+       call write_log('The ice sheet state will not evolve after initialization')
+    endif
+
+    ! mbal_accum_time == -1 is the sentinel for "not set in the config file"
+    if (instance%mbal_accum_time == -1) then
+       call write_log('Mass-balance accumulation time will be set to max(ice timestep, mbal timestep)')
+    else
+       write(message,*) 'Mass-balance accumulation time:',instance%mbal_accum_time * hours2years,' years'
+       call write_log(message)
+    end if
+
+    write(message,*) 'ice_tstep_multiply:',instance%ice_tstep_multiply
+    call write_log(message)
+
+    write(message,*) 'zero_gcm_fluxes: ', instance%zero_gcm_fluxes
+    call write_log(message)
+
+  end subroutine glad_i_printconfig
+
+end module glad_type
diff --git a/components/cism/glimmer-cism/libglad/glad_vars.def b/components/cism/glimmer-cism/libglad/glad_vars.def
new file mode 100644
index 0000000000..fe0f1c6ea5
--- /dev/null
+++ b/components/cism/glimmer-cism/libglad/glad_vars.def
@@ -0,0 +1,56 @@
+#[]
+#dimensions: time, y1, x1
+#units:
+#long_name:
+#data:
+#factor:
+
+# setup for code generator
+[VARSET]
+# prefix of the generated module
+name: glad
+# f90 type containing all necessary data
+datatype: glad_instance
+# module where type is defined
+datamod: glad_type
+
+[lat]
+dimensions: time, y1, x1
+units: degreeN
+long_name: latitude
+standard_name: latitude
+data: data%lat
+load: 1
+# (duplicate entry removed during review: standard_name is already set above)
+
+[lon]
+dimensions: time, y1, x1
+units: degreeE
+long_name: longitude
+data: data%lon
+load: 1
+standard_name: longitude
+
+[rofi_tavg]
+dimensions: time, y1, x1
+units: kg m-2 s-1
+long_name: solid calving flux
+data: data%rofi_tavg
+load: 1
+coordinates: lon lat
+
+[rofl_tavg]
+dimensions: time, y1, x1
+units: kg m-2 s-1
+long_name: liquid runoff flux
+data: data%rofl_tavg
+load: 1
+coordinates: lon lat
+
+[hflx_tavg]
+dimensions: time, y1, x1
+units: W m-2
+long_name: heat flux to ice surface
+data: data%hflx_tavg
+load: 1
+coordinates: lon lat
diff --git a/components/cism/glimmer-cism/libglide/felix_dycore_interface.F90 b/components/cism/glimmer-cism/libglide/felix_dycore_interface.F90
new file mode 100644
index 0000000000..be33358fe7
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/felix_dycore_interface.F90
@@ -0,0 +1,757 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! felix_dycore_interface.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+module felix_dycore_interface
+
+ use glimmer_physcon, only : scyr
+ use glimmer_paramets, only : vel0, tau0, vis0
+ use glide_types
+ use glimmer_log
+ use parallel
+ use glissade_grid_operators, only: glissade_stagger
+ !use glimmer_to_dycore
+
+ implicit none
+ private
+
+
+ !--------------------------------------------------------------------
+ !
+ ! Public parameters
+ !
+ !--------------------------------------------------------------------
+
+ !--------------------------------------------------------------------
+ !
+ ! Public member functions
+ !
+ !--------------------------------------------------------------------
+
+ public :: felix_velo_init, &
+ felix_velo_driver
+
+ !--------------------------------------------------------------------
+ !
+ ! Private module variables
+ !
+ !--------------------------------------------------------------------
+
+
+!***********************************************************************
+
+
+contains
+
+
+!***********************************************************************
+!
+! routine felix_velo_init
+!
+!> \brief Initializes the external Albany-FELIX velocity solver
+!> \author Irina Kalashnikova
+!> \date 13 September 2013
+!> \version SVN:$Id$
+!> \details
+!> This routine initializes the external Albany-FELIX ice velocity solver.
+!
+!-----------------------------------------------------------------------
+
+ !> Initialize the external Albany-FELIX velocity solver for this model.
+ !> Currently a stub: the call into the external dycore (gtd_init_dycore)
+ !> is commented out pending interface decisions -- see the notes below.
+ subroutine felix_velo_init(model)
+
+ !-----------------------------------------------------------------
+ !
+ ! input variables
+ !
+ !-----------------------------------------------------------------
+
+ !-----------------------------------------------------------------
+ !
+ ! input/output variables
+ !
+ !-----------------------------------------------------------------
+
+ type(glide_global_type),intent(inout) :: model
+
+ !-----------------------------------------------------------------
+ !
+ ! output variables
+ !
+ !-----------------------------------------------------------------
+
+ !-----------------------------------------------------------------
+ !
+ ! local variables
+ !
+ !-----------------------------------------------------------------
+
+
+ ! NOTE(review): development-time debug print; consider removing or
+ ! guarding with a verbosity flag before production use
+ if (this_rank == 0) print *, 'DEBUG: Inside felix_velo_init.'
+
+ ! === First do any preparations needed on the CISM side (if any)
+
+
+ ! === Now call the external Albany code for any init that it needs to do
+ !call gtd_init_dycore(model,dycore_model_index)
+ ! Doug - does this interface still make sense here?
+ ! Doug - what needs to change (if anything) if the code is compiled without
+ ! external Felix libraries? Do we need a stub module?
+ ! Doug - We might need to do some rearranging to make sure the call
+ ! to gtd_init_dycore_interface happens in the right place
+ ! (presumably in simple_glide/simple_felix/cism_driver).
+ ! (I think I see how to do this, but will wait for now.)
+
+ !--------------------------------------------------------------------
+ end subroutine felix_velo_init
+
+
+
+
+!***********************************************************************
+!
+! routine felix_velo_driver
+!
+!> \brief Makes preparations and calls the external Albany-FELIX velocity solver
+!> \author Irina Kalashnikova
+!> \date 13 September 2013
+!> \version SVN:$Id$
+!> \details
+!> This routine makes preparations and calls the external
+!> Albany-FELIX velocity solver.
+!
+!-----------------------------------------------------------------------
+
+ !> Driver for the external Albany-FELIX velocity solve: rescales CISM's
+ !> nondimensional model fields to physical units (via thk0, len0, tau0,
+ !> vel0, vis0, scyr) and hands them to get_parallel_finite_element_mesh_data,
+ !> which builds the finite-element mesh arrays FELIX consumes.  The actual
+ !> call into the external dycore (gtd_run_dycore) is still commented out.
+ subroutine felix_velo_driver(model)
+
+ use glimmer_global, only : dp
+ use glimmer_physcon, only: gn, scyr
+ use glimmer_paramets, only: thk0, len0, vel0, vis0
+ use glimmer_log
+ use glide_types
+ use glide_mask
+
+ !-----------------------------------------------------------------
+ !
+ ! input variables
+ !
+ !-----------------------------------------------------------------
+
+ !-----------------------------------------------------------------
+ !
+ ! input/output variables
+ !
+ !-----------------------------------------------------------------
+
+ type(glide_global_type),intent(inout) :: model
+
+ !-----------------------------------------------------------------
+ !
+ ! local variables
+ !
+ !-----------------------------------------------------------------
+
+
+ ! Convert model fields to physical units on the fly:
+ ! lengths scaled by len0/thk0 (m), beta by tau0/vel0/scyr, flwa by vis0*scyr.
+ ! nhalo is taken from the 'parallel' module used at module scope.
+ call get_parallel_finite_element_mesh_data(model%general%ewn, model%general%nsn ,&
+ model%general%upn, &
+ model%numerics%sigma, &
+ nhalo, &
+ len0 * model%numerics%dew, &
+ len0 * model%numerics%dns, &
+ thk0 * model%geometry%thck, &
+ thk0 * model%geometry%usrf, &
+ thk0 * model%geometry%topg,&
+ thk0 * model%numerics%thklim, &
+ (tau0 / vel0 / scyr) *model%velocity%beta, &
+ (vis0*scyr) *model%temper%flwa)
+
+
+ !IK, 10/24/13, notes to self:
+ !To use constant flwa = 1e-16, set flow_law = 0 in input (config) file
+ !To use beta field from .nc file, set which_ho_babc = 5 in input (config)
+ !file; to use no-slip, set which_ho_babc = 4
+
+
+ ! NOTE(review): the two banners below are leftover declaration-section
+ ! template comments; they sit after executable code and mark nothing.
+ !-----------------------------------------------------------------
+ !
+ ! output variables
+ !
+ !-----------------------------------------------------------------
+
+ !-----------------------------------------------------------------
+ !
+ ! local variables
+ !
+ !-----------------------------------------------------------------
+
+
+ ! NOTE(review): development-time debug print; consider removing or
+ ! guarding with a verbosity flag before production use
+ if (this_rank == 0) print *, 'DEBUG: Inside felix_velo_driver.'
+
+ ! === First do any preparations needed on the CISM side
+
+
+ ! === Now call the external Albany code
+ !call gtd_run_dycore(dycore_model_index,cur_time,time_inc)
+ ! Doug - does this interface still make sense here?
+ ! Doug - what needs to change (if anything) if the code is compiled without
+ ! external Felix libraries? Do we need a stub module?
+
+
+ !--------------------------------------------------------------------
+ end subroutine felix_velo_driver
+
+
+
+!***********************************************************************
+! Private subroutines:
+!***********************************************************************
+
+
+
+!***********************************************************************
+!
+! routine get_parallel_finite_element_mesh_data
+!
+!> \author Irina Kalashnikova
+!> \date 18 September 2013
+!> \version SVN:$Id$
+!> \details
+!
+! Naming convention:
+! - cells, vertices are in 2D
+! - elements, nodes are in 3D
+!
+! The function get_parallel_finite_element_mesh_data creates a parallel mesh of
+! a given geometry using the data. In particular, global node and element IDs
+! are created, and an offset it added to the x and y coordinates on
+! multi-processor runs. The following are data that would be needed in
+! Albany/FELIX (so these would need to be passed through an interface b/w the 2
+! codes):
+!
+! xyz_at_nodes:
+! Double array of size (nx-1)*(ny-1)*nz x 3. It gives the x, y
+! and z coordinates of all the nodes on each processor.
+! Note: Right now this array consists of the full mesh, in
+! particular, non-active nodes have not been removed. This is OK for
+! Albany/FELIX -- the non-active nodes will not be assembled as they will not
+! appear in global_element_conn_active, the element connectivity array. We could
+! remove the non-active nodes at some point to avoid passing stuff b/w the
+! codes that isn't needed.
+! Note 2: the nodes need to be converted to km prior to being
+! passed to Albany/FELIX b/c Albany/FELIX works with meshes in km.
+!
+! global_node_id_owned_map:
+! Integer array of size (nx-1)*(ny-1)*nz x 1. This is
+! effectively a map from local node IDs to global node
+! IDs. It is 1-based, consistent w/ Fortran numbering (so the first node
+! is node number 1).
+!
+! global_element_conn_active:
+! Dynamically allocated integer array of size nCellsActive*(nz-1) x 8 where nCellsActive is
+! the number of active elements (with ice) in 2D. This array is the element
+! connectivity array. The 8 columns of this array give the element
+! connectivity (node #s defining a given element), 1-based.
+! Note: The global element numbering in global_element_conn_active will be
+! non-contiguous and there will be some element #s
+! missing (e.g., if elements 1 and 2 are not active, they will not appear in
+! global_element_conn_active). This is OK for Albany/FELIX. Also some of the nodes
+! (the non-active ones) will not appear in the connectivity array. This
+! is OK too.
+!
+! global_element_id_active_owned_map:
+! Dynamically allocated integer array of size
+! nCellsActive*(nz-1) x 1 where nCellsActive is the number of active
+! elements (with ice) in 2D. This is a map from local element IDs to global element IDs.
+! It is 1-based. Only active elements are included.
+!
+! global_basal_face_conn_active:
+! Dynamically allocated integer array of size nCellsActive x 5 where nCellsActive is the number of
+! active elements (with ice) in 2D. This array is the basal face connectivity
+! array. The first column gives the global number of the element to
+! which the face belongs. The next 4 columns give the face connectivity (node #s
+! defining the face of the element), again 1-based.
+! Note: Same comment as for global_element_conn_active.
+!
+! global_basal_face_id_active_owned_map:
+! Dynamically allocated integer array of size
+! nCellsActive x 1 where nCellsActive is the number of
+! active elements (with ice) in 2D. This is a map from local
+! face IDs to global face IDs. It is 1-based. Only active
+! faces are included.
+!
+! surf_height_at_nodes:
+! Double array of size (nx-1)*(ny-1)*nz. This is effectively
+! stagusrf extended to 3D (we need it as a 3D data structure in Albany/FELIX).
+! Note: Like the xyz_at_nodes array this would be defined at all the
+! nodes in the original mesh, so it would include non-active
+! nodes. We can change this at some point if we don't want to pass extra stuff b/w
+! codes. Note also that this needs to be converted to km as Albany/FELIX uses meshes in km not
+! meters.
+!
+!
+!-----------------------------------------------------------------------
+
+ !> Build the parallel hexahedral finite-element mesh arrays that the
+ !> Albany/FELIX solver consumes: global vertex/node numbering, node
+ !> coordinates (km), surface height and beta at nodes, element and basal-face
+ !> connectivity, and flwa at active elements.  See the long comment block
+ !> preceding this routine for a description of each generated array.
+ subroutine get_parallel_finite_element_mesh_data(nx, ny, &
+ nz, sigma, &
+ nhalo, &
+ dx, dy, &
+ thck, usrf, &
+ topg, &
+ thklim, beta, &
+ flwa)
+
+ !-----------------------------------------------------------------
+ !
+ ! input variables
+ !
+ !-----------------------------------------------------------------
+
+ integer, intent(in) :: &
+ nx, ny, & ! number of grid cells in each direction
+ nz, & ! number of vertical levels where velocity is computed
+ ! (same as model%general%upn)
+ nhalo ! number of rows/columns of halo cells
+
+ real(dp), dimension(:), intent(in) :: &
+ sigma ! normalized vertical coordinate of each level
+
+ real(dp), intent(in) :: &
+ dx, dy ! grid cell length and width (m)
+ ! assumed to have the same value for each grid
+ ! cell
+
+ real(dp), dimension(:,:), intent(in) :: &
+ thck, & ! ice thickness (m)
+ usrf, & ! upper surface elevation (m)
+ topg ! elevation of topography (m) -- currently unused here
+
+
+ real(dp), intent(in) :: &
+ thklim ! minimum ice thickness for active cells (m)
+
+ real(dp), dimension(:,:), intent(in) :: &
+ beta ! basal traction parameter
+
+ real(dp), dimension(:,:,:), intent(in) :: &
+ flwa ! flow factor parameter
+
+
+ !-----------------------------------------------------------------
+ !
+ ! input/output variables
+ !
+ !-----------------------------------------------------------------
+
+ !-----------------------------------------------------------------
+ !
+ ! output variables
+ !
+ !-----------------------------------------------------------------
+
+ !IK, 9/8/13: xyz_at_nodes will need to be passed to Albany/FELIX
+ !These are divided by 1000 to convert from meters to km, as Albany/FELIX
+ !takes meshes in km.
+ !TO DO: make xyz_at_nodes have intent(out)
+ real(dp), dimension((nx-2*nhalo+1)*(ny-2*nhalo+1)*nz,3) :: &
+ xyz_at_nodes ! x, y and z coordinates of each vertex
+
+ !IK, 9/8/13: global_node_id_owned_map will need to be passed to Albany/FELIX
+ !TO DO: make global_node_id_owned_map have intent(out)
+ integer, dimension((nx-2*nhalo+1)*(ny-2*nhalo+1)*nz) :: &
+ global_node_id_owned_map !This is effectively a map from local -> global IDs
+ !for the full 3D mesh
+
+ !IK, 9/8/13: surf_height_at_nodes will need to be passed to Albany/FELIX
+ !These values are divided by 1000 to convert from meters to km, as
+ !Albany/FELIX
+ !takes meshes in km.
+ !TO DO: make surf_height_at_nodes have intent(out)
+ real(dp), dimension((nx-2*nhalo+1)*(ny-2*nhalo+1)*nz) :: &
+ surf_height_at_nodes !This is an extension of
+ !stagusrf to 3D
+
+ !IK, 9/8/13: global_element_conn_active will need to be passed to
+ !Albany/FELIX
+ !TO DO: make global_element_conn_active have intent(out)
+ integer, dimension(:, :), allocatable :: &
+ global_element_conn_active !Like global_element_conn but first column is
+ !removed and only active elements (cells)
+ !are included
+
+
+ !IK, 9/12/13: global_element_id_active_owned_map will need to be passed
+ !to Albany/FELIX
+ !TO DO: make global_element_id_active_owned_map have intent(out)
+ integer, dimension(:, :), allocatable :: &
+ global_element_id_active_owned_map !First column of global_element_conn;
+ !only active elements (cells) are
+ !included.
+ !This is effectively a map from
+ !local -> global IDs for the
+ !elements
+
+ !TO DO: make global_basal_face_conn_active have intent(out)
+ integer, dimension(:, :), allocatable :: &
+ global_basal_face_conn_active !Like global_basal_face_conn but only active
+ !elements (cells) are included
+
+ !IK, 9/12/13: global_basal_face_id_active_owned_map will need to be passed to
+ !Albany/FELIX
+ !TO DO: make global_basal_face_id_active_owned_map have intent(out)
+ integer, dimension(:, :), allocatable :: &
+ global_basal_face_id_active_owned_map !First column of global_basal_face_conn;
+ !only active elements (cells) are included.
+ !This is effectively a map from
+ !local -> global IDs for the
+ !basal faces
+
+ !IK, 10/24/13: beta_at_nodes will need to be passed to Albany/FELIX
+ !These values are divided by 1000 to convert beta to the units expected by
+ !Albany/FELIX
+ !(consistent with a mesh in km, so beta needs to be converted to the
+ !appropriate units).
+ !TO DO: make beta_at_nodes have intent(out)
+ real(dp), dimension((nx-2*nhalo+1)*(ny-2*nhalo+1)*nz) :: &
+ beta_at_nodes !This is an extension of
+ !beta to 3D
+
+ !IK, 10/24/13: flwa_at_active_cells will need to be passed to Albany/FELIX
+ !This is the value of the flow factor at the elements
+ !These values are multiplied by 1.0e12 to convert to Albany/FELIX units
+ !TO DO: make flwa_at_active_cells have intent(out)
+ real(dp), dimension(:, :), allocatable :: &
+ flwa_at_active_cells !This is essentially flwa in
+ !vector form and at only the active
+ !cells
+
+
+ !-----------------------------------------------------------------
+ !
+ ! local variables
+ !
+ !-----------------------------------------------------------------
+
+ integer, dimension(nx,ny) :: &
+ imask ! = 1 where ice is present, else = 0
+
+ logical, dimension(nx,ny) :: &
+ active_cell ! true for active cells (thck > thklim and border locally owned vertices)
+
+
+ real(dp), dimension(nx-1,ny-1) :: &
+ stagusrf, & ! upper surface averaged to vertices
+ stagthck ! ice thickness averaged to vertices
+
+ real(dp), dimension((nx-2*nhalo+1)*(ny-2*nhalo+1),2) :: &
+ xy_at_vertices ! x and y coordinates of each vertex
+
+ integer, dimension((nx-2*nhalo+1)*(ny-2*nhalo+1)) :: &
+ global_vertex_id_owned_map !global IDs of 2D mesh
+
+ integer, dimension((nx-2*nhalo)*(ny-2*nhalo)) :: &
+ global_cell_id_owned_map !global IDs of cells in 2D mesh
+
+ logical, dimension((nx-2*nhalo)*(ny-2*nhalo)) :: &
+ active_cell_vector !This is like active_cell except in vector
+ !form
+
+ logical, dimension((nx-2*nhalo)*(ny-2*nhalo)*(nz-1)) :: &
+ active_cell_vector3D !This is an extension of
+ !active_cell_vector to 3D
+
+ integer, dimension((nx-2*nhalo)*(ny-2*nhalo)*(nz-1), 9) :: &
+ global_element_conn !First column is effectively a map from local -> global IDs
+ !Remaining 8 columns give element connectivity (with
+ !global node #s)
+
+ integer, dimension((nx-2*nhalo)*(ny-2*nhalo), 6) :: &
+ global_basal_face_conn !First column is effectively a map from local ->global IDs for basal faces
+ !Second column gives global # of element to which this
+ !boundary face belongs
+ !Next 4 columns give the connectivity for the boundary
+ !face
+
+ real(dp), dimension((nx-2*nhalo)*(ny-2*nhalo)*(nz-1)) :: &
+ flwa_at_cells !This is a vector form of flwa, defined at
+ !all the elements
+
+
+ integer :: i, j, k, l
+ real(dp) :: x, y !x and y coordinates of vertices
+ integer :: gnx, gny !for temporary calculation of global vertex/cell # in x and in y
+ integer :: nNodes2D, nNodesProc2D !total # vertices, # vertices on this proc (in 2D)
+ integer :: nEles2D, nElesProc2D, nElesProc3D !total # cells (in 2D), # cells on this proc (in 2D), # elements on this proc (in 3D)
+ integer :: nodes_x !total # nodes in x
+ integer :: x_GID, y_GID, z_GID, x_GIDplus1, y_GIDplus1, z_GIDplus1, elem_GID, xy_plane !for creating element numbering
+ integer :: nCellsActive !# active cells (with ice) in 2D
+
+
+ !--------------------------------------------------------------------
+ ! TO DO (IK, 9/18/13):
+ ! - Make stuff that needs to be passed to Albany/FELIX an out argument of
+ ! this function
+ !--------------------------------------------------------------------
+
+ !IK, 9/9/13: printing for debug
+ !print *, 'In glissade_velo_higher_data! IK'
+ !print *, 'Proc #: ', this_rank
+ !print *, 'nx: ', nx
+ !print *, 'ny: ', ny
+ !print *, 'dx: ', dx
+ !print *, 'dy: ', dy
+ !print *, 'ewlb: ', ewlb
+ !print *, 'nslb: ', nslb
+ !print *, 'global_ewn: ', global_ewn
+ !print *, 'global_nsn:', global_nsn
+ !print *, 'nhalo:', nhalo
+ !print *, 'nz:', nz
+
+ !---------------------------------------------------------------------------------------
+ ! Creation of global node numbering of vertices/nodes (IK, 9/8/13)
+ !---------------------------------------------------------------------------------------
+
+ !IK, 9/8/13: first, create global vertices for 2D mesh to be extruded as 3D
+ !mesh
+ ! NOTE(review): vertex loops run from nhalo to n-nhalo (not 1+nhalo to
+ ! n-nhalo+1); confirm this offset convention against the commented-out
+ ! bounds in the z-coordinate loop below
+ k = 1
+ do j = nhalo, ny-nhalo
+ do i = nhalo, nx-nhalo
+ x = (ewlb+1)*dx + i*dx !xVertex(i,j)
+ y = (nslb+1)*dy + j*dy !yVertex(i,j)
+ gnx = ewlb + 1 + i - nhalo + 1
+ gny = nslb + 1 + j - nhalo + 1
+ global_vertex_id_owned_map(k) = gnx + (global_ewn+1)*(gny - 1)
+ xy_at_vertices(k,1) = x/1000.0 !divide by 1000 to convert to km for
+ !Albany/FELIX
+ xy_at_vertices(k,2) = y/1000.0 !divide by 1000 to convert to km for
+ !Albany/FELIX
+ k = k + 1
+ enddo
+ enddo
+
+ !IK, 9/8/13: now, create global nodes for 3D mesh obtained by extruding 2D
+ !mesh in z-direction
+ !------------------------------------------------------------------------------
+ ! Compute masks:
+ ! mask = 1 where dynamically active ice is present, 0 elsewhere
+ !------------------------------------------------------------------------------
+
+ do j = 1, ny
+ do i = 1, nx
+ if (thck(i,j) > thklim) then
+ imask(i,j) = 1
+ else
+ imask(i,j) = 0
+ endif
+ enddo
+ enddo
+
+ !------------------------------------------------------------------------------
+ ! Compute ice thickness and upper surface on staggered grid
+ ! (requires that thck and usrf are up to date in halo cells)
+ !------------------------------------------------------------------------------
+
+ call glissade_stagger(nx, ny, &
+ thck, stagthck, &
+ imask, stagger_margin_in = 1)
+
+ call glissade_stagger(nx, ny, &
+ usrf, stagusrf, &
+ imask, stagger_margin_in = 1)
+
+ !------------------------------------------------------------------------------
+
+ ! Replicate the 2D vertex numbering/coordinates for each of the nz levels;
+ ! node IDs of layer l are offset by nNodes2D*(l-1)
+ nNodes2D = (global_ewn + 1)*(global_nsn + 1)
+ nNodesProc2D = (nx - 2*nhalo+1)*(ny- 2*nhalo+1)
+ do l = 1, nz !loop over vertical layers
+ global_node_id_owned_map((l-1)*nNodesProc2D + 1:nNodesProc2D*l) = global_vertex_id_owned_map + nNodes2D*(l - 1)
+ xyz_at_nodes((l-1)*nNodesProc2D + 1:nNodesProc2D*l, 1:2) = xy_at_vertices
+ enddo
+ !IK, 9/8/13: set z-coordinate of mesh
+ k = 1
+ do l = 1, nz
+ do j = nhalo, ny-nhalo
+ do i = nhalo, nx-nhalo
+ ! do j = 1+nhalo, ny-nhalo+1
+ ! do i = 1+nhalo, nx-nhalo+1
+ !divide by 1000 to convert to km for Albany/FELIX
+ xyz_at_nodes(k,3) = (stagusrf(i,j) - sigma(l)*stagthck(i,j))/1000.0
+ surf_height_at_nodes(k) = stagusrf(i,j)/1000.0
+ beta_at_nodes(k) = beta(i,j)/1000.0;
+ k = k + 1
+ enddo
+ enddo
+ enddo
+
+ !IK, 9/12/13: printing output for debugging/checking node
+ !numbering/coordinates
+ ! NOTE(review): verbose per-node debug output on rank 0; consider
+ ! removing or guarding before production runs
+ if (this_rank == 0) then
+ do l=1, (nx-2*nhalo+1)*(ny-2*nhalo+1)*nz
+ print *, 'x, y, z: ', xyz_at_nodes(l,1), xyz_at_nodes(l,2), xyz_at_nodes(l,3)
+ print *, 'global node: ', global_node_id_owned_map(l)
+ print *, 'sh: ', surf_height_at_nodes(l)
+ print *, 'beta: ', beta_at_nodes(l)
+ enddo
+ endif
+
+ ! Identify the active cells.
+ ! Include all cells that border locally owned vertices and contain ice.
+
+ nCellsActive = 0 !start counter keeping track of how many active cells
+ !there are on each processor
+
+ active_cell(:,:) = .false.
+
+ do j = 1+nhalo, ny-nhalo
+ do i = 1+nhalo, nx-nhalo
+ if (thck(i,j) > thklim) then
+ active_cell(i,j) = .true.
+ nCellsActive = nCellsActive + 1
+ endif
+ enddo
+ enddo
+
+ !IK, 10/24/13: populate flwa_at_cells array from flwa array, and change
+ !units to Albany/FELIX units
+ ! NOTE(review): indexing assumes flwa is dimensioned (level, ew, ns) --
+ ! confirm against the field passed by the caller
+ k = 1
+ do l = 1, nz-1
+ do j = 1+nhalo, ny-nhalo
+ do i = 1+nhalo, nx-nhalo
+ flwa_at_cells(k) = flwa(l,i,j)*(1.0E12) !scale flwa by 1e12 to get units
+ !consistent with those in
+ !Albany/FELIX
+ k = k + 1;
+ enddo
+ enddo
+ enddo
+
+
+
+ !--------------------------------------------------------------------------
+ ! Creation of hexahedral mesh and global numbering of elements (IK, 9/8/13)
+ !--------------------------------------------------------------------------
+
+ nEles2D = global_ewn*global_nsn
+ nElesProc2D = (nx - 2*nhalo)*(ny - 2*nhalo)
+ k = 1 !local cell number
+ do j = 1+nhalo, ny-nhalo
+ do i = 1+nhalo, nx-nhalo
+ gnx = ewlb + 1 + i - nhalo
+ gny = nslb + 1 + j - nhalo
+ global_cell_id_owned_map(k) = gnx + global_ewn*(gny - 1)
+ active_cell_vector(k) = active_cell(i,j)
+ k = k + 1
+ enddo
+ enddo
+ ! Replicate the 2D cell IDs and activity flags for each element layer;
+ ! element IDs of layer l are offset by nEles2D*(l-1)
+ do l = 1, nz - 1 !loop over vertical layers
+ global_element_conn((l-1)*nElesProc2D + 1:nElesProc2D*l, 1) = global_cell_id_owned_map+ nEles2D*(l-1)
+ active_cell_vector3D((l-1)*nElesProc2D + 1:nElesProc2D*l) = active_cell_vector
+ enddo
+
+ ! Decompose each 0-based global element ID into (x_GID, y_GID, z_GID) grid
+ ! indices, then derive the 8 node IDs of the hexahedron (columns 2:9)
+ nodes_x = global_ewn + 1 !# nodes in x
+ nElesProc3D = (nx - 2*nhalo)*(ny - 2*nhalo)*(nz - 1) !number of elements on proc
+ k = 1 ! counter for incrementing boundary faces
+ do i = 1, nElesProc3D
+ elem_GID = global_element_conn(i, 1) - 1
+ z_GID = elem_GID/nEles2D !vertical layer number (0-based)
+ xy_plane = mod(elem_GID, nEles2D)
+ x_GID = mod(xy_plane, global_ewn) !mesh column number
+ y_GID = xy_plane/(global_ewn) !mesh row number
+ x_GIDplus1 = x_GID + 1
+ y_GIDplus1 = y_GID + 1
+ z_GIDplus1 = z_GID + 1
+ ! find and mark boundary faces on basal boundary
+ if (z_GIDplus1 == nz - 1) then
+ ! NOTE(review): this whole-column assignment is re-executed for every
+ ! basal face found; it could be hoisted out of the loop -- confirm
+ global_basal_face_conn(:, 1) = global_cell_id_owned_map
+ global_basal_face_conn(k, 2) = global_element_conn(i,1)
+ !IK, 9/8/13: below the +1 is added to make the connectivity 1-based
+ !like in Fortran -- the node numbering has been created with this
+ !convention
+ global_basal_face_conn(k, 3) = x_GID + nodes_x*y_GID + nNodes2D*z_GIDplus1 + 1
+ global_basal_face_conn(k, 4) = x_GIDplus1 + nodes_x*y_GID + nNodes2D*z_GIDplus1 + 1
+ global_basal_face_conn(k, 5) = x_GIDplus1 + nodes_x*y_GIDplus1 + nNodes2D*z_GIDplus1 + 1
+ global_basal_face_conn(k, 6) = x_GID + nodes_x*y_GIDplus1 + nNodes2D*z_GIDplus1 + 1
+ k = k + 1
+ endif
+ !IK, 9/8/13: below the +1 is added to make the connectivity 1-based
+ !like in Fortran -- the node numbering has been created with this
+ !convention
+ global_element_conn(i, 2) = x_GID + nodes_x*y_GID + nNodes2D*z_GIDplus1 + 1
+ global_element_conn(i, 3) = x_GIDplus1 + nodes_x*y_GID + nNodes2D*z_GIDplus1 + 1
+ global_element_conn(i, 4) = x_GIDplus1 + nodes_x*y_GIDplus1 + nNodes2D*z_GIDplus1 + 1
+ global_element_conn(i, 5) = x_GID + nodes_x*y_GIDplus1 + nNodes2D*z_GIDplus1 + 1
+ global_element_conn(i, 6) = x_GID + nodes_x*y_GID + nNodes2D*z_GID + 1
+ global_element_conn(i, 7) = x_GIDplus1 + nodes_x*y_GID + nNodes2D*z_GID + 1
+ global_element_conn(i, 8) = x_GIDplus1 + nodes_x*y_GIDplus1 + nNodes2D*z_GID + 1
+ global_element_conn(i, 9) = x_GID + nodes_x*y_GIDplus1 + nNodes2D*z_GID + 1
+ enddo
+
+ !dynamically allocate arrays that depend on # active cells
+ allocate(global_element_conn_active(nCellsActive*(nz-1), 8))
+ allocate(global_element_id_active_owned_map(nCellsActive*(nz-1),1))
+ allocate(global_basal_face_conn_active(nCellsActive, 5))
+ allocate(global_basal_face_id_active_owned_map(nCellsActive,1))
+ allocate(flwa_at_active_cells(nCellsActive*(nz-1), 1))
+ !IK, 9/9/13: do dynamically-allocated arrays need to be deallocated/deleted?
+ !(unsaved local allocatables are deallocated automatically on return in
+ !Fortran 95 and later)
+ ! Pack the active elements: copy connectivity, ID and flwa for elements
+ ! whose 2D cell is active
+ k = 1
+ do i = 1, nElesProc3D
+ if (active_cell_vector3D(i)) then
+ global_element_conn_active(k, 1:8) = global_element_conn(i,2:9)
+ global_element_id_active_owned_map(k,1) = global_element_conn(i,1)
+ flwa_at_active_cells(k,1) = flwa_at_cells(i)
+ k = k + 1
+ endif
+ enddo
+ ! Pack the active basal faces analogously
+ k = 1
+ do i = 1, nElesProc2D
+ if (active_cell_vector(i)) then
+ global_basal_face_conn_active(k, :) = global_basal_face_conn(i, 2:6)
+ global_basal_face_id_active_owned_map(k,1) = global_basal_face_conn(i, 1)
+ k = k + 1
+ endif
+ enddo
+
+ !IK, 9/12/13: printing output for debugging/checking element numbering
+ if (this_rank == 0) then
+ do l=1, nCellsActive*(nz-1)
+ print *, 'element connectivity active: ', global_element_conn_active(l,1:8)
+ print *, 'global element #: ', global_element_id_active_owned_map(l,1)
+ print *, 'flwa: ', flwa_at_active_cells(l,1)
+ enddo
+ endif
+
+ !IK, 9/12/13: printing output for debugging/checking basal face numbering
+ if (this_rank == 0) then
+ do l=1, nCellsActive
+ print *, 'face connectivity active: ', global_basal_face_conn_active(l,1:5)
+ print *, 'global face #: ', global_basal_face_id_active_owned_map(l,1)
+ enddo
+ endif
+
+
+ !--------------------------------------------------------------------
+ end subroutine get_parallel_finite_element_mesh_data
+
+
+end module felix_dycore_interface
diff --git a/components/cism/glimmer-cism/libglide/glam_grid_operators.F90 b/components/cism/glimmer-cism/libglide/glam_grid_operators.F90
new file mode 100644
index 0000000000..65049eefd2
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glam_grid_operators.F90
@@ -0,0 +1,1568 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glam_grid_operators.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+! Various grid operators for glam dycore, including routines for computing gradients
+! and switching between staggered and unstaggered grids
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+
+!> This module contains functions for computing derivatives numerically, both
+!> for a single value and for an entire field.
+!> Note that this module is written with the first index in a matrix corresponding
+!> to the x (east-west) coordinate. If this is not the case (i.e. if the first
+!> index corresponds to the y (north-south) coordinate), then transposition
+!> will be necessary. Simply ask for the y-derivative when you mean to ask for
+!> the x-derivative, and vice versa.
+
+module glam_grid_operators
+
+ use glimmer_global, only: dp
+
+ implicit none
+
+contains
+
+!----------------------------------------------------------------------------
+
+  !> Compute first and second horizontal derivatives of the ice and bed
+  !> geometry (thickness, upper/lower surface) and average the geometry
+  !> fields onto the staggered (velocity) grid used by the glam dycore.
+  !> Reads model%geometry and model%numerics; writes model%geomderv.
+  subroutine glam_geometry_derivs(model)
+
+    ! Compute derivatives of the ice and bed geometry, as well as averaging
+    ! them onto the staggered grid
+
+    use glide_types, only: glide_global_type
+    use glide_grid_operators, only: stagvarb ! can we remove this?
+    implicit none
+
+    type(glide_global_type), intent(inout) :: model
+
+    ! Mass-conserving thickness staggering (ice-free cells excluded from the
+    ! 4-cell average; see stagthickness below).
+    call stagthickness(model%geometry% thck, &
+                       model%geomderv%stagthck,&
+                       model%general%ewn, &
+                       model%general%nsn, &
+                       model%geometry%usrf, &
+                       model%numerics%thklim, &
+                       model%geometry%thkmask)
+
+!NOTE: Should these calls to stagvarb be replaced by calls to df_field_2d_staggered?
+    call stagvarb(model%geometry%lsrf, &
+                  model%geomderv%staglsrf,&
+                  model%general%ewn, &
+                  model%general%nsn)
+
+    call stagvarb(model%geometry%topg, &
+                  model%geomderv%stagtopg,&
+                  model%general%ewn, &
+                  model%general%nsn)
+
+    ! Staggered upper surface reconstructed as lower surface + thickness.
+    model%geomderv%stagusrf = model%geomderv%staglsrf + model%geomderv%stagthck
+
+    call df_field_2d_staggered(model%geometry%usrf, &
+                               model%numerics%dew, model%numerics%dns, &
+                               model%geomderv%dusrfdew, &
+                               model%geomderv%dusrfdns, &
+                               model%geometry%thck, &
+                               model%numerics%thklim )
+
+    call df_field_2d_staggered(model%geometry%thck, &
+                               model%numerics%dew, model%numerics%dns, &
+                               model%geomderv%dthckdew, &
+                               model%geomderv%dthckdns, &
+                               model%geometry%thck, &
+                               model%numerics%thklim )
+
+    !Make sure that the derivatives are 0 where staggered thickness is 0
+
+    where (model%geomderv%stagthck == 0.d0)
+       model%geomderv%dusrfdew = 0.d0
+       model%geomderv%dusrfdns = 0.d0
+       model%geomderv%dthckdew = 0.d0
+       model%geomderv%dthckdns = 0.d0
+    endwhere
+
+    ! Lower-surface gradient follows from lsrf = usrf - thck.
+    model%geomderv%dlsrfdew = model%geomderv%dusrfdew - model%geomderv%dthckdew
+    model%geomderv%dlsrfdns = model%geomderv%dusrfdns - model%geomderv%dthckdns
+
+    !Compute second derivatives.
+
+    call d2f_field_stag(model%geometry%usrf, model%numerics%dew, model%numerics%dns, &
+                        model%geomderv%d2usrfdew2, model%geomderv%d2usrfdns2, &
+                        .false., .false.)
+
+    call d2f_field_stag(model%geometry%thck, model%numerics%dew, model%numerics%dns, &
+                        model%geomderv%d2thckdew2, model%geomderv%d2thckdns2, &
+                        .false., .false.)
+
+  end subroutine glam_geometry_derivs
+
+!----------------------------------------------------------------------------
+
+  !> Average ice thickness onto the staggered (cell-corner) grid in a
+  !> mass-conserving way: only ice-covered cells contribute to the 4-cell
+  !> average, and staggered thickness is zeroed next to higher ice-free
+  !> cells (Anne LeBroque's nunatak fix) and at the calving front.
+  subroutine stagthickness(ipvr,opvr,ewn,nsn,usrf,thklim,mask)
+
+    !! A special staggering algorithm that is meant to conserve mass when operating on thickness fields.
+    !! This incorporates Anne LeBroque's nunatak fix and the calving front fix.
+
+!NOTE: This subroutine, used by the glam HO dycore, is different from stagvarb,
+!      which is used by the glide SIA dycore. Here, zero-thickness values are
+!      ignored when thickness is averaged over four adjacent grid cells.
+!      In stagvarb, zero-thickness values are included in the average.
+!      The glam approach works better for calving.
+
+    implicit none
+
+    real(dp), intent(out), dimension(:,:) :: opvr   ! staggered thickness (cell corners)
+    real(dp), intent(in), dimension(:,:) :: ipvr    ! unstaggered thickness (cell centers)
+
+    real(dp), intent(in), dimension(:,:) :: usrf    ! upper surface elevation (used by nunatak fix)
+    real(dp), intent(in) :: thklim                  ! minimum thickness counted as ice
+    integer, intent(in), dimension(:,:) :: mask     ! glide mask (macros in glide_mask.inc)
+
+    integer, intent(in) :: ewn, nsn   ! horizontal grid dimensions (intent added)
+    integer :: ew, ns, n
+    real(dp) :: tot
+
+    do ns = 1,nsn-1
+       do ew = 1,ewn-1
+
+          !If any of our staggering points are shelf front, ignore zeros when staggering
+          !if (any(GLIDE_IS_CALVING(mask(ew:ew+1, ns:ns+1)))) then ! in contact with the ocean
+          !Use the "only nonzero thickness" staggering criterion for ALL marginal ice. For
+          ! reasons that are not entirely clear, this corrects an error whereby the land ice
+          ! margin is defined incorrectly as existing one grid cell too far inland from where
+          ! it should be.
+
+          if (any(GLIDE_HAS_ICE(mask(ew:ew+1,ns:ns+1)))) then
+             ! Average over the ice-covered (nonzero) cells of the 2x2 block only.
+             n = 0
+             tot = 0.d0
+             if (abs(ipvr(ew,ns)) > 0.d0 )then
+                tot = tot + ipvr(ew,ns)
+                n = n + 1
+             end if
+             if (abs(ipvr(ew+1,ns)) > 0.d0 )then
+                tot = tot + ipvr(ew+1,ns)
+                n = n + 1
+             end if
+             if (abs(ipvr(ew,ns+1)) > 0.d0 )then
+                tot = tot + ipvr(ew,ns+1)
+                n = n + 1
+             end if
+             if (abs(ipvr(ew+1,ns+1)) > 0.d0 )then
+                tot = tot + ipvr(ew+1,ns+1)
+                n = n + 1
+             end if
+             if (n > 0) then
+                opvr(ew,ns) = tot/n
+             else
+                opvr(ew,ns) = 0.d0
+             end if
+
+          !The following cases relate to Anne LeBroque's fix for nunataks:
+          !zero the staggered point adjacent to an ice-free cell whose surface
+          !is at least as high as a neighboring ice-covered cell.
+          !ew,ns cell is ice free:
+          else if (ipvr(ew,ns) <= thklim .and. &
+                  ((usrf(ew,ns) >= usrf(ew+1,ns) .and. ipvr(ew+1,ns) >= thklim) &
+              .or. (usrf(ew,ns) >= usrf(ew,ns+1) .and. ipvr(ew,ns+1) >= thklim))) then
+             opvr(ew,ns) = 0.d0
+
+          !ew+1,ns cell is ice free:
+          else if (ipvr(ew+1,ns) <= thklim .and. &
+                  ((usrf(ew+1,ns) >= usrf(ew,ns) .and. ipvr(ew,ns) >= thklim) &
+              .or. (usrf(ew+1,ns) >= usrf(ew+1,ns+1) .and. ipvr(ew+1,ns+1) >= thklim))) then
+             opvr(ew,ns) = 0.d0
+
+          !ew,ns+1 cell is ice free:
+          else if (ipvr(ew,ns+1) <= thklim .and. &
+                  ((usrf(ew,ns+1) >= usrf(ew,ns) .and. ipvr(ew,ns) >= thklim) &
+              .or. (usrf(ew,ns+1) >= usrf(ew+1,ns+1) .and. ipvr(ew+1,ns+1) >= thklim))) then
+             opvr(ew,ns) = 0.d0
+
+          !ew+1,ns+1 cell is ice free:
+          else if (ipvr(ew+1,ns+1) <= thklim .and. &
+                  ((usrf(ew+1,ns+1) >= usrf(ew+1,ns) .and. ipvr(ew+1,ns) >=thklim) &
+              .or. (usrf(ew+1,ns+1) >= usrf(ew,ns+1) .and. ipvr(ew,ns+1) >=thklim))) then
+             opvr(ew,ns) = 0.d0
+
+!         !Standard Staggering !! Not needed if only-nonzero-thickness staggering scheme is used
+!         else
+!             opvr(ew,ns) = (ipvr(ew+1,ns) + ipvr(ew,ns+1) + &
+!                            ipvr(ew+1,ns+1) + ipvr(ew,ns)) / 4.d0
+
+          end if
+
+       end do
+    end do
+
+  end subroutine stagthickness
+
+!----------------------------------------------------------------------------
+
+ !------------------------------------------------------------------
+ !First Derivative Estimates, Second Order, 2D
+ !------------------------------------------------------------------
+
+  !> Computes x- and y-derivative fields of the given 2D function f on the
+  !> unstaggered (scalar) grid.  Interior points use centred differences;
+  !> edge points fall back to one-sided differences.  If both direction
+  !> fields are present, each interior point may additionally be biased
+  !> upstream/downstream according to the sign of the direction value.
+  subroutine df_field_2d(f, &
+                         deltax, deltay, &
+                         out_dfdx, out_dfdy, &
+                         direction_x, direction_y)
+
+    use parallel
+    implicit none
+    real(dp), dimension(:, :), intent(in) :: f
+    real(dp), intent(in) :: deltax, deltay
+    real(dp), dimension(:, :), intent(out) :: out_dfdx, out_dfdy
+    real(dp), dimension(:, :), intent(in), optional :: direction_x, direction_y
+
+    logical :: upwind !Whether or not directions for upwinding were provided
+
+    integer :: grad_x, grad_y !Whether to upwind or downwind at the current point
+
+    integer :: nx, ny, x, y
+
+    !Get the size of the field we're working with
+    nx = size(f, 1)
+    ny = size(f, 2)
+
+    ! Directional biasing requires BOTH direction fields.
+    upwind = present(direction_x) .and. present(direction_y)
+
+    !For now, we'll use the function calls defined above.
+    !Later on we might want to refactor?
+
+!LOOP: all scalar points (uses upwinding and downwinding to avoid stepping out of bounds)
+    do x = 1, nx
+      do y = 1, ny
+        grad_x = 0
+        grad_y = 0
+        if (upwind) then
+          if (direction_x(x,y) < 0.d0 .and. x > 2) then ! Upstream case
+            grad_x = -1
+          else if(direction_x(x,y) > 0.d0 .and. x < nx - 1) then ! Downstream case
+            grad_x = 1
+          end if
+
+          if (direction_y(x,y) < 0.d0 .and. y > 2) then !Upstream case
+            grad_y = -1
+          else if(direction_y(x,y) > 0.d0 .and. y < ny - 1) then !Downstream case
+            grad_y = 1
+          end if
+        end if
+
+        !For each of the variables in x, y, check whether or not
+        !we need to use an upwinding or downwinding differentiation
+        !scheme.  (Downwind = one-sided toward larger index, used at the
+        !minimum boundary; upwind = toward smaller index, at the maximum.)
+        if (x == 1 .or. grad_x > 0) then
+          out_dfdx(x, y) = dfdx_2d_downwind(f, x, y, deltax)
+        else if (x == nx .or. grad_x < 0) then
+          out_dfdx(x, y) = dfdx_2d_upwind(f, x, y, deltax)
+        else
+          out_dfdx(x, y) = dfdx_2d(f, x, y, deltax)
+        end if
+
+        if (y == 1 .or. grad_y > 0) then
+          out_dfdy(x, y) = dfdy_2d_downwind(f, x, y, deltay)
+        elseif (y == ny .or. grad_y < 0) then
+          out_dfdy(x, y) = dfdy_2d_upwind(f, x, y, deltay)
+        else
+          out_dfdy(x, y) = dfdy_2d(f, x, y, deltay)
+        end if
+
+      end do
+    end do
+
+!NOTE: If halo updates are needed, they should be done at a higher level.
+
+!! call parallel_halo(out_dfdx)
+!! call parallel_halo(out_dfdy)
+
+  end subroutine df_field_2d
+
+!----------------------------------------------------------------------------
+
+  !> Computes x- and y-derivative fields of the given function f and places
+  !> the results on the staggered (cell-corner) grid.  The staggered output
+  !> arrays carry one fewer point than f in each dimension; any trailing
+  !> entries are left at zero by the initialization below.
+
+  subroutine df_field_2d_staggered(f, &
+                                   deltax, deltay, &
+                                   out_dfdx, out_dfdy, &
+                                   thck, thklim )
+
+    implicit none
+    real(dp), dimension(:, :), intent(in) :: f, thck ! unstaggered grid
+    real(dp), intent(in) :: deltax, deltay, thklim
+    real(dp), dimension(:, :), intent(out) :: out_dfdx, out_dfdy ! staggered grid
+
+    integer :: nx, ny, x, y
+
+    !Get the size of the field we're working with
+    nx = size(f, 1)
+    ny = size(f, 2)
+
+    ! initialize to zeros
+    out_dfdx = 0.d0
+    out_dfdy = 0.d0
+
+    ! *SFP* old subroutine calls, commented out below but still available,
+    ! use centered diffs on normal thck / surf grids but do nothing special at lateral
+    ! boundaries where centered diffs might give unreasonable values (e.g., due to jumping
+    ! from a region of non-zero to zero thickness / elevation). New calls access new
+    ! subroutines which attempt to correct for this if/when possible using approx., first-order
+    ! accurate one-sided diffs.
+
+    !Note - Can remove thck and thklim from argument list if not using new calls
+
+    do x = 1, nx - 1 ! We go to nx - 1 because we're using a staggered grid
+      do y = 1, ny - 1
+        out_dfdx(x,y) = dfdx_2d_stag(f, x, y, deltax) !*SFP* old call
+        out_dfdy(x,y) = dfdy_2d_stag(f, x, y, deltay) !*SFP* old call
+!        out_dfdx(x,y) = dfdx_2d_stag_os(f, x, y, deltax, thck, thklim )
+!        out_dfdy(x,y) = dfdy_2d_stag_os(f, x, y, deltay, thck, thklim )
+      end do
+    end do
+
+    ! (Removed: a large commented-out block implementing periodic boundary
+    ! conditions, flagged for deletion by an in-file NOTE.)
+
+  end subroutine df_field_2d_staggered
+
+!----------------------------------------------------------------------------
+
+ !NOTE - Remove subroutine df_field_3d? It is never called.
+
+  subroutine df_field_3d(f, &
+                         deltax, deltay, deltaz, &
+                         out_dfdx, out_dfdy, out_dfdz, &
+                         direction_x, direction_y)
+
+    !> Computes derivative fields of the given function.
+    !> The z axis is computed on an irregular grid.
+    !> Arrays are indexed (z, x, y): size(f,1) is the number of vertical
+    !> levels, as the nz/nx/ny assignments below show.  Horizontal edges
+    !> use one-sided differences; interior points use centred differences,
+    !> optionally biased by the sign of direction_x/direction_y.
+
+    implicit none
+    real(dp), dimension(:, :, :), intent(in) :: f
+    real(dp), intent(in) :: deltax, deltay
+    real(dp), dimension(:), intent(in) :: deltaz
+    real(dp), dimension(:, :, :), intent(out) :: out_dfdx, out_dfdy, out_dfdz
+
+    !Field containing the direction that derivatives should be upwinded in.
+    !If 0, centered differences are used. If negative, then upwinded
+    !derivatives (approaching from the negative side) are used. If
+    !positive, then downwinded derivatives (approaching from the positive
+    !side) are used.
+    real(dp), dimension(:,:), optional :: direction_x, direction_y
+
+
+    integer :: grad_x, grad_y !Sign of the gradient, used for determining upwinding
+    integer :: nx, ny, nz, x, y, z
+    logical :: upwind
+
+    !Get the size of the field we're working with
+    nx = size(f, 2)
+    ny = size(f, 3)
+    nz = size(f, 1)
+
+    ! Directional biasing requires BOTH direction fields.
+    upwind = present(direction_x) .and. present(direction_y)
+
+    !For now, we'll use the function calls defined above.
+    !Later on we might want to refactor?
+
+!LOOP: all scalar points
+!      uses upwinding and downwinding to avoid going out of bounds
+    do x = 1, nx
+      do y = 1, ny
+        grad_x = 0
+        grad_y = 0
+        if (upwind) then
+          if (direction_x(x,y) < 0.d0 .and. x > 2) then !Upstream case
+            grad_x = -1
+          else if(direction_x(x,y) > 0.d0 .and. x < nx - 1) then !Downstream case
+            grad_x = 1
+          end if
+
+          if (direction_y(x,y) < 0.d0 .and. y > 2) then !Upstream case
+            grad_y = -1
+          else if(direction_y(x,y) > 0.d0 .and. y < ny - 1) then !Downstream case
+            grad_y = 1
+          end if
+        end if
+
+        do z = 1, nz
+          !For each of the variables in x, y, check whether or not
+          !we need to use an upwinding or downwinding differentiation
+          !scheme.
+          if (x == 1 .or. grad_x > 0) then
+            out_dfdx(z, x, y) = dfdx_3d_downwind(f, x, y, z, deltax)
+            !out_dfdx(x, y, z) = (f(x+1,y,z) - f(x,y,z))/deltax
+          else if (x == nx .or. grad_x < 0) then
+            out_dfdx(z, x, y) = dfdx_3d_upwind(f, x, y, z, deltax)
+            !out_dfdx(x, y, z) = (f(x,y,z) - f(x-1,y,z))/deltax
+          else
+            out_dfdx(z, x, y) = dfdx_3d(f, x, y, z, deltax)
+          end if
+          if (y == 1 .or. grad_y > 0) then
+            out_dfdy(z, x, y) = dfdy_3d_downwind(f, x, y, z, deltay)
+            !out_dfdy(x, y, z) = (f(x,y+1,z) - f(x,y,z))/deltay
+          else if (y == ny .or. grad_y < 0) then
+            out_dfdy(z, x, y) = dfdy_3d_upwind(f, x, y, z, deltay)
+            !out_dfdy(x, y, z) = (f(x,y,z) - f(x,y-1,z))/deltay
+          else
+            out_dfdy(z, x, y) = dfdy_3d(f, x, y, z, deltay)
+          end if
+          ! Vertical derivative: one-sided at top/bottom levels, centred
+          ! nonuniform-grid formula in the interior.
+          if (z == 1) then
+            out_dfdz(z, x, y) = dfdz_3d_downwind_irregular(f, x, y, z, deltaz)
+          else if (z == nz) then
+            out_dfdz(z, x, y) = dfdz_3d_upwind_irregular(f, x, y, z, deltaz)
+          else
+            out_dfdz(z, x, y) = dfdz_3d_irregular(f, x, y, z, deltaz)
+          end if
+        end do
+      end do
+    end do
+
+  end subroutine df_field_3d
+
+!----------------------------------------------------------------------------
+
+ !NOTE - Remove subroutine df_field_3d_stag? It is never called.
+
+  subroutine df_field_3d_stag(f, &
+                              deltax, deltay, deltaz, &
+                              out_dfdx, out_dfdy, out_dfdz)
+
+    !> Computes the derivative fields of the given function. The X and Y
+    !> derivatives are computed on a staggered grid. The Z derivative
+    !> is computed on a nonstaggered but irregular grid. This means that,
+    !> if an array of dimensions (n1, n2, n3), the output arrays should
+    !> be of size (n1 - 1, n2 - 1, n3)
+
+    implicit none
+    real(dp), dimension(:, :, :), intent(in) :: f
+    real(dp), intent(in) :: deltax, deltay
+    real(dp), dimension(:), intent(in) :: deltaz
+    real(dp), dimension(:, :, :), intent(out) :: out_dfdx, out_dfdy, out_dfdz
+
+    real(dp), dimension(4) :: zDerivs !Temporarily holds derivatives in Z to average
+    integer :: nx, ny, nz, x, y, z
+
+    !Get the size of the field we're working with
+    nx = size(f, 1)
+    ny = size(f, 2)
+    nz = size(f, 3)
+
+    !NOTE(review): the size() calls above treat f as dimensioned (x,y,z),
+    ! but the dfd{x,y}_3d_stag and dfdz_*_irregular helpers called below
+    ! index f as (z,x,y) — confirm the intended layout before using this
+    ! routine (a NOTE elsewhere says it is currently never called).
+
+!LOOP: all scalar points
+!      uses upwinding and downwinding to avoid going out of bounds
+
+    do x = 1, nx - 1
+      do y = 1, ny - 1
+        do z = 1, nz
+          !We will never have to compute upstream and downstream
+          !derivatives in the horizontal (avoided by the staggered scheme),
+          !but we will in the vertical.
+          out_dfdx(x,y,z) = dfdx_3d_stag(f, x, y, z, deltax)
+          out_dfdy(x,y,z) = dfdy_3d_stag(f, x, y, z, deltay)
+
+          !Even though we are not staggering in the vertical, the points
+          !we compute the derivatives at are still staggered in the
+          !horizontal. We'll solve this by computing four
+          !derivatives horizontally around the point requested
+          !and averaging the results
+          if (z == 1) then
+            zDerivs(1) = dfdz_3d_downwind_irregular(f, x, y, z, deltaz)
+            zDerivs(2) = dfdz_3d_downwind_irregular(f, x+1, y, z, deltaz)
+            zDerivs(3) = dfdz_3d_downwind_irregular(f, x, y+1, z, deltaz)
+            zDerivs(4) = dfdz_3d_downwind_irregular(f, x+1, y+1, z, deltaz)
+          else if (z == nz) then
+            zDerivs(1) = dfdz_3d_upwind_irregular(f, x, y, z, deltaz)
+            zDerivs(2) = dfdz_3d_upwind_irregular(f, x+1, y, z, deltaz)
+            zDerivs(3) = dfdz_3d_upwind_irregular(f, x, y+1, z, deltaz)
+            zDerivs(4) = dfdz_3d_upwind_irregular(f, x+1, y+1, z, deltaz)
+          else
+            zDerivs(1) = dfdz_3d_irregular(f, x, y, z, deltaz)
+            zDerivs(2) = dfdz_3d_irregular(f, x+1, y, z, deltaz)
+            zDerivs(3) = dfdz_3d_irregular(f, x, y+1, z, deltaz)
+            zDerivs(4) = dfdz_3d_irregular(f, x+1, y+1, z, deltaz)
+          end if
+          out_dfdz(x, y, z) = (zDerivs(1) + zDerivs(2) + zDerivs(3) + zDerivs(4)) / 4
+        end do
+      end do
+    end do
+
+  end subroutine df_field_3d_stag
+
+!----------------------------------------------------------------------------
+
+ !NOTE - Check the rest of this module for unused functions we might want to remove
+
+  !> Computes the centred (second-order) derivative with respect to x at a
+  !> given interior point.  No boundary or periodic handling here: the
+  !> caller must ensure 1 < i < size(f,1) (df_field_2d switches to one-sided
+  !> differences at the edges).
+
+  function dfdx_2d(f, i, j, delta)
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f
+    integer, intent(in) :: i,j
+    real(dp), intent(in) :: delta
+    real(dp) :: dfdx_2d
+
+    dfdx_2d = (-.5d0/delta)*f(i-1, j) + (.5d0/delta)*f(i+1, j)
+  end function dfdx_2d
+
+!----------------------------------------------------------------------------
+
+  !> Computes the centred (second-order) derivative with respect to y at a
+  !> given interior point.  No boundary or periodic handling here: the
+  !> caller must ensure 1 < j < size(f,2) (df_field_2d switches to one-sided
+  !> differences at the edges).
+
+  function dfdy_2d(f, i, j, delta)
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f
+    integer, intent(in) :: i,j
+    real(dp), intent(in) :: delta
+    real(dp) :: dfdy_2d
+
+    ! (Removed dead code: periodic wrap indices jp1/jm1 were computed but
+    ! never used in the difference below.)
+    dfdy_2d = (-.5d0/delta)*f(i, j-1) + (.5d0/delta)*f(i, j+1)
+  end function dfdy_2d
+
+!----------------------------------------------------------------------------
+
+  !> Centred x-derivative of f evaluated at the staggered point (i,j),
+  !> i.e. the cell corner shared by scalar cells (i:i+1, j:j+1).
+
+  function dfdx_2d_stag(f, i, j, delta) result(dfdx)
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f   ! field on the scalar grid
+    integer, intent(in) :: i,j                  ! staggered-grid indices
+    real(dp), intent(in) :: delta               ! grid spacing in x
+    real(dp) :: dfdx
+    ! Difference the two east cells against the two west cells.
+    dfdx = (f(i+1, j) + f(i+1, j+1) - f(i, j) - f(i, j+1))/(2.d0*delta)
+  end function dfdx_2d_stag
+
+!----------------------------------------------------------------------------
+
+  !> Centred y-derivative of f evaluated at the staggered point (i,j),
+  !> i.e. the cell corner shared by scalar cells (i:i+1, j:j+1).
+
+  function dfdy_2d_stag(f, i, j, delta) result(dfdy)
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f   ! field on the scalar grid
+    integer, intent(in) :: i,j                  ! staggered-grid indices
+    real(dp), intent(in) :: delta               ! grid spacing in y
+    real(dp) :: dfdy
+    ! Difference the two north cells against the two south cells.
+    dfdy = (f(i, j+1) + f(i+1, j+1) - f(i,j) - f(i+1, j))/(2.d0*delta)
+  end function dfdy_2d_stag
+
+!----------------------------------------------------------------------------
+
+  function dfdx_2d_stag_os(f_in, i, j, delta, thck, thklim )
+
+    !*SFP* altered/expanded version of dfdx_2d_stag that uses approx. one-sided
+    ! diffs at physical domain edges so as not to overestimate grads there.
+    ! Cells with thck <= thklim are zeroed and treated as "empty"; the stencil
+    ! is then chosen from how many of the four surrounding cells hold ice.
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f_in, thck
+    integer, intent(in) :: i, j
+    real(dp), intent(in) :: delta, thklim
+    real(dp) :: dfdx_2d_stag_os
+
+    real(dp), dimension(2,2) :: f_array
+    real(dp), dimension(1:size(f_in,1),1:size(f_in,2)) :: f
+    real(dp) :: f_min
+    integer :: n_ice   ! number of nonzero (ice-covered) cells in the 2x2 block
+
+    ! initialize vars/arrays to zeros
+    dfdx_2d_stag_os = 0.d0; f_array = 0.d0; f_min = 0.d0
+
+    f = f_in
+
+    ! zero out thin/absent ice so it is excluded from the stencil below
+    where( thck <= thklim )
+        f = 0.d0
+    end where
+
+    f_array(1,1) = f(i,j); f_array(2,1) = f(i+1,j); f_array(1,2) = f(i,j+1); f_array(2,2) = f(i+1,j+1);
+
+    ! Count ice-covered cells once (replaces the repeated
+    ! sum(f_array/f_array, MASK=...) trick; exactly equivalent).
+    n_ice = count( f_array /= 0.d0 )
+
+    if( n_ice == 4 )then
+
+        ! normal differencing for interior points
+        dfdx_2d_stag_os = (f(i+1,j) + f(i+1,j+1) - f(i,j) - f(i,j+1))/(2.d0*delta)
+
+    elseif( n_ice == 3 )then
+
+        ! corner; use 2x next closest value
+        if( f(i,j) == f_min )then ! southwest corner point missing: apply value from s.e. point
+            dfdx_2d_stag_os = ( f(i+1,j+1) + f(i+1,j) - 2.d0*f(i,j+1) )/(2.d0*delta)
+        elseif( f(i+1,j) == f_min )then ! southeast corner point missing: apply value from s.w. point
+            dfdx_2d_stag_os = ( 2.d0*f(i+1,j+1) - f(i,j) - f(i,j+1) )/(2.d0*delta)
+        elseif( f(i,j+1) == f_min )then ! northwest corner point missing: apply value from n.e. point
+            dfdx_2d_stag_os = ( f(i+1,j+1) + f(i+1,j) - 2.d0*f(i,j))/(2.d0*delta)
+        elseif( f(i+1,j+1) == f_min )then ! northeast corner point missing: apply value from n.w. point
+            dfdx_2d_stag_os = ( 2.d0*f(i+1,j) - f(i,j) - f(i,j+1) )/(2.d0*delta)
+        endif
+
+    elseif( n_ice == 2 )then
+
+        ! side; back up and take gradient from points one set of cells in OR use only the single set of
+        ! cells available along the differencing direction
+        if( f(i,j) == f_min .and. f(i,j+1) == f_min )then ! west cells empty
+            dfdx_2d_stag_os = (f(i+2,j) + f(i+2,j+1) - f(i+1,j+1) - f(i+1,j))/(2.d0*delta)
+        elseif( f(i+1,j) == f_min .and. f(i+1,j+1) == f_min )then ! east cells empty
+            dfdx_2d_stag_os = (f(i,j) + f(i,j+1) - f(i-1,j) - f(i-1,j+1))/(2.d0*delta)
+        elseif( f(i,j+1) == f_min .and. f(i+1,j+1) == f_min )then ! north cells empty
+            dfdx_2d_stag_os = (f(i+1,j) - f(i,j) )/(delta)
+        elseif( f(i,j) == f_min .and. f(i+1,j) == f_min )then ! south cells empty
+            dfdx_2d_stag_os = (f(i+1,j+1) - f(i,j+1) )/(delta)
+        endif
+
+    elseif( n_ice == 1 )then
+
+        ! isolated; treat by assuming it is part of a 3 block for which the rest of the values are not
+        ! contained in the local 2x2 block with indices i:i+1, j:j+1
+        if( f(i,j) /= f_min .and. f(i+1,j) == f_min .and. f(i+1,j+1) == f_min .and. f(i,j+1) == f_min)then
+            ! a northeast corner
+            dfdx_2d_stag_os = ( f(i,j) - f(i-1,j) ) / (delta)
+        elseif( f(i,j) == f_min .and. f(i+1,j) /= f_min .and. f(i+1,j+1) == f_min .and. f(i,j+1) == f_min)then
+            ! a northwest corner
+            dfdx_2d_stag_os = ( f(i+2,j) - f(i+1,j) ) / (delta)
+        elseif( f(i,j) == f_min .and. f(i+1,j) == f_min .and. f(i+1,j+1) /= f_min .and. f(i,j+1) == f_min)then
+            ! a southwest corner
+            dfdx_2d_stag_os = ( f(i+2,j+1) - f(i+1,j+1) ) / (delta)
+        elseif( f(i,j) == f_min .and. f(i+1,j) == f_min .and. f(i+1,j+1) == f_min .and. f(i,j+1) /= f_min)then
+            ! a southeast corner
+            dfdx_2d_stag_os = ( f(i,j+1) - f(i-1,j+1) ) / (delta)
+        endif
+
+    endif
+
+  end function dfdx_2d_stag_os
+
+!----------------------------------------------------------------------------
+
+  function dfdy_2d_stag_os(f_in, i, j, delta, thck, thklim )
+
+    !*SFP* altered/expanded version of dfdy_2d_stag that uses approx. one-sided
+    ! diffs at physical domain edges so as not to overestimate grads there.
+    ! Cells with thck <= thklim are zeroed and treated as "empty"; the stencil
+    ! is then chosen from how many of the four surrounding cells hold ice.
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f_in, thck
+    integer, intent(in) :: i, j
+    real(dp), intent(in) :: delta, thklim
+    real(dp) :: dfdy_2d_stag_os
+
+    real(dp), dimension(2,2) :: f_array
+    real(dp), dimension(1:size(f_in,1),1:size(f_in,2)) :: f
+    real(dp) :: f_min
+    integer :: n_ice   ! number of nonzero (ice-covered) cells in the 2x2 block
+
+    ! initialize to zeros
+    dfdy_2d_stag_os = 0.d0; f_array = 0.d0; f_min = 0.d0
+
+    f = f_in
+
+    ! zero out thin/absent ice so it is excluded from the stencil below
+    where( thck <= thklim )
+        f = 0.d0
+    end where
+
+    f_array(1,1) = f(i,j); f_array(2,1) = f(i+1,j); f_array(1,2) = f(i,j+1); f_array(2,2) = f(i+1,j+1);
+
+    ! Count ice-covered cells once (replaces the repeated
+    ! sum(f_array/f_array, MASK=...) trick; exactly equivalent).
+    n_ice = count( f_array /= 0.d0 )
+
+    if( n_ice == 4 )then
+
+        ! normal differencing for interior points
+        dfdy_2d_stag_os = (f(i,j+1) + f(i+1,j+1) - f(i,j) - f(i+1,j))/(2.d0*delta)
+
+    elseif( n_ice == 3 ) then
+
+        ! corner; use 2x next closest value
+        if( f(i,j) == f_min )then ! southwest corner point missing: apply value from s.e. point
+            dfdy_2d_stag_os = (f(i,j+1) + f(i+1,j+1) - 2.d0*f(i+1,j))/(2.d0*delta)
+        elseif( f(i+1,j) == f_min )then ! southeast corner point missing: apply value from s.w. point
+            dfdy_2d_stag_os = (f(i,j+1) + f(i+1,j+1) - 2.d0*f(i,j))/(2.d0*delta)
+        elseif( f(i,j+1) == f_min )then ! northwest corner point missing: apply value from n.e. point
+            dfdy_2d_stag_os = ( 2.d0*f(i+1,j+1) - f(i,j) - f(i+1,j))/(2.d0*delta)
+        elseif( f(i+1,j+1) == f_min )then ! northeast corner point missing: apply value from n.w. point
+            dfdy_2d_stag_os = ( 2.d0*f(i,j+1) - f(i,j) - f(i+1,j))/(2.d0*delta)
+        endif
+
+    elseif( n_ice == 2 )then
+
+        ! side; back up and take gradient from points one set of cells in OR use only the single set of
+        ! cells available along the differencing direction
+        if( f(i,j) == f_min .and. f(i,j+1) == f_min )then ! west cells empty
+            dfdy_2d_stag_os = (f(i+1,j+1) - f(i+1, j))/(delta)
+        elseif( f(i+1,j) == f_min .and. f(i+1,j+1) == f_min )then ! east cells empty
+            dfdy_2d_stag_os = (f(i,j+1) - f(i,j) )/(delta)
+        elseif( f(i,j+1) == f_min .and. f(i+1,j+1) == f_min )then ! north cells empty
+            dfdy_2d_stag_os = (f(i,j) + f(i+1,j) - f(i,j-1) - f(i+1,j-1))/(2.d0*delta)
+        elseif( f(i,j) == f_min .and. f(i+1,j) == f_min )then ! south cells empty
+            dfdy_2d_stag_os = (f(i,j+2) + f(i+1,j+2) - f(i,j+1) - f(i+1,j+1))/(2.d0*delta)
+        endif
+
+    elseif( n_ice == 1 ) then
+
+        ! isolated; treat by assuming it is part of a 3 block for which the rest of the values are not
+        ! contained within the local 2x2 block with indices i:i+1, j:j+1
+        if( f(i,j) /= f_min .and. f(i+1,j) == f_min .and. f(i+1,j+1) == f_min .and. f(i,j+1) == f_min )then
+            ! a northeast corner
+            dfdy_2d_stag_os = ( f(i,j) - f(i,j-1) ) / (delta)
+        elseif( f(i,j) == f_min .and. f(i+1,j) /= f_min .and. f(i+1,j+1) == f_min .and. f(i,j+1) == f_min )then
+            ! a northwest corner
+            dfdy_2d_stag_os = ( f(i+1,j) - f(i+1,j-1) ) / (delta)
+        elseif( f(i,j) == f_min .and. f(i+1,j) == f_min .and. f(i+1,j+1) /= f_min .and. f(i,j+1) == f_min )then
+            ! a southwest corner
+            dfdy_2d_stag_os = ( f(i+1,j+2) - f(i+1,j+1) ) / (delta)
+        elseif( f(i,j) == f_min .and. f(i+1,j) == f_min .and. f(i+1,j+1) == f_min .and. f(i,j+1) /= f_min )then
+            ! a southeast corner
+            dfdy_2d_stag_os = ( f(i,j+2) - f(i,j+1) ) / (delta)
+        endif
+
+    endif
+
+  end function dfdy_2d_stag_os
+
+!----------------------------------------------------------------------------
+
+  !> x-derivative at (i,j) by a second-order one-sided difference over
+  !> points i-2, i-1, i (suitable at the maximum-x boundary; requires i >= 3).
+
+  function dfdx_2d_upwind(f, i, j, delta) result(deriv)
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f   ! field on the scalar grid
+    integer, intent(in) :: i,j                  ! evaluation point
+    real(dp), intent(in) :: delta               ! grid spacing in x
+    real(dp) :: deriv
+    ! One-sided 3-point stencil with weights (1/2, -2, 3/2)/delta.
+    deriv = (.5d0 * f(i-2,j) - 2.d0 * f(i-1, j) + 1.5d0 * f(i, j))/delta
+  end function dfdx_2d_upwind
+
+!----------------------------------------------------------------------------
+
+  !> y-derivative at (i,j) by a second-order one-sided difference over
+  !> points j-2, j-1, j (suitable at the maximum-y boundary; requires j >= 3).
+
+  function dfdy_2d_upwind(f, i, j, delta) result(deriv)
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f   ! field on the scalar grid
+    integer, intent(in) :: i,j                  ! evaluation point
+    real(dp), intent(in) :: delta               ! grid spacing in y
+    real(dp) :: deriv
+    ! One-sided 3-point stencil with weights (1/2, -2, 3/2)/delta.
+    deriv = (.5d0 * f(i,j-2) - 2.d0 * f(i, j-1) + 1.5d0 * f(i, j))/delta
+  end function dfdy_2d_upwind
+
+!----------------------------------------------------------------------------
+
+  !> x-derivative at (i,j) by a second-order one-sided difference over
+  !> points i, i+1, i+2 (suitable at the minimum-x boundary;
+  !> requires i <= size(f,1) - 2).
+
+  function dfdx_2d_downwind(f, i, j, delta) result(deriv)
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f   ! field on the scalar grid
+    integer, intent(in) :: i,j                  ! evaluation point
+    real(dp), intent(in) :: delta               ! grid spacing in x
+    real(dp) :: deriv
+    ! One-sided 3-point stencil with weights (-3/2, 2, -1/2)/delta.
+    deriv = (-1.5d0 * f(i, j) + 2.d0 * f(i+1, j) - .5d0 * f(i+2, j))/delta
+  end function dfdx_2d_downwind
+
+!----------------------------------------------------------------------------
+
+  !> y-derivative at (i,j) by a second-order one-sided difference over
+  !> points j, j+1, j+2 (suitable at the minimum-y boundary;
+  !> requires j <= size(f,2) - 2).
+
+  function dfdy_2d_downwind(f, i, j, delta) result(deriv)
+    implicit none
+    real(dp), dimension(:,:), intent(in) :: f   ! field on the scalar grid
+    integer, intent(in) :: i,j                  ! evaluation point
+    real(dp), intent(in) :: delta               ! grid spacing in y
+    real(dp) :: deriv
+    ! One-sided 3-point stencil with weights (-3/2, 2, -1/2)/delta.
+    deriv = (-1.5d0 * f(i, j) + 2.d0 * f(i, j+1) - .5d0 * f(i, j+2))/delta
+  end function dfdy_2d_downwind
+
+!----------------------------------------------------------------------------
+
+ !------------------------------------------------------------------
+ !First Derivative Estimates, Second Order, 3D
+ !------------------------------------------------------------------
+
+  !> Centred x-derivative at horizontal point (i,j) on level k.
+  !> Note the (z,x,y) index order of f.
+
+  function dfdx_3d(f, i, j, k, delta) result(deriv)
+    implicit none
+    real(dp), dimension(:,:,:), intent(in) :: f   ! field indexed (z,x,y)
+    integer, intent(in) :: i,j,k
+    real(dp), intent(in) :: delta                 ! grid spacing in x
+    real(dp) :: deriv
+    deriv = (-.5d0/delta)*f(k, i-1, j) + (.5d0/delta)*f(k, i+1, j)
+  end function dfdx_3d
+
+!----------------------------------------------------------------------------
+
+  !> Centred y-derivative at horizontal point (i,j) on level k.
+  !> Note the (z,x,y) index order of f.
+
+  function dfdy_3d(f, i, j, k, delta) result(deriv)
+    implicit none
+    real(dp), dimension(:,:,:), intent(in) :: f   ! field indexed (z,x,y)
+    integer, intent(in) :: i,j,k
+    real(dp), intent(in) :: delta                 ! grid spacing in y
+    real(dp) :: deriv
+    deriv = (-.5d0/delta)*f(k, i, j-1) + (.5d0/delta)*f(k, i, j+1)
+  end function dfdy_3d
+
+!----------------------------------------------------------------------------
+
+  !> Centred derivative with respect to z at (i,j,k), where the z axis uses
+  !> an irregular grid with level coordinates dz(:).  Standard three-point
+  !> nonuniform-grid formula (second-order); requires 1 < k < size(dz).
+
+  function dfdz_3d_irregular(f, i, j, k, dz)
+    implicit none
+    real(dp), dimension(:,:,:), intent(in) :: f   ! field indexed (z,x,y)
+    integer, intent(in) :: i,j,k
+    real(dp), dimension(:), intent(in) :: dz      ! vertical level coordinates
+    real(dp) :: dfdz_3d_irregular
+
+    ! (Fixed inconsistent upper-case index dz(K+1) and named the end statement.)
+    dfdz_3d_irregular = f(k-1,i,j)*(dz(k) - dz(k+1))/((dz(k) - dz(k-1))*(dz(k+1)-dz(k-1))) + &
+                        f(k, i,j)*(dz(k+1)-2.d0*dz(k)+dz(k-1))/((dz(k)-dz(k-1))*(dz(k+1)-dz(k))) + &
+                        f(k+1,i,j)*(dz(k)-dz(k-1))/((dz(k+1)-dz(k))*(dz(k+1)-dz(k-1)))
+  end function dfdz_3d_irregular
+
+!----------------------------------------------------------------------------
+
+  !> One-sided (upwind, toward smaller k) z-derivative at (i,j,k) on the
+  !> irregular vertical grid defined by deltas(:).  Suitable at the k = nz
+  !> boundary; requires k >= 3.
+
+  function dfdz_3d_upwind_irregular(f, i, j, k, deltas) result(deriv)
+    implicit none
+    real(dp), dimension(:,:,:), intent(in) :: f   ! field indexed (z,x,y)
+    integer, intent(in) :: i,j,k
+    real(dp), dimension(:), intent(in) :: deltas  ! vertical level coordinates
+    real(dp) :: deriv
+    real(dp) :: d01, d02, d12   ! spacings: k-(k-1), k-(k-2), (k-1)-(k-2)
+    d01 = deltas(k) - deltas(k-1)
+    d02 = deltas(k) - deltas(k-2)
+    d12 = deltas(k-1) - deltas(k-2)
+
+    ! Three-point one-sided formula for a nonuniform grid.
+    deriv = f(k-2, i, j) * d01 / (d12 * d02) - &
+            f(k-1, i, j) * d02 / (d01 * d12) + &
+            f(k, i, j) * (2.d0*deltas(k) - deltas(k-1) - deltas(k-2)) / (d01 * d02)
+  end function dfdz_3d_upwind_irregular
+
+!----------------------------------------------------------------------------
+
+ !> First derivative of f with respect to z at point (i,j,k) using a
+ !> one-sided (downwind) three-point stencil reaching forward to k+2,
+ !> suitable near the k-minimum boundary. The z axis uses an irregular
+ !> grid; the differences below imply that deltas holds level
+ !> z-coordinates, not spacings -- NOTE(review): confirm against callers.

+ function dfdz_3d_downwind_irregular(f, i, j, k, deltas)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), dimension(:), intent(in) :: deltas
+ real(dp) :: dfdz_3d_downwind_irregular
+ ! pairwise level separations reused in the stencil weights
+ real(dp) :: zkp1MinusZk, zkp2MinusZk, zkp2MinusZkp1
+ zkp1MinusZk = deltas(k+1) - deltas(k)
+ zkp2MinusZk = deltas(k+2) - deltas(k)
+ zkp2MinusZkp1 = deltas(k+2) - deltas(k+1)
+
+ dfdz_3d_downwind_irregular =f(k, i, j) * (-zkp1MinusZk - zkp2MinusZk)/(zkp1MinusZk * zkp2MinusZk) + &
+ f(k+1, i, j) * zkp2MinusZk / (zkp2MinusZkp1 * zkp1MinusZk) - &
+ f(k+2, i, j) * zkp1MinusZk / (zkp2MinusZkp1 * zkp2MinusZk)
+ end function
+
+!----------------------------------------------------------------------------
+
+ !> First derivative of f with respect to x, evaluated at the equivalent
+ !> point of the staggered grid: the centered x-differences of the two
+ !> neighbouring columns j and j+1 are averaged, giving the value at the
+ !> cell corner.

+ function dfdx_3d_stag(f, i, j, k, delta)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), intent(in) :: delta
+ real(dp) :: dfdx_3d_stag
+ real(dp) :: stencil
+ ! sum of the two x-differences, one per unstaggered column
+ stencil = f(k, i+1, j) + f(k, i+1, j+1) - f(k, i, j) - f(k, i, j+1)
+ dfdx_3d_stag = stencil/(2.d0*delta)
+ end function dfdx_3d_stag
+
+!----------------------------------------------------------------------------
+
+ !> First derivative of f with respect to y, evaluated at the equivalent
+ !> point of the staggered grid: the centered y-differences of the two
+ !> neighbouring rows i and i+1 are averaged, giving the value at the
+ !> cell corner.

+ function dfdy_3d_stag(f, i, j, k, delta)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), intent(in) :: delta
+ real(dp) :: dfdy_3d_stag
+ real(dp) :: stencil
+ ! sum of the two y-differences, one per unstaggered row
+ stencil = f(k, i, j+1) + f(k, i+1, j+1) - f(k, i, j) - f(k, i+1, j)
+ dfdy_3d_stag = stencil/(2.d0*delta)
+ end function dfdy_3d_stag
+
+!----------------------------------------------------------------------------
+
+ !> First derivative of f with respect to x at (i,j,k) using a
+ !> second-order one-sided (upwind) difference that reaches back to i-2,
+ !> suitable for maximum boundaries:
+ !> df/dx ~ ( f(i-2) - 4 f(i-1) + 3 f(i) ) / (2 delta)

+ function dfdx_3d_upwind(f, i, j, k, delta)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), intent(in) :: delta
+ real(dp) :: dfdx_3d_upwind
+ ! 0.5, -2, 1.5 are the 3-point backward-difference weights divided by 2
+ dfdx_3d_upwind = (.5d0 * f(k, i-2, j) - 2.d0 * f(k, i-1, j) + 1.5d0 * f(k, i, j))/delta
+ end function dfdx_3d_upwind
+
+!----------------------------------------------------------------------------
+
+ !> First derivative of f with respect to y at (i,j,k) using a
+ !> second-order one-sided (upwind) difference that reaches back to j-2,
+ !> suitable for maximum boundaries:
+ !> df/dy ~ ( f(j-2) - 4 f(j-1) + 3 f(j) ) / (2 delta)

+ function dfdy_3d_upwind(f, i, j, k, delta)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), intent(in) :: delta
+ real(dp) :: dfdy_3d_upwind
+ ! 0.5, -2, 1.5 are the 3-point backward-difference weights divided by 2
+ dfdy_3d_upwind = (.5d0 * f(k, i, j-2) - 2.d0 * f(k, i, j-1) + 1.5d0 * f(k, i, j))/delta
+ end function dfdy_3d_upwind
+
+!----------------------------------------------------------------------------
+
+ !> First derivative of f with respect to x at (i,j,k) using a
+ !> second-order one-sided (downwind) difference that reaches forward to
+ !> i+2, suitable for minimum boundaries:
+ !> df/dx ~ ( -3 f(i) + 4 f(i+1) - f(i+2) ) / (2 delta)

+ function dfdx_3d_downwind(f, i, j, k, delta)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j, k
+ real(dp), intent(in) :: delta
+ real(dp) :: dfdx_3d_downwind
+ ! -1.5, 2, -0.5 are the 3-point forward-difference weights divided by 2
+ dfdx_3d_downwind = (-1.5d0 * f(k, i, j) + 2.d0 * f(k, i+1, j) - .5d0 * f(k, i+2, j))/delta
+ end function dfdx_3d_downwind
+
+!----------------------------------------------------------------------------
+
+ !> First derivative of f with respect to y at (i,j,k) using a
+ !> second-order one-sided (downwind) difference that reaches forward to
+ !> j+2, suitable for minimum boundaries:
+ !> df/dy ~ ( -3 f(j) + 4 f(j+1) - f(j+2) ) / (2 delta)

+ function dfdy_3d_downwind(f, i, j, k, delta)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), intent(in) :: delta
+ real(dp) :: dfdy_3d_downwind
+ ! -1.5, 2, -0.5 are the 3-point forward-difference weights divided by 2
+ dfdy_3d_downwind = (-1.5d0 * f(k, i, j) + 2.d0 * f(k, i, j+1) - .5d0 * f(k, i, j+2))/delta
+ end function dfdy_3d_downwind
+
+!----------------------------------------------------------------------------
+
+ !------------------------------------------------------------------
+ !Second Derivative Estimates, Second Order
+ !------------------------------------------------------------------
+
+ !> Second derivative of the 2d field f with respect to x at (i,j):
+ !> standard centered three-point stencil, second-order accurate.

+ function d2fdx2_2d(f, i, j, delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdx2_2d
+ real(dp) :: stencil
+ ! (f(i+1) + f(i-1) - 2 f(i)) / delta^2
+ stencil = f(i+1,j) + f(i-1,j) - 2.d0 * f(i, j)
+ d2fdx2_2d = stencil/(delta*delta)
+ end function d2fdx2_2d
+
+!----------------------------------------------------------------------------
+
+ !> Second derivative of the 2d field f with respect to x at (i,j) using
+ !> a one-sided (downwind) four-point stencil reaching forward to i+3,
+ !> suitable for minimum boundaries.
+ function d2fdx2_2d_downwind(f,i,j,delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdx2_2d_downwind
+
+ ! (3 f(i) - 7 f(i+1) + 5 f(i+2) - f(i+3)) / (2 delta^2)
+ d2fdx2_2d_downwind = (3.d0*f(i, j) - 7.d0*f(i+1, j) + 5.d0*f(i+2, j) - f(i+3, j)) / (2.d0*delta**2)
+
+ end function d2fdx2_2d_downwind
+
+!----------------------------------------------------------------------------
+
+ !> Second derivative of the 2d field f with respect to x at (i,j) using
+ !> a one-sided (upwind) four-point stencil reaching back to i-3,
+ !> suitable for maximum boundaries.
+ function d2fdx2_2d_upwind(f,i,j,delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdx2_2d_upwind
+
+ ! (3 f(i) - 7 f(i-1) + 5 f(i-2) - f(i-3)) / (2 delta^2)
+ d2fdx2_2d_upwind = (3.d0*f(i, j) - 7.d0*f(i-1, j) + 5.d0*f(i-2, j) - f(i-3, j)) / (2.d0*delta**2)
+
+ end function d2fdx2_2d_upwind
+
+!----------------------------------------------------------------------------
+
+ !> Second derivative of the 2d field f with respect to y at (i,j) using
+ !> a one-sided (downwind) four-point stencil reaching forward to j+3,
+ !> suitable for minimum boundaries.
+ function d2fdy2_2d_downwind(f,i,j,delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdy2_2d_downwind
+
+ ! (3 f(j) - 7 f(j+1) + 5 f(j+2) - f(j+3)) / (2 delta^2)
+ d2fdy2_2d_downwind = (3.d0*f(i, j) - 7.d0*f(i, j+1) + 5.d0*f(i, j+2) - f(i, j+3)) / (2.d0*delta**2)
+
+ end function d2fdy2_2d_downwind
+
+!----------------------------------------------------------------------------
+
+ !> Second derivative of the 2d field f with respect to y at (i,j) using
+ !> a one-sided (upwind) four-point stencil reaching back to j-3,
+ !> suitable for maximum boundaries.
+ function d2fdy2_2d_upwind(f,i,j,delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdy2_2d_upwind
+
+ ! (3 f(j) - 7 f(j-1) + 5 f(j-2) - f(j-3)) / (2 delta^2)
+ d2fdy2_2d_upwind = (3.d0*f(i, j) - 7.d0*f(i, j-1) + 5.d0*f(i, j-2) - f(i, j-3)) / (2.d0*delta**2)
+
+ end function d2fdy2_2d_upwind
+
+!----------------------------------------------------------------------------
+
+ !> Second derivative of f with respect to x at the equivalent point of
+ !> the staggered grid. The sum over the section j:j+1 averages the two
+ !> unstaggered columns adjacent to the staggered point.
+ function d2fdx2_2d_stag(f, i, j, delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdx2_2d_stag
+
+ !This formula can be derived using two central differences
+ !(i to i+2, and i-1 to i+1) to get the derivative at
+ !i and i+1, then applying a central difference to that
+ !in order to get the 2nd derivative at a staggered point
+ d2fdx2_2d_stag = sum(f(i+2, j:j+1) + f(i-1, j:j+1) - f(i+1, j:j+1) - f(i, j:j+1))/(4.d0*delta**2)
+ end function d2fdx2_2d_stag
+
+!----------------------------------------------------------------------------
+
+ !> One-sided (downwind, toward increasing i) second x-derivative at a
+ !> staggered point; averages the four-point downwind stencil over the
+ !> two adjacent unstaggered columns j and j+1.
+ function d2fdx2_2d_stag_downwind(f, i, j, delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdx2_2d_stag_downwind
+
+ d2fdx2_2d_stag_downwind = sum(3.d0*f(i, j:j+1) - 7.d0*f(i+1, j:j+1) + 5.d0*f(i+2, j:j+1) - f(i+3, j:j+1)) / (4.d0*delta**2)
+ end function d2fdx2_2d_stag_downwind
+
+ !> One-sided (upwind, toward decreasing i) second x-derivative at a
+ !> staggered point; averages the four-point upwind stencil over the
+ !> two adjacent unstaggered columns j and j+1.
+ function d2fdx2_2d_stag_upwind(f, i, j, delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdx2_2d_stag_upwind
+
+ d2fdx2_2d_stag_upwind = sum(-3.d0*f(i+1, j:j+1) + 7.d0*f(i, j:j+1) - 5.d0*f(i-1, j:j+1) + f(i-2, j:j+1)) / (4.d0*delta**2)
+ end function d2fdx2_2d_stag_upwind
+
+!----------------------------------------------------------------------------
+
+ !> Second derivative of the 2d field f with respect to y at (i,j):
+ !> standard centered three-point stencil, second-order accurate.

+ function d2fdy2_2d(f, i, j, delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdy2_2d
+ real(dp) :: stencil
+ ! (f(j+1) + f(j-1) - 2 f(j)) / delta^2
+ stencil = f(i, j+1) + f(i, j-1) - 2.d0 * f(i, j)
+ d2fdy2_2d = stencil/(delta*delta)
+ end function d2fdy2_2d
+
+!----------------------------------------------------------------------------
+
+ !> Second derivative of f with respect to y at the equivalent point of
+ !> the staggered grid. The sum over the section i:i+1 averages the two
+ !> unstaggered rows adjacent to the staggered point.
+ function d2fdy2_2d_stag(f, i, j, delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdy2_2d_stag
+
+ !This formula can be derived using two central differences
+ !(j to j+2, and j-1 to j+1) to get the derivative at
+ !j and j+1, then applying a central difference to that
+ !in order to get the 2nd derivative at a staggered point
+ d2fdy2_2d_stag = sum(f(i:i+1, j+2) + f(i:i+1, j-1) - f(i:i+1, j+1) - f(i:i+1, j))/(4.d0*delta**2)
+ end function d2fdy2_2d_stag
+
+!----------------------------------------------------------------------------
+
+ !> One-sided (downwind, toward increasing j) second y-derivative at a
+ !> staggered point; averages the four-point downwind stencil over the
+ !> two adjacent unstaggered rows i and i+1.
+ function d2fdy2_2d_stag_downwind(f, i, j, delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdy2_2d_stag_downwind
+
+ d2fdy2_2d_stag_downwind = sum(3.d0*f(i:i+1, j) - 7.d0*f(i:i+1, j+1) + 5.d0*f(i:i+1, j+2) - f(i:i+1, j+3)) / (4.d0*delta**2)
+ end function d2fdy2_2d_stag_downwind
+
+!----------------------------------------------------------------------------
+
+ !> One-sided (upwind, toward decreasing j) second y-derivative at a
+ !> staggered point; averages the four-point upwind stencil over the
+ !> two adjacent unstaggered rows i and i+1.
+ function d2fdy2_2d_stag_upwind(f, i, j, delta)
+ implicit none
+ real(dp), dimension(:,:), intent(in) :: f
+ integer, intent(in) :: i,j
+ real(dp), intent(in) :: delta
+ real(dp) :: d2fdy2_2d_stag_upwind
+
+ d2fdy2_2d_stag_upwind = sum(-3.d0*f(i:i+1, j+1) + 7.d0*f(i:i+1, j) - 5.d0*f(i:i+1, j-1) + f(i:i+1, j-2)) / (4.d0*delta**2)
+ end function d2fdy2_2d_stag_upwind
+
+!----------------------------------------------------------------------------
+
+ !> Computes the second-derivative fields d2fdx2 = d2f/dx2 and
+ !> d2fdy2 = d2f/dy2 of f on the unstaggered grid. Interior points use
+ !> centered differences; points on the first/last row or column use
+ !> one-sided (downwind at the low edge, upwind at the high edge)
+ !> differences so the stencil never leaves the array. If the optional
+ !> direction_x / direction_y fields are present, their sign selects
+ !> one-sided differencing in the interior as well (>0 downwind,
+ !> <0 upwind, ==0 centered). Halo cells are updated before returning.
+ !>
+ !> Bug fix: the y-derivative calls now pass deltay; previously deltax
+ !> was passed for the y direction, giving a wrong d2fdy2 whenever
+ !> deltax /= deltay (deltay was received but never used).
+ subroutine d2f_field(f, deltax, deltay, d2fdx2, d2fdy2, direction_x, direction_y)
+
+ use parallel
+ implicit none
+
+ real(dp), intent(out), dimension(:,:) :: d2fdx2, d2fdy2 ! output derivative fields
+ real(dp), intent(in), dimension(:,:) :: f ! unstaggered grid
+ real(dp), intent(in) :: deltax, deltay ! grid spacing in x and y
+ real(dp), intent(in), dimension(:,:), optional :: direction_x, direction_y
+ integer :: i,j
+
+!LOOP: all scalar points
+! uses upwinding and downwinding to avoid going out of bounds
+
+ do i = 1,size(f,1)
+ do j = 1,size(f,2)
+
+ !unstaggered grid, x direction
+ if (i == 1) then
+ d2fdx2(i,j) = d2fdx2_2d_downwind(f,i,j,deltax)
+ else if (i == size(f,1)) then
+ d2fdx2(i,j) = d2fdx2_2d_upwind(f,i,j,deltax)
+ else
+ if (present(direction_x)) then
+ if (direction_x(i,j) > 0.d0) then
+ d2fdx2(i,j) = d2fdx2_2d_downwind(f,i,j,deltax)
+ else if (direction_x(i,j) < 0.d0) then
+ d2fdx2(i,j) = d2fdx2_2d_upwind(f,i,j,deltax)
+ else
+ d2fdx2(i,j) = d2fdx2_2d(f,i,j,deltax)
+ end if
+ else
+ d2fdx2(i,j) = d2fdx2_2d(f,i,j,deltax)
+ end if
+ end if
+
+ !unstaggered grid, y direction (note: deltay, not deltax)
+ if (j == 1) then
+ d2fdy2(i,j) = d2fdy2_2d_downwind(f,i,j,deltay)
+ else if (j == size(f,2)) then
+ d2fdy2(i,j) = d2fdy2_2d_upwind(f,i,j,deltay)
+ else
+ if (present(direction_y)) then
+ if (direction_y(i,j) > 0.d0) then
+ d2fdy2(i,j) = d2fdy2_2d_downwind(f,i,j,deltay)
+ else if (direction_y(i,j) < 0.d0) then
+ d2fdy2(i,j) = d2fdy2_2d_upwind(f,i,j,deltay)
+ else
+ d2fdy2(i,j) = d2fdy2_2d(f,i,j,deltay)
+ end if
+ else
+ d2fdy2(i,j) = d2fdy2_2d(f,i,j,deltay)
+ end if
+ end if
+ end do
+ end do
+
+ ! fill halo cells of the freshly computed fields
+ call parallel_halo(d2fdx2)
+ call parallel_halo(d2fdy2)
+
+ end subroutine d2f_field
+
+!----------------------------------------------------------------------------
+
+ !> Computes d2fdx2 = d2f/dx2 and d2fdy2 = d2f/dy2 of f at the points of
+ !> the staggered grid (values sit at cell corners, so every stencil
+ !> averages over the two adjacent rows/columns ew:ew+1 or ns:ns+1).
+ !> Interior staggered points use centered differences (centerew /
+ !> centerns); the first and last staggered rows and columns use
+ !> 2nd-order one-sided differences (boundyew / boundyns), with whichway
+ !> choosing direction and starting index from the boundary side.
+ !> NOTE(review): periodic_x and periodic_y are declared but never
+ !> referenced by the active code -- only the commented-out variant at
+ !> the end uses them; confirm intent before removing them.
+ subroutine d2f_field_stag(f, deltax, deltay, d2fdx2, d2fdy2, periodic_x, periodic_y)
+
+ implicit none
+
+ real(dp), intent(out), dimension(:,:) :: d2fdx2, d2fdy2
+ real(dp), intent(in), dimension(:,:) :: f
+ real(dp), intent(in) :: deltax, deltay
+ logical :: periodic_x, periodic_y
+
+ real(dp) :: dewsq4, dnssq4
+ integer :: ew,ns
+
+ integer :: pt(2)
+ integer :: nsn
+ integer :: ewn
+
+ nsn = size(f,2)
+ ewn = size(f,1)
+
+ ! 4*dx^2 and 4*dy^2: denominators shared by all staggered stencils
+ dewsq4 = 4.0d0 * deltax * deltax
+ dnssq4 = 4.0d0 * deltay * deltay
+
+ d2fdx2 = 0.d0
+ d2fdy2 = 0.d0
+
+ !LOOP - not sure what bounds should be in this subroutine
+
+ ! interior staggered points: centered differences in both directions
+ do ns = 2, nsn-2
+ do ew = 2, ewn-2
+ d2fdx2(ew,ns) = centerew(ew,ns)
+ d2fdy2(ew,ns) = centerns(ew,ns)
+ end do
+ end do
+
+! *** 2nd order boundaries using upwinding
+
+ ! first and last staggered columns (ew = 1 and ew = ewn-1)
+ do ew = 1, ewn-1, ewn-2
+
+ pt = whichway(ew)
+
+ do ns = 2, nsn-2
+ d2fdx2(ew,ns) = boundyew(pt,ns)
+ d2fdy2(ew,ns) = centerns(ew,ns)
+ end do
+
+ end do
+
+ ! first and last staggered rows (ns = 1 and ns = nsn-1)
+ do ns = 1, nsn-1, nsn-2
+
+ pt = whichway(ns)
+
+ do ew = 2, ewn-2
+ d2fdx2(ew,ns) = centerew(ew,ns)
+ d2fdy2(ew,ns) = boundyns(pt,ew)
+ end do
+
+ end do
+
+ ! the four corner points: one-sided in both directions
+ do ns = 1, nsn-1, nsn-2
+ do ew = 1, ewn-1, ewn-2
+ pt = whichway(ew)
+ d2fdx2(ew,ns) = boundyew(pt,ns)
+ pt = whichway(ns)
+ d2fdy2(ew,ns) = boundyns(pt,ew)
+ end do
+ end do
+
+ contains
+
+!----------------------------------------------------------------------------
+
+ !> Centered second x-derivative at staggered point (ew,ns), averaged
+ !> over the two adjacent unstaggered rows ns and ns+1.
+ function centerew(ew,ns)
+
+ implicit none
+
+ real(dp) :: centerew
+ integer ns,ew
+
+ centerew = (sum(f(ew+2,ns:ns+1)) + sum(f(ew-1,ns:ns+1)) - &
+ sum(f(ew+1,ns:ns+1)) - sum(f(ew,ns:ns+1))) / dewsq4
+
+ end function centerew
+
+!----------------------------------------------------------------------------
+
+ !> Centered second y-derivative at staggered point (ew,ns), averaged
+ !> over the two adjacent unstaggered columns ew and ew+1.
+ function centerns(ew,ns)
+
+ implicit none
+
+ real(dp) :: centerns
+ integer ns,ew
+
+ centerns = (sum(f(ew:ew+1,ns+2)) + sum(f(ew:ew+1,ns-1)) - &
+ sum(f(ew:ew+1,ns+1)) - sum(f(ew:ew+1,ns))) / dnssq4
+
+ end function centerns
+
+!----------------------------------------------------------------------------
+
+ !> One-sided second x-derivative at a boundary staggered point.
+ !> pt(1) is the stencil direction (+1 at the low edge, -1 at the high
+ !> edge) and pt(2) the starting ew index, as produced by whichway.
+ function boundyew(pt,ns)
+
+ implicit none
+
+ integer, intent(in) :: pt(2)
+ real(dp) :: boundyew
+ integer ns
+
+ boundyew = pt(1) * (3.d0 * sum(f(pt(2),ns:ns+1)) - 7.d0 * sum(f(pt(2)+pt(1),ns:ns+1)) + &
+ 5.d0 * sum(f(pt(2)+2*pt(1),ns:ns+1)) - sum(f(pt(2)+3*pt(1),ns:ns+1))) / dewsq4
+
+ end function boundyew
+
+!----------------------------------------------------------------------------
+
+ !> One-sided second y-derivative at a boundary staggered point.
+ !> pt(1) is the stencil direction (+1 at the low edge, -1 at the high
+ !> edge) and pt(2) the starting ns index, as produced by whichway.
+ function boundyns(pt,ew)
+
+ implicit none
+
+ integer, intent(in) :: pt(2)
+ real(dp) :: boundyns
+ integer ew
+
+ boundyns = pt(1) * (3.d0 * sum(f(ew:ew+1,pt(2))) - 7.d0 * sum(f(ew:ew+1,pt(2)+pt(1))) + &
+ 5.d0 * sum(f(ew:ew+1,pt(2)+2*pt(1))) - sum(f(ew:ew+1,pt(2)+3*pt(1)))) / dnssq4
+
+ end function boundyns
+
+!----------------------------------------------------------------------------
+
+ !> Maps a boundary index to (direction, start): (/1,1/) at the low
+ !> edge, (/-1,i+1/) at the high edge (stencil then reaches downward).
+ function whichway(i)
+
+ implicit none
+
+ integer, intent(in) :: i
+ integer :: whichway(2)
+
+ if (i == 1) then
+ whichway = (/1,1/)
+ else
+ whichway = (/-1,i+1/)
+ end if
+
+ end function whichway
+
+!----------------------------------------------------------------------------
+ !NOTE: Remove this commented-out code?
+
+! real(dp), dimension(:,:), intent(in) :: f
+! real(dp), dimension(:,:), intent(out) :: d2fdx2, d2fdy2
+! real(dp), intent(in) :: deltax, deltay
+! logical :: periodic_x, periodic_y
+!
+! integer :: nx, x, ny, y
+!
+! nx = size(f, 1)
+! ny = size(f, 2)
+!
+! !NOTE: See the field 1st derivative staggered function for
+! !a discussion of periodic boundary conditions
+!
+! !First compute the values that do not fall on any boundaries
+! !This is the same regardless of whether periodic boundary
+! !conditions are used
+! do x = 1, nx-1
+! do y = 1, ny-1
+! if (x == 1) then
+! d2fdx2(1,y) = d2fdx2_2d_stag_downwind(f, 1, y, deltax)
+! else if (x == nx - 1) then
+! d2fdx2(nx-1, y) = d2fdx2_2d_stag_upwind(f, nx-1, y, deltax)
+! else
+! d2fdx2(x,y) = d2fdx2_2d_stag(f, x, y, deltax)
+! end if
+!
+! if (y == 1) then
+! d2fdy2(x,1) = d2fdy2_2d_stag_downwind(f, x, 1, deltay)
+! else if (y == ny - 1) then
+! d2fdy2(x, ny-1) = d2fdy2_2d_stag_upwind(f, x, ny-1, deltay)
+! else
+! d2fdy2(x,y) = d2fdy2_2d_stag(f, x, y, deltay)
+! end if
+! end do
+! end do
+!
+! !If we are not using periodic boundary conditions, then we need
+! !to use an upwinding scheme to get the values when x = 1, y = 1,
+! !x = nx - 1, or y = ny - 1
+! !If we are using periodic boundary conditions, then compute the
+! !boundaries with input from the periodic conditions. We do not
+! !upwind or downwind. Also, because an extra set of values around
+! !the edges is necessary to correctly maintain periodicity,
+! !we fill in values where x = nx and where y = ny (whereas we
+! !do not with nonperiodic boundaries, as the staggered grid
+! !points fall strictly in the interior of the nonstaggered
+! !grid)
+! do y = 1, ny - 2
+! if (.not.(periodic_x)) then
+! d2fdx2(1,y) = d2fdx2_2d_stag_downwind(f, 1, y, deltax)
+!
+! d2fdx2(nx-1, y) = d2fdx2_2d_stag_upwind(f, nx-1, y, deltax)
+!
+! else
+! !Because of the periodicity, I will simply copy the appropriate values
+! !(e.g. u(1) = u(n-2), u(n-1) = u(2)
+! d2fdx2(1,y) = d2fdx2(nx-2,y)
+! d2fdx2(nx-1,y) = d2fdx2(2,y)
+! end if
+! d2fdy2(1,y) = d2fdy2_2d_stag(f, 1, y, deltay)
+! d2fdy2(nx-1, y) = d2fdy2_2d_stag(f, nx-1, y, deltay)
+! end do
+!
+! !See comments for the periodic x boundary case above; the same
+! !principles apply here.
+! do x=1, nx-2
+! if (.not.(periodic_y)) then
+! d2fdy2(x,1) = d2fdy2_2d_stag_downwind(f, x, 1, deltay)
+! d2fdy2(x, ny-1) = d2fdy2_2d_stag_upwind(f, x, ny-1, deltay)
+! else
+! d2fdy2(x,1) = d2fdy2(x,ny-2)
+! d2fdy2(x,nx-1) = d2fdy2(x,2)
+! end if
+! d2fdx2(x,1) = d2fdx2_2d_stag(f, x, 1, deltax)
+! d2fdx2(x, ny-1) = d2fdx2_2d_stag(f, x, ny-1, deltax)
+! end do
+!
+!
+! !To do: Change this to use the scheme above
+! !We have neglected so far to take care of the four points that occur at the
+! !maximum two indices in x and y. If no periodic boundaries are being used,
+! !we compute the value zt (nx-1, ny-1) using upwinding schemes.
+! if (.not. periodic_x .and. .not. periodic_y) then
+! d2fdx2(nx-1, ny-1) = d2fdx2_2d_stag_upwind(f, nx-1, ny-1, deltax)
+! d2fdy2(nx-1, ny-1) = d2fdy2_2d_stag_upwind(f, nx-1, ny-1, deltay)
+! else if (.not. periodic_x) then
+! !y is periodic - this means we need to compute the derivative
+! !for x=nx-1 and y=ny, ny-1. We will copy and paste
+! !y derivatives (for now), as above, and upwind
+! !the x derivatives
+! d2fdx2(nx-1, ny-1) = d2fdx2_2d_stag_upwind(f, nx-1, ny-1, deltax)
+! d2fdy2(nx-1, ny-1) = sum(f(nx-1:nx, 1) + f(nx-1:nx, ny-2) - f(nx-1:nx, ny) - f(nx-1:nx, ny-1))/(4*deltay**2)
+!
+!
+! d2fdx2(nx-1, ny) = d2fdx2_2d_stag_upwind(f, nx-1, ny, deltax)
+! d2fdy2(nx-1, ny) = sum(f(nx-1:nx, 2) + f(nx-1:nx, ny-1) - f(nx-1:nx, 1) - f(nx-1:nx, ny))/(4*deltay**2)
+!
+! else if (.not. periodic_y) then
+! !See comments for the periodic y case above - we are basically using the same
+! !logic with x and y swapped
+! d2fdx2(nx-1, ny-1) = sum(f(1, ny-1:ny) + f(nx-2, ny-1:ny) - f(nx, ny-1:ny) - f(nx-1, ny-1:ny))/(4*deltax**2)
+! d2fdy2(nx-1, ny-1) = d2fdy2_2d_stag_upwind(f, nx-1, ny-1, deltay)
+!
+! d2fdx2(nx, ny-1) = sum(f(2, ny-1:ny) + f(nx-1, ny-1:ny) - f(1, ny-1:ny) - f(nx, ny-1:ny))/(4*deltax**2)
+! d2fdy2(nx, ny-1) = d2fdy2_2d_stag_upwind(f, nx-1, ny-1, deltay)
+! else
+! !X and Y are periodic; we will use the periodic forms of the above differences
+! !Some of these will get very funky because
+! d2fdx2(nx-1, ny-1) = sum(f(1, ny-1:ny) + f(nx-1, ny-1:ny) - f(nx, ny-1:ny) - f(nx-1, ny-1:ny))/(4*deltax**2)
+! d2fdy2(nx-1, ny-1) = sum(f(nx-1:nx, 1) + f(nx-1:nx, ny-2) - f(nx-1:nx, ny) - f(nx-1:nx, ny-1))/(4*deltay**2)
+!
+! d2fdx2(nx, ny-1) = sum(f(2, ny-1:ny) + f(nx-1, ny-1:ny) - f(1, ny-1:ny) - f(nx, ny-1:ny))/(4*deltax**2)
+! d2fdy2(nx, ny-1) = ((f(nx, 1) + f(nx, ny-2) - f(nx, ny) - f(nx, ny-1)) + &
+! (f(1, 1) + f(1, ny-2) - f(1, ny) - f(1, ny-1)))/(4*deltay**2)
+!
+! d2fdy2(nx-1, ny) = ((f(1, ny) + f(nx-1, ny) - f(nx, ny) - f(nx-1, ny)) + &
+! (f(1, 1) + f(nx-1, 1) - f(nx, 1) - f(nx-1, 1)))/(4*deltax**2)
+! d2fdy2(nx-1, ny) = sum(f(nx-1:nx, 2) + f(nx-1:nx, ny-1) - f(nx-1:nx, 1) - f(nx-1:nx, ny))/(4*deltay**2)
+!
+! d2fdx2(nx, ny) = ((f(2, ny) + f(nx-1, ny) - f(1, ny) - f(nx, ny)) + (f(2, 1) + f(nx-1, 1) - f(1, 1) - f(nx, 1))) / (4*deltax**2)
+! d2fdy2(nx, ny) = ((f(nx, 2) + f(nx, ny-1) - f(nx, 1) - f(nx, ny)) + (f(1, 2) + f(1, ny-1) - f(1, 1) - f(1, ny)))/(4*deltay**2)
+!
+! end if
+
+ end subroutine d2f_field_stag
+
+!----------------------------------------------------------------------------
+
+ !> Mixed second derivative taken first w.r.t. x, then w.r.t. y, at
+ !> (i,j,k): centered cross-difference on a regular grid.

+ function d2fdxy_3d(f, i, j, k, delta_x, delta_y)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), intent(in) :: delta_x, delta_y
+ real(dp) :: d2fdxy_3d
+ real(dp) :: corners
+
+ ! corner stencil: +1 at (i-1,j-1) and (i+1,j+1), -1 at the other two
+ corners = f(k, i-1, j-1) - f(k, i-1, j+1) - f(k, i+1, j-1) + f(k, i+1, j+1)
+ d2fdxy_3d = corners/(4.d0*delta_x*delta_y)
+ end function d2fdxy_3d
+
+!----------------------------------------------------------------------------
+
+ !> Mixed second derivative d2f/dxdz at (i,j,k): a centered x-difference
+ !> is applied to the irregular-grid three-point z-derivative. The
+ !> differences below imply that dz holds level z-coordinates, not
+ !> spacings -- NOTE(review): confirm against callers.
+ function d2fdxz_3d(f, i, j, k, delta_x, dz)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), intent(in) :: delta_x
+ real(dp), dimension(:), intent(in) :: dz
+ real(dp) :: d2fdxz_3d
+
+ ! integer literal 2 in 2*dz(k) is promoted exactly to real(dp)
+ d2fdxz_3d = (.5d0/delta_x) * ( &
+ (f(k-1, i+1, j) - f(k-1, i-1, j)) * (dz(k) - dz(k+1)) / ( (dz(k) - dz(k-1)) * (dz(k+1) - dz(k-1)) ) + &
+ (f(k, i+1, j) - f(k, i-1, j)) * (dz(k+1) + dz(k-1) - 2*dz(k)) / ( (dz(k) - dz(k-1)) * (dz(k+1) - dz(k)) ) + &
+ (f(k+1, i+1, j) - f(k+1, i-1, j)) * (dz(k) - dz(k-1)) / ( (dz(k+1) - dz(k)) * (dz(k+1) - dz(k-1)) ) )
+ end function d2fdxz_3d
+
+ !> Mixed second derivative d2f/dydz at (i,j,k): a centered y-difference
+ !> is applied to the irregular-grid three-point z-derivative.
+ !> NOTE(review): the dummy is named delta_x but it is used as the
+ !> y-direction grid spacing here; consider renaming in a coordinated
+ !> change (renaming now would break keyword-argument callers).
+ function d2fdyz_3d(f, i, j, k, delta_x, dz)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), intent(in) :: delta_x
+ real(dp), dimension(:), intent(in) :: dz
+ real(dp) :: d2fdyz_3d
+
+ d2fdyz_3d = (.5d0/delta_x) * ( &
+ (f(k-1, i, j+1) - f(k-1, i, j-1)) * (dz(k) - dz(k+1)) / ( (dz(k) - dz(k-1)) * (dz(k+1) - dz(k-1)) ) + &
+ (f(k, i, j+1) - f(k, i, j-1)) * (dz(k+1) + dz(k-1) - 2*dz(k)) / ( (dz(k) - dz(k-1)) * (dz(k+1) - dz(k)) ) + &
+ (f(k+1, i, j+1) - f(k+1, i, j-1)) * (dz(k) - dz(k-1)) / ( (dz(k+1) - dz(k)) * (dz(k+1) - dz(k-1)) ) )
+ end function d2fdyz_3d
+
+!----------------------------------------------------------------------------
+
+ !> Second derivative of f with respect to z at (i,j,k), where the z
+ !> axis uses an irregular grid. The differences below imply that
+ !> deltas holds level z-coordinates, not spacings -- NOTE(review):
+ !> confirm against callers. Standard three-point formula for a second
+ !> derivative on a nonuniform grid.

+ function d2fdz2_3d_irregular(f, i, j, k, deltas)
+ implicit none
+ real(dp), dimension(:,:,:), intent(in) :: f
+ integer, intent(in) :: i,j,k
+ real(dp), dimension(:), intent(in) :: deltas
+ real(dp) :: d2fdz2_3d_irregular
+ real(dp) :: zkMinusZkp1, zkMinusZkm1, zkp1MinusZkm1, zkp1MinusZk
+
+ zkMinusZkp1 = deltas(k) - deltas(k+1)
+ zkMinusZkm1 = deltas(k) - deltas(k-1)
+ zkp1MinusZkm1 = deltas(k+1) - deltas(k-1)
+ ! exact sign flip (integer -1 is promoted exactly)
+ zkp1MinusZk = -1 * zkMinusZkp1
+
+
+ ! zkp1Minuszk below is the same variable as zkp1MinusZk
+ ! (Fortran is case-insensitive)
+ d2fdz2_3d_irregular = 2.d0 * f(k-1, i, j) / (zkMinusZkm1 * zkp1MinusZkm1) - &
+ 2.d0 * f(k, i, j) / (zkp1MinusZk * zkMinusZkm1) + &
+ 2.d0 * f(k+1, i, j) / (zkp1Minuszk * zkp1MinusZkm1)
+ end function d2fdz2_3d_irregular
+
+!---------------------------------------------------------------------------------
+
+end module glam_grid_operators
+
+!----------------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/glam_strs2.F90 b/components/cism/glimmer-cism/libglide/glam_strs2.F90
new file mode 100644
index 0000000000..d621304b71
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glam_strs2.F90
@@ -0,0 +1,6571 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glam_strs2.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+! 3d velocity calculation based on Blatter/Pattyn, 1st-order equations, by Tony Payne (Univ.
+! of Bristol) and Steve Price (Univ. of Bristol / Los Alamos Nat. Lab.). Boundary conditions
+! available include periodic (lateral), free surface, zero slip at bed, specified basal
+! traction at bed, and specified basal yield stress at bed (all three of which are implemented
+! through various verions of the specified traction b.c.)
+! include macros for glide mask definitions
+#include "glide_mask.inc"
+#include "config.inc"
+
+!NOTE - Get rid of the globalIDs option.
+! Make it the default for Trilinos, else not used.
+
+!GlobalIDs are for distributed TRILINOS variable IDs
+#ifdef TRILINOS
+#define globalIDs
+#endif
+
+!NOTE: In this module there are chunks of code that are used more than once, for Picard as well as JFNK.
+! It would be better to combine these chunks of code into subroutines that can be called
+! from multiple places in the code--or even better, to remove the extra chunks of code
+! if they are no longer needed.
+! KJE looked into creating a generic initialization solver routine but most of init is passing
+! variables, so it's not worth it IMHO
+
+!***********************************************************************
+module glam_strs2
+!***********************************************************************
+
+use iso_c_binding
+use glimmer_paramets, only : dp
+use glimmer_physcon, only : gn, rhoi, rhoo, grav, pi, scyr
+
+use glimmer_paramets, only : thk0, len0, vel0, vis0, tim0, evs0, tau0
+
+use glimmer_log, only : write_log
+use glide_mask
+use glimmer_sparse_type
+use glimmer_sparse
+use glide_types
+
+implicit none
+
+ logical, save :: lateralboundry = .false.
+ integer, dimension(6), save :: loc_latbc
+
+ real(dp), allocatable, dimension(:,:,:), save :: flwafact
+ real(dp), allocatable, dimension(:), save :: dups
+ real(dp), allocatable, dimension(:,:,:,:,:), save :: corr
+ real(dp), allocatable, dimension(:,:,:,:), save :: usav
+ real(dp), dimension(2), save :: usav_avg
+ real(dp), allocatable, dimension(:,:,:), save :: tvel
+ real(dp), allocatable, dimension(:), save :: dup, dupm
+
+ integer, dimension(:,:), allocatable :: uindx
+
+ ! regularization constant for eff. strain rate to avoid infinite visc.
+ ! NOTE: would be good to explore how small this really needs to be, as
+ ! code converges much better when this value is made larger.
+
+ !SCALING - This corresponds to an effective min strain rate of 1.0d-20 s^(-1).
+ real(dp), parameter :: effstrminsq = (1.0d-20 * tim0)**2
+ real(dp) :: homotopy = 0.d0
+
+ real(dp) :: p1, p2, p3 ! variants of Glen's "n" (e.g. n, (1-n)/n)
+ real(dp) :: dew2, dns2, dew4, dns4
+
+ ! combinations of coeffs. used in momentum balance calcs
+ real(dp) :: cdxdy
+ real(dp), dimension(2) :: cdxdx
+ real(dp), dimension(:), allocatable :: cdsds, cds
+ real(dp), dimension(:), allocatable :: cvert, fvert
+ real(dp), dimension(:,:), allocatable :: cdsdx
+
+ real(dp), dimension(:), allocatable :: dsigmadew, dsigmadns
+ real(dp), dimension(:), allocatable :: d2sigmadew2, d2sigmadns2, d2sigmadewdns
+ real(dp) :: d2sigmadewdsigma, d2sigmadnsdsigma
+
+ ! vectors of coeffs. used for switching symmetric solution subroutines between calc.
+ ! of x-comp of vel or y-comp of vel
+ real(dp), dimension(2), parameter :: &
+ oneorfour = (/ 1.d0, 4.d0 /), &
+ fourorone = (/ 4.d0, 1.d0 /), &
+ oneortwo = (/ 1.d0, 2.d0 /), &
+ twoorone = (/ 2.d0, 1.d0 /)
+
+ real(dp), allocatable, dimension(:,:,:), save :: ughost
+ real(dp), allocatable, dimension(:,:,:), save :: vghost
+
+ ! coeff. for forward differencing template, used for stress bcs at lateral boundaries
+ real(dp), dimension(3), parameter :: &
+ onesideddiff = (/ -3.d0, 4.d0, -1.d0 /)
+
+ ! geometric 2nd and cross-derivs
+ real(dp), dimension(:,:), allocatable :: &
+ d2thckdew2, d2usrfdew2, d2thckdns2, d2usrfdns2, d2thckdewdns, d2usrfdewdns
+
+ real(dp), dimension(:,:,:,:), allocatable :: ghostbvel
+
+ ! variables for use in sparse matrix calculation
+ real(dp), dimension(:), allocatable :: pcgval, rhsd, rhsx
+ integer, dimension(:), allocatable :: pcgcol, pcgrow
+ integer, dimension(2) :: pcgsize
+ integer :: ct_nonzero ! number of nonzero matrix entries
+
+!*SFP* NOTE: these redefined here so that they are "in scope" and can avoid being passed as args
+ integer :: whatsparse ! needed for putpgcg()
+ integer :: nonlinear ! flag for indicating type of nonlinear iteration (Picard vs. JFNK)
+
+ logical, save :: inisoln = .false. ! true only if a converged solution (velocity fields) exists
+
+ real(dp) :: linearSolveTime = 0.d0
+ real(dp) :: totalLinearSolveTime = 0.d0 ! total linear solve time
+
+ ! AGS: partition information for distributed solves
+ ! JEFF: Moved to module-level scope for globalIDs
+ integer, allocatable, dimension(:) :: myIndices
+ real(dp), allocatable, dimension(:) :: myX, myY, myZ
+ integer, allocatable, dimension(:,:,:) :: loc2_array
+ integer :: mySize = -1
+
+ ! JEFF: Debugging Output Variables
+ integer :: overallloop = 1
+
+!***********************************************************************
+
+contains
+
+!***********************************************************************
+
+! WJS: The following routine doesn't compile on gnu; commenting it out for now
+! subroutine dumpvels(name, uvel, vvel)
+! !JEFF routine to track the uvel and vvel calculations in Picard Iteration for debugging
+! !3/28/11
+! use parallel
+! implicit none
+
+! character(*) :: name
+! real(dp), dimension(:,:,:), intent(inout) :: uvel, vvel ! horiz vel components: u(z), v(z)
+
+! if (distributed_execution()) then
+! if (this_rank == 0) then
+! write(*,*) name, "Proc 0 uvel & vvel (1,7:8,16:17)", uvel(1,7:8,16:17), vvel(1,7:8,16:17)
+! else
+! write(*,*) name, "Proc 1 uvel & vvel (1,7:8,0:1)", uvel(1,7:8,0:1), vvel(1,7:8,0:1)
+! endif
+! else
+! write(*,*) name, "Parallel uvel & vvel (1,5:6,15:16)", uvel(1,5:6,15:16), vvel(1,5:6,15:16)
+! endif
+! end subroutine dumpvels
+
+
+!> Allocates the module-level work arrays of glam_strs2 and initializes
+!> the constants used by the Blatter/Pattyn first-order velocity solver.
+!> Must be called once before glam_velo_solver.
+!>
+!> @param ewn, nsn, upn grid dimensions (east-west, north-south, vertical)
+!> @param dew, dns horizontal grid spacing
+!> @param sigma vertical sigma-coordinate levels (length upn)
+subroutine glam_velo_init( ewn, nsn, upn, &
+ dew, dns, &
+ sigma)
+
+ ! Allocate arrays and initialize variables.
+ implicit none
+
+ integer, intent(in) :: ewn, nsn, upn
+ real(dp), intent(in) :: dew, dns
+
+ real(dp), dimension(:), intent(in) :: sigma
+
+ integer :: up ! implied-do index for sigma spacing vectors
+
+ allocate( dup(upn) )
+ allocate( dupm(upn) )
+ allocate( cvert(upn) )
+ allocate( cdsdx(upn,2) )
+ allocate( cdsds(upn) )
+ allocate( cds(upn) )
+ allocate( fvert(upn) )
+ allocate(ughost(2,ewn-1,nsn-1))
+ allocate(vghost(2,ewn-1,nsn-1))
+
+ ! NOTE: "dup", the sigma coordinate spacing is defined as a vector to allow it to
+ ! be read in from file for use with non-constant vertical grid spacing. Currently, this
+ ! is not working, so the code will not give accurate results if the sigma coordinate is
+ ! not regularly spaced.
+ ! (every entry is set to sigma(2)-sigma(1), i.e. uniform spacing assumed)
+ dup = (/ ( (sigma(2)-sigma(1)), up = 1, upn) /)
+ dupm = - 0.25d0 / dup
+
+ ! p1 = -1/n - used with rate factor in eff. visc. def.
+ ! p2 = (1-n)/2n - used with eff. strain rate in eff. visc. def.
+ ! p3 = (1-n)/n !NOTE - Remove p3? It is never used.
+
+ p1 = -1.d0 / real(gn,dp)
+ p2 = (1.d0 - real(gn,dp)) / (2.d0 * real(gn,dp))
+ p3 = (1.d0 - real(gn,dp)) / real(gn,dp)
+
+ dew2 = 2.d0 * dew; dns2 = 2.d0 * dns ! 2x the standard grid spacing
+ dew4 = 4.d0 * dew; dns4 = 4.d0 * dns ! 4x the standard grid spacing
+
+ allocate(dsigmadew(upn), dsigmadns(upn))
+ allocate(d2sigmadew2(upn),d2sigmadns2(upn),d2sigmadewdns(upn))
+
+ ! geometric 2nd and cross-derivative fields live on the staggered grid
+ allocate (d2thckdew2(ewn-1,nsn-1),d2thckdns2(ewn-1,nsn-1),d2thckdewdns(ewn-1,nsn-1), &
+ d2usrfdew2(ewn-1,nsn-1),d2usrfdns2(ewn-1,nsn-1),d2usrfdewdns(ewn-1,nsn-1))
+
+ allocate(flwafact(1:upn-1,ewn,nsn)) ! NOTE: the vert dim here must agree w/ that of 'efvs'
+
+ allocate(dups(upn))
+
+ allocate(ghostbvel(2,3,ewn-1,nsn-1)) !! for saving the fictious basal vels at the bed !!
+
+ ghostbvel(:,:,:,:) = 0.d0
+
+ flwafact = 0.d0
+
+ ! define constants used in various FD calculations associated with the
+ ! subroutine 'findcoefst'
+ call calccoeffsinit(upn, dew, dns)
+
+ ! forward differences of sigma; last entry padded with zero
+ dups = (/ (sigma(up+1) - sigma(up), up=1,upn-1), 0.d0 /)
+
+end subroutine glam_velo_init
+
+
+!***********************************************************************
+
+! This is the driver subroutine, called from subroutine glissade_velo_driver in
+! module glissade_velo.F90.
+
+! Driver for the Picard (fixed-point) iteration of the Payne/Price
+! higher-order stress balance.  Alternates y- and x-direction elliptic
+! solves (via SLAP or standalone Trilinos), applies an unstable-manifold
+! correction, and iterates until either the L2-norm or the max-norm
+! residual (per whichresid) falls below its target, or cmax iterations.
+!
+! On exit: uvel/vvel hold the converged velocity field, uflx/vflx the
+! vertically integrated fluxes, efvs the effective viscosity, and
+! btraction the consistent basal tractions.
+!
+! NOTE(review): this routine reads and writes substantial module-level
+! state (uindx, tvel, corr, usav, pcgsize, pcgrow/pcgcol/pcgval, rhsd,
+! whatsparse, nonlinear, inisoln, overallloop, ghostbvel, ughost/vghost,
+! d2*dew2/d2*dns2/d2*dewdns arrays) declared outside this chunk --
+! confirm against the module header.
+subroutine glam_velo_solver(ewn, nsn, upn, &
+ dew, dns, &
+ sigma, stagsigma, &
+ thck, usrf, &
+ lsrf, topg, &
+ dthckdew, dthckdns, &
+ dusrfdew, dusrfdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, flwa, &
+ btraction, &
+ umask, &
+ whichbabc, &
+ whichefvs, &
+ whichresid, &
+ whichnonlinear, &
+ whichsparse, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ uvel, vvel, &
+ uflx, vflx, &
+ efvs )
+
+ use parallel
+ use glimmer_paramets, only: GLC_DEBUG
+
+ implicit none
+
+ integer, intent(in) :: ewn, nsn, upn
+ integer, dimension(:,:), intent(inout) :: umask
+
+ !NOTE - Make umask intent in?
+ ! NOTE: 'inout' status to 'umask' should be changed to 'in' at some point,
+ ! but for now this allows for some minor internal hacks to CISM-defined mask
+
+ real(dp), intent(in) :: dew, dns
+
+ real(dp), dimension(:), intent(in) :: sigma, stagsigma ! sigma coords
+ real(dp), dimension(:,:), intent(in) :: thck, usrf, lsrf, topg ! geom vars
+ real(dp), dimension(:,:), intent(in) :: dthckdew, dthckdns ! thick grads
+ real(dp), dimension(:,:), intent(in) :: dusrfdew, dusrfdns ! upper surf grads
+ real(dp), dimension(:,:), intent(in) :: dlsrfdew, dlsrfdns ! basal surf grads
+ real(dp), dimension(:,:), intent(in) :: stagthck ! staggered thickness
+ real(dp), dimension(:,:,:), intent(inout) :: btraction ! consistent basal traction array
+ real(dp), dimension(:,:,:), intent(in) :: flwa ! flow law rate factor
+
+ real(dp), dimension(:,:), intent(inout) :: beta ! basal traction coefficient, computed in calcbeta
+ real(dp), dimension(:,:), intent(in) :: mintauf ! specified basal yield stress, used in calcbeta (if specified in config file)
+ real(dp), intent(in) :: beta_const ! spatially uniform beta (Pa yr/m)
+ real(dp), intent(in), dimension(:,:) :: bwat ! basal water depth
+ type(glide_basal_physics), intent(inout) :: basal_physics ! basal physics object
+
+ integer, intent(in) :: whichbabc ! options for beta basal boundary condition
+ integer, intent(in) :: whichefvs ! options for efvs calculation (calculate it or make it uniform)
+ integer, intent(in) :: whichresid ! options for method to use when calculating vel residul
+ integer, intent(in) :: whichnonlinear ! options for which method for doing elliptic solve
+ integer, intent(in) :: whichsparse ! options for which method for doing elliptic solve
+
+ real(dp), dimension(:,:,:), intent(inout) :: uvel, vvel ! horiz vel components: u(z), v(z)
+ real(dp), dimension(:,:), intent(out) :: uflx, vflx ! horiz fluxs: u_bar*H, v_bar*H
+ real(dp), dimension(:,:,:), intent(out) :: efvs ! effective viscosity
+
+ integer :: ew, ns, up ! counters for horiz and vert do loops
+
+ real(dp), parameter :: minres = 1.0d-4 ! assume vel fields converged below this resid
+ real(dp), parameter :: NL_tol = 1.0d-6 ! to have same criterion than with JFNK
+ ! NOTE(review): 'save' makes resid persist across calls; it is re-set to 1.d0
+ ! below before each use, so the save attribute may be vestigial -- confirm.
+ real(dp), save, dimension(2) :: resid ! vector for storing u resid and v resid
+
+ integer, parameter :: cmax = 100 ! max no. of iterations
+ integer :: counter, linit ! iteration counter, ???
+ character(len=100) :: message ! error message
+
+ ! variables used for incorporating generic wrapper to sparse solver
+ type(sparse_matrix_type) :: matrix
+ real(dp), dimension(:), allocatable :: answer, uk_1, vk_1, F
+ real(dp) :: err, L2norm, L2square, NL_target
+ integer :: iter, pic
+ integer , dimension(:), allocatable :: g_flag ! jfl flag for ghost cells
+
+ ! variables for when to stop outer loop when using Picard for nonlinear iteration
+ real(dp) :: outer_it_criterion, outer_it_target
+
+ ! variables for debugging output JEFF
+ ! (only referenced from commented-out debug code below)
+ character(3) :: loopnum
+ character(3) :: looptime
+ real(dp) :: multiplier
+
+ ! t_startf/t_stopf bracket named timer regions (GPTL-style timers)
+ call t_startf("PICARD_pre")
+ ! RN_20100125: assigning value for whatsparse, which is needed for putpcgc()
+!NOTE - Can we get rid of whatsparse and use only whichsparse?
+ whatsparse = whichsparse
+
+ ! assign value for nonlinear iteration flag
+ nonlinear = whichnonlinear
+
+!NOTE - Note: d2usrfdew2 and d2usrfdns2 are needed at all locally owned velocity points.
+! I am not sure where and why the upwind 2nd derivatives are computed.
+!NOTE MJH These 2nd derivatives are already calculated in subroutine geometry_derivs(model) in glide_thck.
+!These calls could either be deleted and just use those previous calculations, or possibly use that module here.
+!First it needs to be determined that they are making the same (or not) calculation!
+
+ ! calc geometric 2nd deriv. for generic input variable 'ipvr', returns 'opvr'
+ call geom2ders(ewn, nsn, dew, dns, usrf, stagthck, d2usrfdew2, d2usrfdns2)
+ call geom2ders(ewn, nsn, dew, dns, thck, stagthck, d2thckdew2, d2thckdns2)
+
+ ! calc geometric 2nd cross-deriv. for generic input variable 'ipvr', returns 'opvr'
+ call geom2derscros(ewn, nsn, dew, dns, thck, stagthck, d2thckdewdns)
+ call geom2derscros(ewn, nsn, dew, dns, usrf, stagthck, d2usrfdewdns)
+
+ allocate(uindx(ewn-1,nsn-1))
+
+ ! If a point from the 2d array 'mask' is associated with a non-zero ice thickness
+ ! assign it a unique number. If not assign a zero.
+ ! Side effect: pcgsize(1) is set to the number of active unknowns.
+ uindx = indxvelostr(ewn, nsn, upn, umask,pcgsize(1))
+
+!!!!!!!!!! Boundary conditions HACKS section !!!!!!!!!!!!!
+
+!NOTE - Remove this commented-out code if no longer needed.
+
+!! A hack of the boundary condition mask needed for the Ross Ice Shelf exp.
+!! The quick check of whether or not this is the Ross experiment is to look
+!! at the domain size.
+! if( ewn == 151 .and. nsn == 115 )then
+! call not_parallel(__FILE__, __LINE__)
+! do ns=1,nsn-1; do ew=1,ewn-1
+! if( umask(ew,ns) == 21 .or. umask(ew,ns) == 5 )then
+! umask(ew,ns) = 73
+! endif
+! end do; end do
+! end if
+
+!! hack for basal processes submodel test case, to avoid floatation at downstream
+!! end yet still allow for application of a floating ice bc there
+! do ns=1,nsn-1; do ew=1,ewn-1
+! if( umask(ew,ns) == 37 )then
+! umask(ew,ns) = 41
+! endif
+! end do; end do
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ ! allocate space for storing temporary across-flow comp of velocity
+ allocate(tvel(upn,ewn-1,nsn-1))
+ tvel = 0.d0
+
+ ! allocate space for variables used by 'mindcrash' function (unstable manifold correction)
+ allocate(corr(upn,ewn-1,nsn-1,2,2),usav(upn,ewn-1,nsn-1,2))
+ ! and initialize them
+ corr = 0.d0
+ usav = 0.d0
+
+ ! make an initial guess at the size of the sparse matrix
+ ! (rough upper bound: ~20 nonzeros per unknown)
+ pcgsize(2) = pcgsize(1) * 20
+
+!==============================================================================
+! RN_20100129: Option to load Trilinos matrix directly bypassing sparse_easy_solve
+!==============================================================================
+
+#ifdef TRILINOS
+ if (whatsparse == STANDALONE_TRILINOS_SOLVER) then
+ if (main_task) write(*,*) "Using GlobalIDs..."
+ ! JEFF: Define myIndices in terms of globalIDs
+ allocate(myIndices(pcgsize(1))) ! myIndices is an integer vector with a unique ID for each layer for ice grid points
+ allocate(myX(pcgsize(1))) ! Coordinates of nodes, used by ML preconditioner
+ allocate(myY(pcgsize(1)))
+ allocate(myZ(pcgsize(1)))
+ call distributed_create_partition(ewn, nsn, (upn + 2) , uindx, pcgsize(1), myIndices, myX, myY, myZ) ! Uses uindx mask to determine ice grid points.
+ mySize = pcgsize(1) ! Set variable for inittrilinos
+
+ !write(*,*) "GlobalIDs myIndices..."
+ !write(*,*) "pcgsize = ", pcgsize(1)
+ !write(*,*) "myIndices = ", myIndices
+ !call parallel_stop(__FILE__, __LINE__)
+
+ ! Now send this partition to Trilinos initialization routines
+ call inittrilinos(20, mySize, myIndices, myX, myY, myZ, comm)
+
+ ! Set if need full solution vector returned or just owned portion
+
+ !No Triad matrix needed in this case -- save on memory alloc
+ pcgsize(2) = 1
+
+ ! JEFF: deallocate myIndices after the solve loop, because used in translation between globalIDs and local indices
+ ! deallocate(myIndices)
+ endif
+#else
+ ! Trilinos was requested but not compiled in: fail loudly.
+ if (whatsparse == STANDALONE_TRILINOS_SOLVER) then
+ write(*,*) 'Error: Trilinos sparse solver requires Trilinos build'
+ stop
+ endif
+#endif
+
+!==============================================================================
+! RN_20100126: End of the block
+!==============================================================================
+
+ ! allocate sparse matrix variables
+ allocate (pcgrow(pcgsize(2)),pcgcol(pcgsize(2)),rhsd(pcgsize(1)), &
+ pcgval(pcgsize(2)))
+
+ allocate(matrix%row(pcgsize(2)), matrix%col(pcgsize(2)), &
+ matrix%val(pcgsize(2)), answer(pcgsize(1)))
+
+ ! uk_1/vk_1: previous-iterate solution vectors used for residual calc;
+ ! F packs both component residuals; g_flag marks ghost entries.
+ allocate( uk_1(pcgsize(1)), vk_1(pcgsize(1)), &
+ F(2*pcgsize(1)), g_flag(pcgsize(1)) ) ! jfl for res calc.
+
+ ! set residual and iteration counter to initial values
+ resid = 1.d0
+ counter = 1
+ L2norm = 1.d20
+
+ ! intialize outer loop test vars
+ outer_it_criterion = 1.d0
+ outer_it_target = 0.d0
+
+ if (main_task) then
+ ! print some info to the screen to update on iteration progress
+ print *, ' '
+ print *, 'Running Payne/Price higher-order dynamics solver'
+ print *, ' '
+ if( whichresid == HO_RESID_L2NORM ) then
+ print *, 'iter # resid (L2 norm) target resid'
+ else
+ print *, 'iter # uvel resid vvel resid target resid'
+ end if
+ print *, ' '
+ endif
+
+ call t_stopf("PICARD_pre")
+ ! ****************************************************************************************
+ ! START of Picard iteration
+ ! ****************************************************************************************
+ call t_startf("PICARD_iter")
+
+ call ghost_preprocess( ewn, nsn, upn, uindx, ughost, vghost, &
+ uk_1, vk_1, uvel, vvel, g_flag) ! jfl_20100430
+
+ ! Picard iteration; continue iterating until resid falls below specified tolerance
+ ! or the max no. of iterations is exceeded
+
+ !JEFF Guarantees at least one loop
+ outer_it_criterion = 1.d0
+ outer_it_target = 0.d0
+
+ do while ( outer_it_criterion >= outer_it_target .and. counter < cmax) ! use L2 norm for resid calculation
+ call t_startf("PICARD_in_iter")
+
+ ! choose outer loop stopping criterion
+ ! (first pass uses dummy values so the loop always executes once)
+ if( counter > 1 )then
+ if( whichresid == HO_RESID_L2NORM )then
+ outer_it_criterion = L2norm
+ outer_it_target = NL_target
+ else
+ outer_it_criterion = maxval(resid)
+ outer_it_target = minres
+ end if
+ else
+ outer_it_criterion = 1.d10
+ outer_it_target = 1.d-12
+ end if
+
+ ! WJS: commenting out the following block, because it leads to lots of extra files,
+ ! which is undesirable even when GLC_DEBUG=.true.
+ ! if (GLC_DEBUG) then
+ ! !JEFF Debugging Output to see what differences in final vvel and tvel.
+ ! write(loopnum,'(i3.3)') counter
+ ! write(Looptime, '(i3.3)') overallloop
+ ! loopnum = trim(loopnum) ! Trying to get rid of spaces in name.
+ ! Looptime = trim(Looptime)
+ ! call distributed_print("uvela_ov"//Looptime//"_pic"//loopnum//"_tsk", uvel)
+
+ ! call distributed_print("vvela_ov"//Looptime//"_pic"//loopnum//"_tsk", vvel)
+
+ ! ! call dumpvels("Before findefvsstr", uvel, vvel)
+
+ ! ! call distributed_print("preefvs_ov"//Looptime//"_pic"//loopnum//"_tsk", efvs)
+ ! end if
+
+ call t_startf("PICARD_findefvsstr")
+ ! calc effective viscosity using previously calc vel. field
+ call findefvsstr(ewn, nsn, upn, &
+ stagsigma, counter, &
+ whichefvs, efvs, &
+ uvel, vvel, &
+ flwa, thck, &
+ dusrfdew, dthckdew, &
+ dusrfdns, dthckdns, &
+ umask)
+ call t_stopf("PICARD_findefvsstr")
+
+ call t_startf("PICARD_findcoefstr1")
+ ! calculate coeff. for stress balance in y-direction
+ ! (component flag = 2; final argument 0 = full assembly pass)
+ call findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ 2, efvs, &
+ vvel, uvel, &
+ thck, dusrfdns, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ uindx, umask, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ 0 )
+ call t_stopf("PICARD_findcoefstr1")
+
+ call t_startf("PICARD_solver_pre1")
+ ! put vels and coeffs from 3d arrays into sparse vector format
+ call solver_preprocess( ewn, nsn, upn, uindx, matrix, answer, vvel )
+ call t_stopf("PICARD_solver_pre1")
+
+!==============================================================================
+! jfl 20100412: residual for v comp: Fv= A(u^k-1,v^k-1)v^k-1 - b(u^k-1,v^k-1)
+!==============================================================================
+
+ !NOTE - Is L2square summed correctly in res_vect?
+ !JEFF - The multiplication Ax is done across all nodes, but Ax - b is only
+ ! computed locally, so L2square needs to be summed.
+ call t_startf("PICARD_res_vect")
+ call res_vect( matrix, vk_1, rhsd, size(rhsd), g_flag, L2square, whichsparse )
+ call t_stopf("PICARD_res_vect")
+
+ ! first half of the combined norm; completed after the u-solve below
+ L2norm = L2square
+ F(1:pcgsize(1)) = vk_1(:)
+
+! call output_res(ewn,nsn,upn,uindx,counter,size(vk_1),vk_1, 2) ! JFL
+
+!==============================================================================
+! RN_20100129: Option to load Trilinos matrix directly bypassing sparse_easy_solve
+!==============================================================================
+
+ call t_startf("PICARD_solvea")
+ if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+ call sparse_easy_solve(matrix, rhsd, answer, err, iter, whichsparse)
+#ifdef TRILINOS
+ else
+ call solvewithtrilinos(rhsd, answer, linearSolveTime)
+ totalLinearSolveTime = totalLinearSolveTime + linearSolveTime
+ ! write(*,*) 'Total linear solve time so far', totalLinearSolveTime
+#endif
+ endif
+ call t_stopf("PICARD_solvea")
+
+!==============================================================================
+! RN_20100129: End of the block
+!==============================================================================
+
+ vk_1 = answer ! jfl for residual calculation
+
+ ! put vels and coeffs from sparse vector format (soln) back into 3d arrays
+ call solver_postprocess( ewn, nsn, upn, 2, uindx, answer, tvel, ghostbvel )
+
+ ! NOTE: y-component of velocity that comes out is called "tvel", to differentiate it
+ ! from the y-vel solution from the previous iteration, which is maintained as "vvel".
+ ! This is necessary since we have not yet solved for the x-comp of vel, which needs the
+ ! old prev. guess as an input (NOT the new guess).
+
+! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+
+ call t_startf("PICARD_findcoefstr2")
+ ! calculate coeff. for stress balance calc. in x-direction
+ ! (component flag = 1; note vvel here is still the OLD y-vel iterate)
+ call findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ 1, efvs, &
+ uvel, vvel, &
+ thck, dusrfdew, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ uindx, umask, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ 0 )
+
+ call t_stopf("PICARD_findcoefstr2")
+
+ call t_startf("PICARD_solver_pre2")
+ ! put vels and coeffs from 3d arrays into sparse vector format
+ call solver_preprocess( ewn, nsn, upn, uindx, matrix, answer, uvel )
+ call t_stopf("PICARD_solver_pre2")
+
+!==============================================================================
+! jfl 20100412: residual for u comp: Fu= C(u^k-1,v^k-1)u^k-1 - d(u^k-1,v^k-1)
+!==============================================================================
+
+ call t_startf("PICARD_res_vect")
+ call res_vect( matrix, uk_1, rhsd, size(rhsd), g_flag, L2square, whichsparse )
+ call t_stopf("PICARD_res_vect")
+
+ ! combine the v- and u-component residual norms into a single L2 norm
+ L2norm = sqrt(L2norm + L2square)
+ F(pcgsize(1)+1:2*pcgsize(1)) = uk_1(:) ! F = [ Fv, Fu ]
+
+! print *, 'L2 with/without ghost (k)= ', counter, &
+! sqrt(DOT_PRODUCT(F,F)), L2norm
+! if (counter <= 2) NL_target = NL_tol * L2norm
+! if (counter == 1) NL_target = NL_tol * L2norm
+ ! NOTE(review): the relative target (NL_tol * L2norm) is commented out in
+ ! favor of a fixed absolute target of 1.0d-4 set on the first iteration.
+ if (counter == 1) NL_target = 1.0d-4
+
+!==============================================================================
+! RN_20100129: Option to load Trilinos matrix directly bypassing sparse_easy_solve
+!==============================================================================
+
+ call t_startf("PICARD_solveb")
+ if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+ call sparse_easy_solve(matrix, rhsd, answer, err, iter, whichsparse)
+#ifdef TRILINOS
+ else
+ call solvewithtrilinos(rhsd, answer, linearSolveTime)
+ totalLinearSolveTime = totalLinearSolveTime + linearSolveTime
+ ! write(*,*) 'Total linear solve time so far', totalLinearSolveTime
+#endif
+ endif
+ call t_stopf("PICARD_solveb")
+
+!==============================================================================
+! RN_20100129: End of the block
+!==============================================================================
+
+ uk_1 = answer ! jfl for residual calculation
+
+ ! put vels and coeffs from sparse vector format (soln) back into 3d arrays
+ call solver_postprocess( ewn, nsn, upn, 1, uindx, answer, uvel, ghostbvel )
+
+ ! call fraction of assembly routines, passing current vel estimates (w/o manifold
+ ! correction!) to calculate consistent basal tractions
+ ! (final argument 1 = partial/basal-traction-only pass -- confirm in findcoefstr)
+
+ call t_startf("PICARD_findcoefstr3")
+ call findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ 2, efvs, &
+ tvel, uvel, &
+ thck, dusrfdns, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ uindx, umask, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ 1 )
+
+ call findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ 1, efvs, &
+ uvel, tvel, &
+ thck, dusrfdew, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ uindx, umask, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ 1 )
+
+ call t_stopf("PICARD_findcoefstr3")
+
+ ! apply unstable manifold correction to converged velocities
+
+ call t_startf("PICARD_mindcrsh")
+
+ call mindcrshstr(1,whichresid,uvel,counter,resid(1))
+
+ ! promote the new y-vel solve (tvel) to vvel before correcting it
+ vvel = tvel
+ call mindcrshstr(2,whichresid,vvel,counter,resid(2))
+
+ call t_stopf("PICARD_mindcrsh")
+
+!HALO - I'm pretty sure these updates *are* needed.
+!
+ call t_startf("PICARD_halo_upds")
+ ! coordinate halos for updated uvel and vvel
+ call staggered_parallel_halo(uvel)
+ call staggered_parallel_halo(vvel)
+ call t_stopf("PICARD_halo_upds")
+
+ !call dumpvels("After mindcrsh", uvel, vvel)
+
+ if (this_rank == 0) then
+
+ !NOTE - Does this comment still apply, or is parallel_single defunct?
+
+ ! Can't use main_task flag because main_task is true for all processors in case of parallel_single
+ ! output the iteration status: iteration number, max residual, and location of max residual
+ ! (send output to the screen or to the log file, per whichever line is commented out)
+
+ if( whichresid == HO_RESID_L2NORM ) then
+ print '(i4,3g20.6)', counter, L2norm, NL_target ! Output when using L2norm for convergence
+ !print '(a,i4,3g20.6)', "sup-norm uvel, vvel=", counter, resid(1), resid(2), minres
+ !write(message,'(i4,3g20.6)') counter, L2norm, NL_target
+ !call write_log (message)
+ else
+ print '(i4,3g20.6)', counter, resid(1), resid(2), minres
+ !write(message,'(" * strs ",i3,3g20.6)') counter, resid(1), resid(2), minres
+ !call write_log (message)
+ end if
+ endif
+
+ counter = counter + 1 ! advance the iteration counter
+ call t_stopf("PICARD_in_iter")
+
+ end do ! while ( outer_it_criterion >= outer_it_target .and. counter < cmax)
+
+ ! flag (module-level -- confirm) that an initial solution now exists
+ inisoln = .true.
+
+ ! ****************************************************************************************
+ ! END of Picard iteration
+ ! ****************************************************************************************
+ call t_stopf("PICARD_iter")
+
+ call t_startf("PICARD_post")
+ call ghost_postprocess( ewn, nsn, upn, uindx, uk_1, vk_1, &
+ ughost, vghost )
+
+!NOTE - I don't think uflx and vflx are needed; they are not used by the remapping subroutine.
+
+ ! loop over locally owned staggered-grid points only (halo excluded)
+ do ns = 1+staggered_lhalo, size(umask,2)-staggered_uhalo
+ do ew = 1+staggered_lhalo, size(umask,1)-staggered_uhalo
+ ! calc. fluxes from converged vel. fields (needed for input to thickness evolution subroutine)
+ if (umask(ew,ns) > 0) then
+ uflx(ew,ns) = vertintg(upn, sigma, uvel(:,ew,ns)) * stagthck(ew,ns)
+ vflx(ew,ns) = vertintg(upn, sigma, vvel(:,ew,ns)) * stagthck(ew,ns)
+ end if
+ end do
+ end do
+
+ !JEFF: Coordinate halos
+ !JEFF: umask is marked as INOUT and is updated for the Ross Ice Shelf experiment, but for no other, so don't update halos
+ !JEFF: uvel, vvel, uflx, and vflx are calculated in this routine, but only for "owned" grid cells, so update halos to get neighboring values.
+
+ !call staggered_parallel_halo(uvel) (called earlier)
+ !call staggered_parallel_halo(vvel) (called earlier)
+
+!NOTE - Do we need halo updates for btraction and efvs?
+! I think we don't need an update for efvs, because it is already computed in a layer of halo cells.
+! And I think we don't need an update for btraction, because it is computed in bodyset for all
+! locally owned velocity points.
+
+ call parallel_halo(efvs)
+ call staggered_parallel_halo(btraction)
+
+ !NOTE - Pretty sure we don't need these updates; uflx and vflx are not used elsewhere.
+ call staggered_parallel_halo(uflx)
+ call staggered_parallel_halo(vflx)
+
+ if (GLC_DEBUG) then
+ !JEFF Debugging Output to see what differences in final vvel and tvel.
+ ! write(CurrTimeLoopStr, '(i3.3)') CurrTimeLoop
+ ! call distributed_print("uvel_post_ov"//CurrTimeLoopStr//"_tsk", uvel)
+ !
+ ! call distributed_print("vvel_post_ov"//CurrTimeLoopStr//"_tsk", vvel)
+ end if
+
+ ! JEFF: Deallocate myIndices which is used to intialize Trilinos
+ if (whatsparse == STANDALONE_TRILINOS_SOLVER) then
+ deallocate(myIndices)
+ deallocate(myX)
+ deallocate(myY)
+ deallocate(myZ)
+ endif
+
+ ! de-allocation sparse matrix solution variables
+ deallocate(tvel)
+ deallocate(uindx,corr,usav)
+ deallocate(pcgval,pcgrow,pcgcol,rhsd)
+ deallocate(matrix%row, matrix%col, matrix%val)
+ deallocate(answer)
+ deallocate(uk_1, vk_1, F, g_flag)
+
+ !JEFF Debugging output
+ overallloop = overallloop + 1
+ call t_stopf("PICARD_post")
+
+ return
+
+end subroutine glam_velo_solver
+
+!***********************************************************************
+
+!NOTE - Remove umask from argument list; it's the same as model%geometry%stagmask
+
+subroutine JFNK_velo_solver (model,umask)
+
+ use parallel
+ use glimmer_paramets, only: GLC_DEBUG
+
+ use iso_c_binding
+ use glide_types, only : glide_global_type
+
+ implicit none
+
+ type(glide_global_type) ,target, intent(inout) :: model
+
+ !NOTE - Can we make the mask intent in?
+
+ integer, dimension(:,:), intent(inout) :: umask !*SFP* replaces the prev., internally calc. mask
+ ! ... 'inout' status allows for a minor alteration
+ ! to cism defined mask, which don't necessarily
+ ! associate all/any boundaries as a unique mask value.
+
+ type(glide_global_type) ,pointer :: fptr=>NULL()
+ type(c_ptr) :: c_ptr_to_object
+
+ integer(c_int) :: xk_size
+ real(dp), dimension(:), allocatable :: xk_1
+ integer ,dimension(:) ,allocatable :: gx_flag
+
+! split off of derived types
+
+! intent(in)
+ integer :: ewn, nsn, upn
+ real(dp) :: dew, dns
+
+ real(dp), dimension(:) ,pointer :: sigma, stagsigma
+ real(dp), dimension(:,:) ,pointer :: thck, usrf, lsrf, topg
+ real(dp), dimension(:,:) ,pointer :: dthckdew, dthckdns
+ real(dp), dimension(:,:) ,pointer :: dusrfdew, dusrfdns
+ real(dp), dimension(:,:) ,pointer :: dlsrfdew, dlsrfdns
+ real(dp), dimension(:,:) ,pointer :: stagthck
+ real(dp), dimension(:,:,:) ,pointer :: flwa
+ real(dp), dimension(:,:,:) ,pointer :: btraction ! consistent basal traction array
+
+ real(dp), dimension(:,:) ,pointer :: beta ! basal traction coefficient, computed in calcbeta
+ real(dp) ,pointer :: beta_const ! spatially uniform beta (Pa yr/m)
+ real(dp), dimension(:,:) ,pointer :: mintauf ! basal yield stress used by calcbeta (if specified)
+ real(dp), dimension(:,:) ,pointer :: bwat ! basal water depth
+ type(glide_basal_physics) :: basal_physics ! basal physics object
+
+ integer :: whichbabc
+ integer :: whichefvs
+ integer :: whichresid
+ integer :: whichsparse
+ integer :: whichnonlinear
+
+! intent(out)
+ real(dp), dimension(:,:,:) ,pointer :: uvel, vvel
+ real(dp), dimension(:,:) ,pointer :: uflx, vflx
+ real(dp), dimension(:,:,:) ,pointer :: efvs
+
+ integer :: ew, ns, up, nele
+ real(dp), parameter :: NL_tol = 1.0d-6
+
+! currently needed to assess whether basal traction is updated after each nonlinear iteration
+! integer :: k
+!NOTE: "k" is not needed in order to calculate basal traction; note that new subroutine calls
+! at lines 1175 below pass in a dummy value for this variable. In the long run, we can likely remove
+! this argument altogether - it was originally passed in to aid in stabilization
+! of the ice shelf boundary conditions but may no longer be needed (grep for the variable "cc" within
+! the subroutine "bodyset" to see where it is currently used)
+
+ character(len=100) :: message
+
+!*SFP* needed to incorporate generic wrapper to solver
+ type(sparse_matrix_type) :: matrixA, matrixC, matrixtp, matrixAuv, matrixAvu
+ real(dp) :: L2norm
+
+ call t_startf("JFNK_pre")
+ ewn = model%general%ewn
+ nsn = model%general%nsn
+ upn = model%general%upn
+ dew = model%numerics%dew
+ dns = model%numerics%dns
+ sigma => model%numerics%sigma(:)
+ stagsigma => model%numerics%stagsigma(:)
+ thck => model%geometry%thck(:,:)
+ usrf => model%geometry%usrf(:,:)
+ lsrf => model%geometry%lsrf(:,:)
+ topg => model%geometry%topg(:,:)
+ dthckdew => model%geomderv%dthckdew(:,:)
+ dthckdns => model%geomderv%dthckdns(:,:)
+ dusrfdew => model%geomderv%dusrfdew(:,:)
+ dusrfdns => model%geomderv%dusrfdns(:,:)
+ dlsrfdew => model%geomderv%dlsrfdew(:,:)
+ dlsrfdns => model%geomderv%dlsrfdns(:,:)
+ stagthck => model%geomderv%stagthck(:,:)
+ flwa => model%temper%flwa(:,:,:)
+ btraction => model%velocity%btraction(:,:,:)
+ whichbabc = model%options%which_ho_babc
+ whichefvs = model%options%which_ho_efvs
+ whichresid = model%options%which_ho_resid
+ whichsparse = model%options%which_ho_sparse
+ whichnonlinear = model%options%which_ho_nonlinear
+
+ !Note: The beta passed into the solver is equal to model%velocity%beta
+ beta => model%velocity%beta(:,:)
+ beta_const => model%paramets%ho_beta_const
+ mintauf => model%basalproc%mintauf(:,:)
+ bwat => model%temper%bwat(:,:)
+ basal_physics = model%basal_physics
+
+ uvel => model%velocity%uvel(:,:,:)
+ vvel => model%velocity%vvel(:,:,:)
+ uflx => model%velocity%uflx(:,:)
+ vflx => model%velocity%vflx(:,:)
+ efvs => model%stress%efvs(:,:,:)
+
+ ! RN_20100125: assigning value for whatsparse, which is needed for putpcgc()
+!NOTE - Can we use just one variable for each of these options?
+ whatsparse = whichsparse
+ nonlinear = whichnonlinear
+
+!NOTE - Much of the following code is a copy of code above.
+! Can we get by with a single copy? I'm thinking of operations that are done once, before the iterations begin.
+!NOTE MJH: can we put these derivative calculations in the diagnostic solve part where the other derivatives are calculated?
+
+ ! *SFP* geometric 1st deriv. for generic input variable 'ipvr',
+ ! output as 'opvr' (includes 'upwinding' for boundary values)
+ call geom2ders(ewn, nsn, dew, dns, usrf, stagthck, d2usrfdew2, d2usrfdns2)
+ call geom2ders(ewn, nsn, dew, dns, thck, stagthck, d2thckdew2, d2thckdns2)
+
+ ! *SFP* geometric (2nd) cross-deriv. for generic input variable 'ipvr', output as 'opvr'
+ call geom2derscros(ewn, nsn, dew, dns, thck, stagthck, d2thckdewdns)
+ call geom2derscros(ewn, nsn, dew, dns, usrf, stagthck, d2usrfdewdns)
+
+ model%geomderv%d2thckdew2 = d2thckdew2
+ model%geomderv%d2thckdns2 = d2thckdns2
+ model%geomderv%d2usrfdew2 = d2usrfdew2
+ model%geomderv%d2usrfdns2 = d2usrfdns2
+
+ ! *SFP* make a 2d array identifying if the associated point has zero thickness,
+ ! has non-zero thickness and is interior, or has non-zero thickness
+ ! and is along a boundary
+
+ !*SFP* This subroutine has been altered from its original form (was a function, still included
+ ! below w/ subroutine but commented out) to allow for a tweak to the CISM calculated mask (adds
+ ! in an unique number for ANY arbitrary boundary, be it land, water, or simply at the edge of
+ ! the calculation domain).
+
+ allocate(uindx(ewn-1,nsn-1))
+
+ ! *SFP* if a point from the 2d array 'mask' is associated with non-zero ice thickness,
+ ! either a boundary or interior point, give it a unique number. If not, give it a zero
+ uindx = indxvelostr(ewn, nsn, upn, umask, pcgsize(1))
+
+ L2norm = 1.0d20
+
+ ! *SFP* an initial guess at the size of the sparse matrix
+ pcgsize(2) = pcgsize(1) * 20
+
+ ! Structure to become NOX implementation for JFNK solve
+ xk_size=2*pcgsize(1)
+
+!==============================================================================
+! RN_20100129: Option to load Trilinos matrix directly bypassing sparse_easy_solve
+!==============================================================================
+
+#ifdef TRILINOS
+ if (whatsparse == STANDALONE_TRILINOS_SOLVER) then
+ if (main_task) write(*,*) "Using GlobalIDs..."
+ ! JEFF: Define myIndices in terms of globalIDs
+ allocate(myIndices(pcgsize(1))) ! myIndices is an integer vector with a unique ID for each layer for ice grid points
+ allocate(myX(pcgsize(1))) ! Coordinates of nodes, used by ML preconditioner
+ allocate(myY(pcgsize(1)))
+ allocate(myZ(pcgsize(1)))
+ call distributed_create_partition(ewn, nsn, (upn + 2) , uindx, pcgsize(1), myIndices, myX, myY, myZ) ! Uses uindx mask to determine ice grid points.
+ mySize = pcgsize(1) ! Set variable for inittrilinos
+
+ if (GLC_DEBUG) then
+ write(*,*) "GlobalIDs myIndices..."
+ write(*,*) "pcgsize = ", pcgsize(1)
+ write(*,*) "myIndices = ", myIndices
+ !call parallel_stop(__FILE__, __LINE__)
+ end if
+
+ call inittrilinos(25, mySize, myIndices, myX, myY, myZ, comm) !re: Why 25 not 20 for PIC? needed the mem space
+
+ ! Triad sparse matrix not used in this case, so save on memory
+ pcgsize(2) = 1
+
+ ! JEFF: deallocate myIndices after the solve loop, because used in translation between globalIDs and local indices
+ ! deallocate(myIndices)
+ endif
+#endif
+
+!NOTE This is the end of the block of code that is (mostly) cut and pasted from above.
+
+!==============================================================================
+! RN_20100126: End of the block
+!==============================================================================
+
+ allocate( xk_1(2*pcgsize(1)), gx_flag(2*pcgsize(1)) )
+
+ ! *SFP* allocate space matrix variables
+ allocate (pcgrow(pcgsize(2)),pcgcol(pcgsize(2)), rhsd(pcgsize(1)), rhsx(2*pcgsize(1)), &
+ pcgval(pcgsize(2)))
+ allocate(matrixA%row(pcgsize(2)), matrixA%col(pcgsize(2)), &
+ matrixA%val(pcgsize(2)))
+ allocate(matrixC%row(pcgsize(2)), matrixC%col(pcgsize(2)), &
+ matrixC%val(pcgsize(2)))
+ allocate(matrixtp%row(pcgsize(2)), matrixtp%col(pcgsize(2)), &
+ matrixtp%val(pcgsize(2)))
+
+ allocate(model%solver_data%ui(ewn-1,nsn-1) )
+ allocate(model%solver_data%um(ewn-1,nsn-1) )
+ allocate(model%solver_data%d2thckcross(ewn-1,nsn-1) )
+ allocate(model%solver_data%d2usrfcross(ewn-1,nsn-1) )
+ allocate(model%solver_data%gxf( 2*pcgsize(1) ) )
+
+ call assign_resid(model, uindx, umask, d2thckdewdns, d2usrfdewdns, &
+ pcgsize, gx_flag, matrixA, matrixC, L2norm, ewn, nsn)
+
+ fptr => model
+ c_ptr_to_object = c_loc(fptr)
+
+ call ghost_preprocess_jfnk( ewn, nsn, upn, uindx, ughost, vghost, &
+ xk_1, uvel, vvel, gx_flag, pcgsize(1)) ! jfl_20100430
+
+if (main_task) then
+ print *, ' '
+ print *, 'Running Payne/Price higher-order dynamics with JFNK solver'
+end if
+
+ call t_stopf("JFNK_pre")
+
+#ifdef TRILINOS
+
+!==============================================================================
+! Newton loop Using Trilinos NOX. Solves F(x) = 0 for x where x = [v, u] and
+! F = [Fv(u,v), Fu(u,v)]
+!==============================================================================
+
+ call t_startf("JFNK_noxinit")
+ call noxinit(xk_size, xk_1, comm, c_ptr_to_object)
+ call t_stopf("JFNK_noxinit")
+
+ call t_startf("JFNK_noxsolve")
+ call noxsolve(xk_size, xk_1, c_ptr_to_object)
+ call t_stopf("JFNK_noxsolve")
+
+ call t_startf("JFNK_noxfinish")
+ call noxfinish()
+ call t_stopf("JFNK_noxfinish")
+
+! k = 0
+
+#else
+
+!==============================================================================
+! SLAP JFNK loop: calculate F(u^k-1,v^k-1)
+!==============================================================================
+
+ call t_startf("JFNK_SLAP")
+ call slapsolve(xk_1, xk_size, c_ptr_to_object, NL_tol, pcgsize)
+ call t_stopf("JFNK_SLAP")
+
+! k = 1
+
+#endif
+
+ call t_startf("JFNK_post")
+
+! need to update these values from fptr%uvel,vvel,stagthck etc
+ call solver_postprocess_jfnk( ewn, nsn, upn, uindx, xk_1, vvel, uvel, ghostbvel, pcgsize(1) )
+ call ghost_postprocess_jfnk( ewn, nsn, upn, uindx, xk_1, ughost, vghost, pcgsize(1) )
+
+ ! call fraction of assembly routines, passing current vel estimates (w/o manifold
+ ! correction!) to calculate consistent basal tractions
+ !
+ ! *SFP* NOTE that if wanting to use basal tractions for the Newton method of converging on a
+ ! coulomb-friction basal BC, must update basal tractions estimate at EACH nonlinear iteration.
+ ! In this case, the following two calls need to sit INSIDE of the do loop above. They are left
+ ! out here because the current implementation of NOX skips to the end of this do loop, in order
+ ! to skip JFs original implementation of JFNK (and jumping out of the do loop means these calls
+ ! are skipped if they are inside of the do loop).
+ !
+
+! KJE this is now outside the loop of both JFNK methods (and has been for while)
+! appears to be redundant, but leaving commented for a while in case an unknown issue pops up
+ call findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ 2, efvs, &
+ vvel, uvel, &
+ thck, dusrfdns, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ uindx, umask, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ 1 )
+
+ call findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ 1, efvs, &
+ uvel, vvel, &
+ thck, dusrfdew, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ uindx, umask, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ 1 )
+
+ inisoln = .true.
+
+ if (GLC_DEBUG) then
+ print*,"Solution vector norm after JFNK = " ,sqrt(DOT_PRODUCT(xk_1,xk_1))
+ end if
+
+!NOTE - The remaining code in this subroutine is cut and pasted from above.
+! Can we encapsulate this repeated code in a subroutine?
+
+! I don't think uflx and vflux are needed.
+
+ ! Locally owned velocity points
+ do ns = 1+staggered_lhalo, size(umask,2)-staggered_uhalo
+ do ew = 1+staggered_lhalo, size(umask,1)-staggered_uhalo
+ ! *SFP* calc. fluxes from converged vel. fields (for input to thickness evolution subroutine)
+ if (umask(ew,ns) > 0) then
+ uflx(ew,ns) = vertintg(upn, sigma, uvel(:,ew,ns)) * stagthck(ew,ns)
+ vflx(ew,ns) = vertintg(upn, sigma, vvel(:,ew,ns)) * stagthck(ew,ns)
+ end if
+ end do
+ end do
+
+ ! JEFF: Deallocate myIndices which is used to initialize Trilinos
+ if (whatsparse == STANDALONE_TRILINOS_SOLVER) then
+ deallocate(myIndices)
+ deallocate(myX)
+ deallocate(myY)
+ deallocate(myZ)
+ endif
+
+ ! *SFP* de-allocation of sparse matrix solution variables
+ deallocate(uindx)
+ deallocate(pcgval,pcgrow,pcgcol,rhsd, rhsx)
+ deallocate(matrixA%row, matrixA%col, matrixA%val)
+ deallocate(matrixC%row, matrixC%col, matrixC%val)
+ deallocate(matrixtp%row, matrixtp%col, matrixtp%val)
+ deallocate(gx_flag )
+ deallocate(model%solver_data%ui)
+ deallocate(model%solver_data%um)
+ deallocate(model%solver_data%d2thckcross)
+ deallocate(model%solver_data%d2usrfcross)
+ deallocate(model%solver_data%gxf)
+
+ !PW following are needed for glam_velo_fordsiapstr - putting here until can be convinced
+ ! that they are not needed (or that they should be delayed until later)
+ call staggered_parallel_halo(uvel)
+ call staggered_parallel_halo(vvel)
+
+!NOTE - Not sure we need halo updates for efvs, btraction, uflx, vflx
+! I think we do not need an update for efvs, because it is already computed in a layer of halo cells.
+! And I think we don't need an update for btraction, because it is computed in bodyset for all
+! locally owned velocity points.
+
+ call parallel_halo(efvs)
+ call staggered_parallel_halo(btraction)
+ call staggered_parallel_halo(uflx)
+ call staggered_parallel_halo(vflx)
+
+ call t_stopf("JFNK_post")
+
+ return
+
+end subroutine JFNK_velo_solver
+
+!***********************************************************************
+
+function indxvelostr(ewn, nsn, upn, &
+                     mask, pointno)
+
+  ! Give every point of the 2d array 'mask' that is associated with
+  ! non-zero ice thickness (boundary or interior) a unique consecutive
+  ! number; all other points get 0. On return, 'pointno' holds the total
+  ! unknown count: the number of iced columns times (upn + 2) layers.
+
+  use parallel
+  implicit none
+
+  integer, intent(in) :: ewn, nsn, upn
+  integer, intent(in), dimension(:,:) :: mask
+  integer, intent(out) :: pointno
+
+  integer :: ew, ns
+  integer, dimension(size(mask,1),size(mask,2)) :: indxvelostr
+
+  pointno = 0
+
+  ! Sweep the locally owned (non-halo) points in storage order, handing
+  ! out consecutive indices to iced points only.
+  do ns = 1+staggered_lhalo, size(mask,2)-staggered_uhalo
+     do ew = 1+staggered_lhalo, size(mask,1)-staggered_uhalo
+        if ( GLIDE_HAS_ICE( mask(ew,ns) ) ) then
+           pointno = pointno + 1
+           indxvelostr(ew,ns) = pointno
+        else
+           indxvelostr(ew,ns) = 0
+        end if
+     end do
+  end do
+
+  ! Each indexed column carries upn layers plus two ghost points at the
+  ! upper and lower boundaries (needed for sfc and basal bcs).
+  pointno = pointno * (upn + 2)
+
+  return
+
+end function indxvelostr
+
+!***********************************************************************
+
+subroutine findefvsstr(ewn, nsn, upn, &
+                       stagsigma, counter, &
+                       whichefvs, efvs, &
+                       uvel, vvel, &
+                       flwa, thck, &
+                       dusrfdew, dthckdew, &
+                       dusrfdns, dthckdns, &
+                       mask)
+
+  ! calculate the effective viscosity
+  !
+  ! On the first nonlinear iteration (counter == 1), the rate-factor term
+  ! 1/2 * A^(-1/n) is precomputed into the module array 'flwafact' on the
+  ! staggered vertical grid. The viscosity is then set according to
+  ! 'whichefvs': a constant value, a value based on the flow rate factor,
+  ! or the full nonlinear form based on the effective strain rate.
+
+  use parallel
+  use glimmer_paramets, only: GLC_DEBUG
+  implicit none
+
+  integer, intent(in) :: ewn, nsn, upn
+  real(dp), intent(in), dimension(:) :: stagsigma
+  real(dp), intent(in), dimension(:,:,:) :: uvel, vvel, flwa
+  real(dp), intent(inout), dimension(:,:,:) :: efvs
+  real(dp), intent(in), dimension(:,:) :: thck, dthckdew, dusrfdew, &
+                                          dusrfdns, dthckdns
+  integer, intent(in), dimension(:,:) :: mask
+  integer, intent(in) :: whichefvs, counter
+
+  integer :: ew, ns, up
+
+  ! work vectors over the (staggered) vertical dimension
+  real(dp), dimension(size(efvs,1)) :: effstr, ugradup, vgradup, &
+                                       ugradew, ugradns, vgradew, vgradns
+
+  ! NOTE(review): mew and mns appear to be unused in this routine
+  integer, dimension(2) :: mew, mns
+
+  ! This is the factor 1/4(X0/H0)^2 in front of the term ((dv/dz)^2+(du/dz)^2)
+  real(dp), parameter :: f1 = 0.25d0 * (len0 / thk0)**2
+
+  if (counter == 1) then
+
+  ! effstrminsq = (1.0d-20 * tim0)**2
+
+  if (GLC_DEBUG) then
+
+  ! if (main_task) then
+  ! print *, 'nsn=', nsn
+  ! print *, 'ewn=', ewn
+  ! print *, 'uvel shape =', shape(uvel)
+  ! print *, 'vvel shape =', shape(vvel)
+  ! print *, 'thck shape =', shape(thck)
+  ! print *, 'efvs shape =', shape(efvs)
+  ! print *, 'flwafact shape =', shape(flwafact)
+  ! endif
+
+  end if
+
+!NOTE - Can remove the 'if' because Glam required temp and flwa on staggered vertical grid.
+
+  if (size(flwa,1)==upn-1) then ! temperature and flwa live on staggered vertical grid
+
+  !Note: To avoid parallel halo calls for efvs within glam_strs2, we need to compute efvs in one layer of halo cells
+  ! surrounding the locally owned velocity cells.
+
+     do ns = 2,nsn-1
+        do ew = 2,ewn-1
+           if (thck(ew,ns) > 0.d0) then
+              ! This is the rate factor term in the expression for the eff. visc: 1/2*A^(-1/n).
+              ! If both temperature and eff. visc. live on a staggered grid in the vertical, then
+              ! no vertical averaging is needed.
+              flwafact(1:upn-1,ew,ns) = 0.5d0 * flwa(1:upn-1,ew,ns)**p1
+           end if
+
+        end do
+     end do
+
+  else ! size(flwa,1)=upn; temperature and flwa live on unstaggered vertical grid
+
+     do ns = 2,nsn-1
+        do ew = 2,ewn-1
+           if (thck(ew,ns) > 0.d0) then
+              ! this is the rate factor term in the expression for the eff. visc: 1/2*A^(-1/n),
+              ! which is averaged to midpoints in the vertical (i.e. it lives on a staggered
+              ! grid in the vertical, which is the case for "efvs" as well).
+              forall (up = 1:upn-1) flwafact(up,ew,ns) = 0.5d0 * (sum(flwa(up:up+1,ew,ns)) / 2.d0)**p1
+           end if
+        end do
+     end do
+
+  end if ! present(flwa_vstag)
+  endif ! counter
+
+  select case(whichefvs)
+
+  case(HO_EFVS_CONSTANT) ! set the eff visc to a constant value
+
+     do ns = 2,nsn-1
+        do ew = 2,ewn-1
+           if (thck(ew,ns) > 0.d0) then
+              ! Steve recommends 10^6 to 10^7 Pa yr
+              ! ISMIP-HOM Test F requires 2336041.42829 Pa yr, so use this as the typical value
+              efvs(1:upn-1,ew,ns) = 2336041.42829d0 * scyr/tim0 / tau0 ! tau0 = rhoi*grav*thk0
+           else
+              efvs(:,ew,ns) = effstrminsq ! if the point is associated w/ no ice, set to min value
+           endif
+        enddo
+     enddo
+
+  case(HO_EFVS_FLOWFACT) ! set the eff visc to a value based on the flow rate factor
+
+! *SFP* changed default setting for linear viscosity so that the value of the rate
+! factor is taken into account
+
+     do ns = 2,nsn-1
+        do ew = 2,ewn-1
+           if (thck(ew,ns) > 0.d0) then
+! KJE code used to have this
+! efvs(1:upn-1,ew,ns) = 0.5d0 * flwa(1:upn-1,ew,ns)**(-1.d0)
+              efvs(1:upn-1,ew,ns) = flwafact(1:upn-1,ew,ns)
+           else
+              efvs(:,ew,ns) = effstrminsq ! if the point is associated w/ no ice, set to min value
+           end if
+        end do
+     end do
+
+  case(HO_EFVS_NONLINEAR) ! calculate eff. visc. using eff. strain rate
+
+!Note - This code may not work correctly if nhalo = 1.
+! In that case we would need a halo update of efvs to make sure we have the correct value
+! in all neighbors of locally owned velocity cells.
+
+     do ns = 2,nsn-1
+        do ew = 2,ewn-1
+           if (thck(ew,ns) > 0.d0) then
+              ! The hsum() is on the unstaggered grid picking up the four points.
+              ! Then there is a derivative in the vertical direction.
+              ugradup = vertideriv(upn, hsum(uvel(:,ew-1:ew,ns-1:ns)), thck(ew,ns))
+              vgradup = vertideriv(upn, hsum(vvel(:,ew-1:ew,ns-1:ns)), thck(ew,ns))
+
+              ugradew = horizderiv(upn, stagsigma, &
+                                   sum(uvel(:,ew-1:ew,ns-1:ns),3), &
+                                   dew4, ugradup, &
+                                   sum(dusrfdew(ew-1:ew,ns-1:ns)), &
+                                   sum(dthckdew(ew-1:ew,ns-1:ns)))
+
+              vgradew = horizderiv(upn, stagsigma, &
+                                   sum(vvel(:,ew-1:ew,ns-1:ns),3), &
+                                   dew4, vgradup, &
+                                   sum(dusrfdew(ew-1:ew,ns-1:ns)), &
+                                   sum(dthckdew(ew-1:ew,ns-1:ns)))
+
+              ugradns = horizderiv(upn, stagsigma, &
+                                   sum(uvel(:,ew-1:ew,ns-1:ns),2), &
+                                   dns4, ugradup, &
+                                   sum(dusrfdns(ew-1:ew,ns-1:ns)), &
+                                   sum(dthckdns(ew-1:ew,ns-1:ns)))
+
+              vgradns = horizderiv(upn, stagsigma, &
+                                   sum(vvel(:,ew-1:ew,ns-1:ns),2), &
+                                   dns4, vgradup, &
+                                   sum(dusrfdns(ew-1:ew,ns-1:ns)), &
+                                   sum(dthckdns(ew-1:ew,ns-1:ns)))
+
+              ! "effstr" = eff. strain rate squared
+              effstr = ugradew**2 + vgradns**2 + ugradew*vgradns + &
+                       0.25d0 * (vgradew + ugradns)**2 + &
+!                      f1 * (ugradup**2 + vgradup**2) ! make line ACTIVE for "capping" version (see note below)
+                       f1 * (ugradup**2 + vgradup**2) + effstrminsq ! make line ACTIVE for new version
+
+              ! -----------------------------------------------------------------------------------
+              ! NOTES on capping vs. non-capping version of eff. strain rate calc.
+              ! -----------------------------------------------------------------------------------
+              !
+              ! Set eff. strain rate (squared) to some min value where it falls below some
+              ! threshold value, 'effstrminsq'. Commented out the old version below, which "caps"
+              ! the min eff strain rate (and thus the max eff visc) in favor of a version that
+              ! leads to a "smooth" description of eff strain rate (and eff visc). The change for
+              ! new version is that the value of 'effstrminsq' simply gets added in with the others
+              ! (e.g. how it is done in the Pattyn model). The issues w/ the capping approach are
+              ! discussed (w.r.t. sea ice model) in: Lemieux and Tremblay, JGR, VOL. 114, C05009,
+              ! doi:10.1029/2008JC005017, 2009). Long term, the capping version should probably be
+              ! available as a config file option or possibly removed altogether.
+
+              ! Old "capping" scheme ! these lines must be active to use the "capping" scheme for the efvs calc
+!             where (effstr < effstrminsq)
+!                effstr = effstrminsq
+!             end where
+
+              ! Note that the vert dims are explicit here, since glide_types defines this
+              ! field as having dims 1:upn. This is something that we'll have to decide on long-term;
+              ! should efvs live at cell centroids in the vert (as is assumed in this code)
+              ! or should we be doing some one-sided diffs at the sfc/bed boundaries so that it has vert dims
+              ! of upn? For now, we populate ONLY the first 1:upn-1 values of the efvs vector and leave the one
+              ! at upn empty (the Pattyn/Bocek/Johnson core would fill all values, 1:upn).
+
+              ! NOTE also that efvs lives on the non-staggered grid in the horizontal. That is, in all of the
+              ! discretizations conducted below, efvs is explicitly averaged from the normal horiz grid onto the
+              ! staggered horiz grid (Thus, in the calculations, efvs is treated as if it lived on the staggered
+              ! horiz grid, even though it does not).
+
+              ! Below, p2=(1-n)/2n. The 1/2 is from taking the sqr root of the squared eff. strain rate
+              efvs(1:upn-1,ew,ns) = flwafact(1:upn-1,ew,ns) * effstr**p2 + homotopy
+!             efvs(:,ew,ns) = flwafact(:,ew,ns) * effstr**p2
+
+           else
+              efvs(:,ew,ns) = effstrminsq ! if the point is associated w/ no ice, set to min value
+           end if
+
+        end do ! end ew
+     end do ! end ns
+
+  end select
+
+  return
+end subroutine findefvsstr
+
+!***********************************************************************
+
+function vertideriv(upn, varb, thck)
+
+  ! Vertical (sigma-coordinate) derivative of the column vector 'varb',
+  ! returned at the upn-1 layer midpoints.
+  !
+  ! 'dupm' is defined as -1/(2*del_sigma), in which case it seems like
+  ! there should be a '-' in front of this expression ... but note that
+  ! the negative sign is implicit in the fact that the vertical index
+  ! increases moving downward in the ice column (up=1 is the sfc,
+  ! up=upn is the bed).
+
+  implicit none
+
+  integer, intent(in) :: upn
+  real(dp), intent(in), dimension(:) :: varb
+  real(dp), intent(in) :: thck
+
+  real(dp), dimension(size(varb)-1) :: vertideriv
+
+  integer :: up
+
+  ! Explicit loop (rather than whole-array syntax) so the code runs
+  ! cleanly under array bounds checking.
+  do up = 1, upn-1
+     vertideriv(up) = dupm(up) * (varb(up+1) - varb(up)) / thck
+  end do
+
+  return
+
+end function vertideriv
+
+!***********************************************************************
+
+function horizderiv(upn, stagsigma, &
+                    varb, grid, &
+                    dvarbdz, dusrfdx, dthckdx)
+
+  ! Horizontal derivative of 'varb' on the staggered vertical grid:
+  ! a centered difference between the two horizontal columns of 'varb',
+  ! minus the sigma-coordinate correction built from the vertical
+  ! derivative 'dvarbdz' and the surface/thickness slopes.
+
+  implicit none
+
+  integer, intent(in) :: upn
+  real(dp), dimension(:), intent(in) :: stagsigma
+  real(dp), dimension(:,:), intent(in) :: varb
+  real(dp), dimension(:), intent(in) :: dvarbdz
+  real(dp), intent(in) :: dusrfdx, dthckdx, grid
+
+  real(dp) :: horizderiv(size(varb,1)-1)
+
+  real(dp), dimension(size(varb,1)-1) :: dvarb_dx   ! centered horizontal difference
+  real(dp), dimension(size(varb,1)-1) :: sigma_corr ! sigma-coordinate correction term
+
+  dvarb_dx   = (varb(1:upn-1,2) + varb(2:upn,2) - varb(1:upn-1,1) - varb(2:upn,1)) / grid
+  sigma_corr = dvarbdz * (dusrfdx - stagsigma * dthckdx) / 4.d0
+
+  horizderiv = dvarb_dx - sigma_corr
+
+  return
+
+end function horizderiv
+
+!***********************************************************************
+
+function getlocrange(upn, indx)
+
+  ! Map a 2d column index 'indx' to the [first, last] positions of its
+  ! upn real (non-ghost) layers within the packed solution vector.
+  ! Each column occupies upn+2 slots (upn layers plus one ghost point at
+  ! the surface and one at the bed); the returned range excludes ghosts.
+
+  implicit none
+
+  integer, intent(in) :: upn
+  integer, intent(in) :: indx
+  integer, dimension(2) :: getlocrange
+
+  integer :: base
+
+  ! Slot of this column's surface ghost point.
+  base = (indx - 1) * (upn + 2) + 1
+
+  getlocrange(1) = base + 1
+  getlocrange(2) = base + upn
+
+  return
+
+end function getlocrange
+
+!***********************************************************************
+
+!! WHL - Testing whether this function will work for single-processor parallel runs
+!! with solvers other than trilinos
+
+function getlocationarray(ewn, nsn, upn, mask, indxmask, return_global_IDs)
+
+  ! Return, for each (ew,ns) grid point, index bases into the packed
+  ! solution vector:
+  !   (:,:,1) - global ID base for every point (or, in the serial
+  !             cumulative-sum scheme, the local base)
+  !   (:,:,2) - local index base for ice points; 0 for halo or ice-free
+  !             points
+  ! Each indexed column occupies upn+2 entries: upn layers plus two ghost
+  ! layers used for the sfc and basal bcs.
+
+  use parallel
+
+  implicit none
+
+  integer, intent(in) :: ewn, nsn, upn
+  integer, dimension(:,:), intent(in) :: mask      ! ice mask (nonzero where ice is present)
+  integer, dimension(:,:), intent(in) :: indxmask  ! local column index, nonzero for cells with ice
+  logical, intent(in), optional :: return_global_IDs
+
+  integer, dimension(ewn,nsn,2) :: getlocationarray
+
+  logical :: return_globalIDs ! local copy of return_global_IDs (defaults to .true.)
+
+  integer :: ew, ns
+  integer, dimension(ewn,nsn) :: temparray
+  integer :: cumsum
+
+  ! Default to global IDs unless the caller explicitly asks otherwise.
+  if (present(return_global_IDs)) then
+     return_globalIDs = return_global_IDs
+  else
+     return_globalIDs = .true.
+  endif
+
+!NOTE - Make this if which_ho_sparse = 4 instead (or ifdef Trilinos?)
+#ifdef globalIDs
+  ! Returns in (:,:,1) the global ID bases for each grid point, including
+  ! halos and those without ice.
+  ! Since the code checks elsewhere whether ice occurs at a given grid point,
+  ! this information is not encoded here. For the local indices (see below)
+  ! the mask information is used since ice-free grid points are not indexed
+  ! locally
+
+!LOOP NOTE - Not sure if these loops are correct.
+! Is the input mask on the scalar (ice) grid?
+!SFP: Need to check indices here - getlocationarray should exist on the velocity grid, not the thickness (scalar) grid
+
+  if (return_globalIDs) then
+
+     do ns = 1,nsn
+        do ew = 1,ewn
+           getlocationarray(ew,ns,1) = parallel_globalID(ns, ew, upn + 2) ! Extra two layers for ghost layers
+        end do
+     end do
+
+     ! Returns in (:,:,2) the local index base for each ice grid point
+     ! (same indices as those used in myIndices)
+     ! indxmask is ice mask with non-zero values for cells with ice.
+     ! If a point (ew,ns) doesn't have ice, then value is set to 0.
+     ! If a point (ew,ns) is in the halo, value is also set to 0.
+     ! upn+2 is the total number of vertical layers including any ghosts
+     ! (logic modelled after distributed_create_partition)
+
+     ! initialize to zero (in order to set halo and ice-free cells to zero)
+     getlocationarray(:,:,2) = 0
+
+     ! Step through indxmask, but exclude halo
+     do ns = 1+staggered_lhalo, size(indxmask,2)-staggered_uhalo
+        do ew = 1+staggered_lhalo, size(indxmask,1)-staggered_uhalo
+           if ( indxmask(ew,ns) /= 0 ) then
+              getlocationarray(ew,ns,2) = (indxmask(ew,ns) - 1) * (upn+2) + 1
+           endif
+        end do
+     end do
+
+!NOTE - Clean this up, so we always use this procedure when solving without Trilinos.
+  else ! use the cumulative-sum procedure (same logic as the non-globalIDs build below)
+
+     ! initialize to zero
+     cumsum = 0
+     temparray = 0
+     getlocationarray = 0
+
+     do ns=1+staggered_lhalo, size(mask,2)-staggered_uhalo
+        do ew=1+staggered_lhalo, size(mask,1)-staggered_uhalo
+           if ( GLIDE_HAS_ICE( mask(ew,ns) ) ) then
+              cumsum = cumsum + ( upn + 2 )
+              getlocationarray(ew,ns,1) = cumsum
+              temparray(ew,ns) = upn + 2
+           else
+              getlocationarray(ew,ns,1) = 0
+              temparray(ew,ns) = 1
+           end if
+        end do
+     end do
+
+     ! convert cumulative end positions to base (start) positions
+     getlocationarray(:,:,1) = ( getlocationarray(:,:,1) + 1 ) - temparray(:,:)
+     getlocationarray(:,:,2) = getlocationarray(:,:,1)
+
+  endif ! return_globalIDs
+
+#else
+
+  ! Non-globalIDs build: cumulative-sum indexing over iced points.
+  ! (Intentionally identical to the fallback branch in the globalIDs build.)
+
+  ! initialize to zero
+  cumsum = 0
+  temparray = 0
+  getlocationarray = 0
+
+  do ns=1+staggered_lhalo, size(mask,2)-staggered_uhalo
+     do ew=1+staggered_lhalo, size(mask,1)-staggered_uhalo
+        if ( GLIDE_HAS_ICE( mask(ew,ns) ) ) then
+           cumsum = cumsum + ( upn + 2 )
+           getlocationarray(ew,ns,1) = cumsum
+           temparray(ew,ns) = upn + 2
+        else
+           getlocationarray(ew,ns,1) = 0
+           temparray(ew,ns) = 1
+        end if
+     end do
+  end do
+
+  ! convert cumulative end positions to base (start) positions
+  getlocationarray(:,:,1) = ( getlocationarray(:,:,1) + 1 ) - temparray(:,:)
+  getlocationarray(:,:,2) = getlocationarray(:,:,1)
+
+#endif
+
+  return
+
+end function getlocationarray
+
+!***********************************************************************
+!NOTE - Remove function slapsolvstr? I think it's no longer used.
+
+function slapsolvstr(ewn, nsn, upn, &
+                     vel, uindx, its, answer )
+
+! *sp* routine to solve Ax=b sparse matrix problem
+!
+! Packs the current 3d velocity field into 'answer' as the initial guess,
+! calls the SLAP iterative solver DSLUCS on the module-level triad matrix
+! (pcgrow/pcgcol/pcgval, rhs 'rhsd'), and unpacks the solution into the
+! returned 3d array. 'its' accumulates the iteration count across calls.
+! NOTE(review): believed superseded by solver_preprocess/sparse_easy_solve
+! and no longer called (see NOTE above); candidate for removal.
+
+  implicit none
+
+  integer, intent(in) :: ewn, nsn, upn
+  real(dp), dimension(:,:,:), intent(in) :: vel
+  integer, dimension(:,:), intent(in) :: uindx
+
+  real(dp), dimension(:), intent(out) :: answer
+
+  real(dp), dimension(size(vel,1),size(vel,2),size(vel,3)) :: slapsolvstr
+  integer, intent(inout) :: its
+
+  integer :: ew, ns
+
+  real(dp), dimension(:), allocatable :: rwork
+  integer, dimension(:), allocatable :: iwork
+
+  real(dp), parameter :: tol = 1.0d-12
+  real(dp) :: err
+
+  integer, parameter :: isym = 0, itol = 2, itmax = 100
+  integer, dimension(2) :: loc
+  integer :: iter, ierr, mxnelt
+
+! ** move to values subr
+
+  pcgsize(2) = ct_nonzero - 1
+
+  ! convert the triad-format matrix to SLAP column format (in place)
+  call ds2y(pcgsize(1),pcgsize(2),pcgrow,pcgcol,pcgval,isym)
+
+!** plot the matrix to check that it has the correct form
+!call dcpplt(pcgsize(1),pcgsize(2),pcgrow,pcgcol,pcgval,isym,ulog)
+
+  mxnelt = 60 * pcgsize(1); allocate(rwork(mxnelt),iwork(mxnelt))
+
+!** solve the problem using the SLAP package routines
+!** -------------------------------------------------
+!** n ... order of matrix a (in)
+!** b ... right hand side vector (in)
+!** x ... initial guess/final solution vector (in/out)
+!** nelt ... number of non-zeroes in A (in)
+!** ia, ja ... sparse matrix format of A (in)
+!** a ... matrix held in SLAT column format (in)
+!** isym ... storage method (0 is complete) (in)
+!** itol ... convergence criteria (2 recommended) (in)
+!** tol ... criteria for convergence (in)
+!** itmax ... maximum number of iterations (in)
+!** iter ... returned number of iterations (out)
+!** err ... error estimate of solution (out)
+!** ierr ... returned error message (0 is ok) (out)
+!** iunit ... unit for error writes during iteration (0 no write) (in)
+!** rwork ... workspace for SLAP routines (in)
+!** mxnelt ... maximum array and vector sizes (in)
+!** iwork ... workspace for SLAP routines (in)
+
+! *sp* initial estimate for vel. field?
+! NOTE(review): unlike the other pack/unpack routines, these loops run
+! over 1..nsn-1 / 1..ewn-1 rather than excluding the halo - confirm intended.
+  do ns = 1,nsn-1
+     do ew = 1,ewn-1
+        if (uindx(ew,ns) /= 0) then
+           loc = getlocrange(upn, uindx(ew,ns))
+           answer(loc(1):loc(2)) = vel(:,ew,ns)
+           ! seed the sfc/basal ghost slots with the adjacent layer values
+           answer(loc(1)-1) = vel(1,ew,ns)
+           answer(loc(2)+1) = vel(upn,ew,ns)
+        end if
+     end do
+  end do
+
+  call dslucs(pcgsize(1),rhsd,answer,pcgsize(2),pcgrow,pcgcol,pcgval, &
+              isym,itol,tol,itmax,iter,err,ierr,0,rwork,mxnelt,iwork,mxnelt)
+
+  if (ierr /= 0) then
+     print *, 'pcg error ', ierr, itmax, iter, tol, err
+     ! stop
+  end if
+
+  deallocate(rwork,iwork)
+
+  ! unpack the solution vector into a 3d array (ghost layers dropped)
+  do ns = 1,nsn-1
+     do ew = 1,ewn-1
+        if (uindx(ew,ns) /= 0) then
+           loc = getlocrange(upn, uindx(ew,ns))
+           slapsolvstr(:,ew,ns) = answer(loc(1):loc(2))
+        else
+           slapsolvstr(:,ew,ns) = 0.d0
+        end if
+     end do
+  end do
+
+  its = its + iter
+
+  return
+
+end function slapsolvstr
+
+! *****************************************************************************
+
+subroutine solver_preprocess( ewn, nsn, upn, uindx, matrix, answer, vel )
+
+  ! Puts sparse matrix variables in SLAP triad format into "matrix" derived type,
+  ! so that it can be passed to the generic solver wrapper, "sparse_easy_solve".
+  ! Takes place of the old, explicit solver interface to SLAP linear solver.
+  ! Also packs the current 3d velocity field into 'answer' as the initial
+  ! guess, duplicating the top/bottom layer values into the adjacent ghost
+  ! slots. Side effect: updates module pcgsize(2) from the assembly
+  ! counter ct_nonzero.
+  use parallel
+
+  implicit none
+
+  integer, intent(in) :: ewn, nsn, upn
+  real(dp), dimension(:,:,:), intent(in) :: vel   ! current velocity component (initial guess)
+  integer, dimension(:,:), intent(in) :: uindx    ! 2d column index (0 = no ice)
+  type(sparse_matrix_type), intent(inout) :: matrix
+  real(dp), dimension(:), intent(out) :: answer   ! packed initial-guess vector
+
+  integer :: ew, ns
+  integer, dimension(2) :: loc
+
+  pcgsize(2) = ct_nonzero - 1
+
+  matrix%order = pcgsize(1)
+  matrix%nonzeros = pcgsize(2)
+  matrix%symmetric = .false.
+
+  matrix%row = pcgrow
+  matrix%col = pcgcol
+  matrix%val = pcgval
+
+  ! Initial estimate for vel. field; take from 3d array and put into
+  ! the format of a solution vector.
+
+  do ns = 1+staggered_lhalo, size(uindx,2)-staggered_uhalo
+     do ew = 1+staggered_lhalo, size(uindx,1)-staggered_uhalo
+        if (uindx(ew,ns) /= 0) then
+           loc = getlocrange(upn, uindx(ew,ns))
+           answer(loc(1):loc(2)) = vel(:,ew,ns)
+           ! seed the sfc/basal ghost slots with the adjacent layer values
+           answer(loc(1)-1) = vel(1,ew,ns)
+           answer(loc(2)+1) = vel(upn,ew,ns)
+
+           !JEFF Verifying Trilinos Input
+           ! write(*,*) "Initial answer at (", ew, ", ", ns, ") = ", answer(loc(1)-1:loc(2)+1)
+        end if
+     end do
+  end do
+
+end subroutine solver_preprocess
+
+!***********************************************************************
+
+subroutine solver_postprocess( ewn, nsn, upn, pt, uindx, answrapped, ansunwrapped, ghostbvel )
+
+  ! Unwrap velocity component 'pt' from the packed solution vector
+  ! 'answrapped' into the 3d array 'ansunwrapped', and stash the
+  ! fictitious basal (ghost) velocities needed later for the basal
+  ! traction calculation.
+  use parallel
+
+  implicit none
+
+  integer, intent(in) :: ewn, nsn, upn, pt
+  integer, dimension(:,:), intent(in) :: uindx
+  real(dp), dimension(:), intent(in) :: answrapped
+  real(dp), dimension(upn,ewn-1,nsn-1), intent(out) :: ansunwrapped
+  real(dp), dimension(:,:,:,:), intent(inout) :: ghostbvel
+
+  integer, dimension(2) :: rng
+  integer :: ew, ns
+
+  ! Locally owned velocity points only (halo excluded).
+  do ns = 1+staggered_lhalo, size(uindx,2)-staggered_uhalo
+     do ew = 1+staggered_lhalo, size(uindx,1)-staggered_uhalo
+
+        if (uindx(ew,ns) == 0) then
+           ! No ice at this point: zero the output column.
+           ansunwrapped(:,ew,ns) = 0.d0
+           cycle
+        end if
+
+        rng = getlocrange(upn, uindx(ew,ns))
+        ansunwrapped(:,ew,ns) = answrapped(rng(1):rng(2))
+
+        !! save the fictitious basal velocities (bottom two layers plus the
+        !! basal ghost point) for the basal traction calculation !!
+        ghostbvel(pt,:,ew,ns) = answrapped( rng(2)-1:rng(2)+1 )
+
+     end do
+  end do
+
+end subroutine solver_postprocess
+
+!***********************************************************************
+
+subroutine solver_postprocess_jfnk( ewn, nsn, upn, uindx, answrapped, ansunwrappedv, &
+                                    ansunwrappedu, ghostbvel, pcg1 )
+
+  ! Unwrap both velocity components from the packed JFNK solution vector
+  ! and place them into 3d arrays. The first pcg1 entries of 'answrapped'
+  ! hold the v component; the next pcg1 entries hold u.
+  use parallel
+
+  implicit none
+
+  ! Fix: declare intent on pcg1 (was missing, leaving the dummy's intent
+  ! unspecified); it is the offset of the u block within answrapped.
+  integer, intent(in) :: pcg1
+  integer, intent(in) :: ewn, nsn, upn
+  integer, dimension(:,:), intent(in) :: uindx      ! 2d column index (0 = no ice)
+  real(dp), dimension(:), intent(in) :: answrapped  ! packed solution vector [v, u]
+  real(dp), dimension(upn,ewn-1,nsn-1), intent(out) :: ansunwrappedv, ansunwrappedu
+  real(dp), dimension(:,:,:,:), intent(inout) :: ghostbvel ! basal ghost vels (component, layer, ew, ns)
+
+  integer, dimension(2) :: loc
+  integer :: ew, ns
+
+  ! Locally owned velocity points only (halo excluded).
+  do ns = 1+staggered_lhalo, size(uindx,2)-staggered_uhalo
+     do ew = 1+staggered_lhalo, size(uindx,1)-staggered_uhalo
+        if (uindx(ew,ns) /= 0) then
+           loc = getlocrange(upn, uindx(ew,ns))
+           ansunwrappedv(:,ew,ns) = answrapped(loc(1):loc(2))
+           ansunwrappedu(:,ew,ns) = answrapped(pcg1+loc(1):pcg1+loc(2))
+           !! save the fictitious basal velocities (bottom two layers plus the
+           !! basal ghost point) for basal traction calculation !!
+           ghostbvel(2,:,ew,ns) = answrapped( loc(2)-1:loc(2)+1 )
+           ghostbvel(1,:,ew,ns) = answrapped( pcg1+loc(2)-1:pcg1+loc(2)+1 )
+        else
+           ansunwrappedv(:,ew,ns) = 0.d0
+           ansunwrappedu(:,ew,ns) = 0.d0
+        end if
+     end do
+  end do
+
+end subroutine solver_postprocess_jfnk
+
+!***********************************************************************
+
+subroutine resvect_postprocess_jfnk( ewn, nsn, upn, uindx, pcg1, answrapped, ansunwrappedv, &
+                                     ansunwrappedu, ansunwrappedmag )
+
+! Unwrap the jfnk residual vector from the solution vector and place into 3d arrays.
+! The first pcg1 entries of 'answrapped' hold the v component; the next
+! pcg1 entries hold u. All three output arrays are optional.
+!
+! Fixes relative to the original:
+!  - the optional intent(out) dummies were dereferenced unconditionally,
+!    which is an error if an actual argument is absent; they are now
+!    guarded with present()
+!  - u**2.d0 / v**2.d0 used a REAL exponent, which is undefined (NaN) for
+!    negative residual components; integer exponent **2 is used instead
+!  - pcg1 was declared without an intent
+  use parallel
+
+  implicit none
+
+  integer, intent(in) :: pcg1   ! offset of the u block within answrapped
+  integer, intent(in) :: ewn, nsn, upn
+  integer, dimension(:,:), intent(in) :: uindx
+  real(dp), dimension(:), intent(in) :: answrapped
+  real(dp), dimension(upn,ewn-1,nsn-1), intent(out), optional :: ansunwrappedv, ansunwrappedu, ansunwrappedmag
+
+  ! local component arrays, so the magnitude can be formed regardless of
+  ! which optional outputs are present
+  real(dp), dimension(upn,ewn-1,nsn-1) :: vloc, uloc
+
+  integer, dimension(2) :: loc
+  integer :: ew, ns
+
+  ! zero everywhere so halo columns are well-defined too
+  vloc = 0.d0
+  uloc = 0.d0
+
+  ! Locally owned velocity points only (halo excluded).
+  do ns = 1+staggered_lhalo, size(uindx,2)-staggered_uhalo
+     do ew = 1+staggered_lhalo, size(uindx,1)-staggered_uhalo
+        if (uindx(ew,ns) /= 0) then
+           loc = getlocrange(upn, uindx(ew,ns))
+           vloc(:,ew,ns) = answrapped(loc(1):loc(2))
+           uloc(:,ew,ns) = answrapped(pcg1+loc(1):pcg1+loc(2))
+        end if
+     end do
+  end do
+
+  if (present(ansunwrappedv))   ansunwrappedv = vloc
+  if (present(ansunwrappedu))   ansunwrappedu = uloc
+  if (present(ansunwrappedmag)) ansunwrappedmag = sqrt( uloc**2 + vloc**2 )
+
+end subroutine resvect_postprocess_jfnk
+
+!***********************************************************************
+
+subroutine form_matrix( matrix ) ! for JFNK solver
+
+  ! Package the module-level sparse matrix triad (pcgrow/pcgcol/pcgval)
+  ! into the "matrix" derived type. Similar to solver_preprocess, but
+  ! does not form an answer vector.
+  ! Side effect: updates module pcgsize(2) from the assembly counter.
+
+  implicit none
+
+  type(sparse_matrix_type), intent(inout) :: matrix
+
+  ! ct_nonzero is left one past the final triad entry by matrix assembly.
+  pcgsize(2) = ct_nonzero - 1
+
+  ! matrix metadata
+  matrix%symmetric = .false.
+  matrix%order     = pcgsize(1)
+  matrix%nonzeros  = pcgsize(2)
+
+  ! triad storage (copied from the module work arrays)
+  matrix%val = pcgval
+  matrix%row = pcgrow
+  matrix%col = pcgcol
+
+end subroutine form_matrix
+
+!***********************************************************************
+
+subroutine forcing_term ( k, L2normk_1, gamma_l )
+
+  ! Calculates the forcing term (i.e. the factor that multiplies the initial
+  ! L2 norm to determine the tolerance for the linear solve in the JFNK solver)
+  ! at iteration k given the L2norm at k-1 and k-2.
+  ! jfl, 10 Sept 2010
+
+  ! See eq 2.6 in S.C. Eisenstat, H.F. Walker, Choosing the forcing terms in
+  ! an inexact Newton method, SIAM J. Sci. Comput. 17 (1996) 16-32.
+
+  implicit none
+
+  integer, intent(in) :: k
+  real(dp), intent(in) :: L2normk_1 ! L2 norm at k-1
+  real(dp), intent(out):: gamma_l
+
+  real(dp), parameter :: gamma_ini = 0.9d0   ! first-iteration (and maximum) value
+  real(dp), parameter :: gamma_min = 0.01d0  ! lower bound on the forcing term
+  real(dp), parameter :: expo      = 2.d0    ! Eisenstat-Walker exponent
+
+  real(dp), save :: L2normk_2 ! L2 norm at k-2, retained between calls
+
+  if (k == 1) then
+     gamma_l = gamma_ini
+  else
+     gamma_l = (L2normk_1 / L2normk_2)**expo
+  endif
+
+  ! clamp to [gamma_min, gamma_ini]
+  gamma_l = min(gamma_ini, max(gamma_min, gamma_l))
+
+  ! shift the norm history for the next call
+  L2normk_2 = L2normk_1
+
+end subroutine forcing_term
+
+!***********************************************************************
+
+subroutine apply_precond( matrixA, matrixC, nu1, nu2, wk1, wk2, whichsparse )
+
+  ! Apply preconditioner operator for JFNK solver: wk2 = P^-1 *wk1
+  ! The preconditioner operator is in fact taken from the Picard solver
+  ! There is a splitting of the v (A matrix) and u (C matrix) equations
+  ! Each component is solved to a loose tolerance (as opposed to Picard)
+  !
+  ! nu1 is the length of one component block; nu2 is the full vector
+  ! length, ordered [v, u]. Note that the module flag 'whatsparse'
+  ! selects the standalone-Trilinos path, while the dummy 'whichsparse'
+  ! is only forwarded to sparse_easy_solve. The per-solve diagnostics
+  ! (err, iter) are discarded.
+
+  implicit none
+
+  integer, intent(in) :: nu1, nu2, whichsparse
+  integer :: iter
+  type(sparse_matrix_type), intent(in) :: matrixA, matrixC
+  real(dp), dimension(nu2), intent(in) :: wk1
+  real(dp), dimension(nu2), intent(out):: wk2
+  real(dp), dimension(nu1) :: answer, vectp
+  real(dp) :: err
+
+! precondition v component
+
+  answer = 0.d0 ! initial guess
+  vectp(:) = wk1(1:nu1) ! rhs for precond v
+  if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+     call sparse_easy_solve(matrixA, vectp, answer, err, iter, whichsparse, nonlinear_solver = nonlinear)
+#ifdef TRILINOS
+  else
+     ! restore the previously stored v-equation (A) matrix, slot 0
+     call restoretrilinosmatrix(0);
+     call solvewithtrilinos(vectp, answer, linearSolveTime)
+     totalLinearSolveTime = totalLinearSolveTime + linearSolveTime
+     write(*,*) 'Total linear solve time so far', totalLinearSolveTime
+#endif
+  endif
+  wk2(1:nu1) = answer(:)
+
+! precondition u component
+
+  answer = 0.d0 ! initial guess
+  vectp(:) = wk1(nu1+1:nu2) ! rhs for precond u
+  if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+     call sparse_easy_solve(matrixC, vectp, answer, err, iter, whichsparse, nonlinear_solver = nonlinear)
+#ifdef TRILINOS
+  else
+     ! restore the previously stored u-equation (C) matrix, slot 1
+     call restoretrilinosmatrix(1);
+     call solvewithtrilinos(vectp, answer, linearSolveTime)
+     totalLinearSolveTime = totalLinearSolveTime + linearSolveTime
+     write(*,*) 'Total linear solve time so far', totalLinearSolveTime
+#endif
+  endif
+  wk2(nu1+1:nu2) = answer(:)
+
+end subroutine apply_precond
+
+!***********************************************************************
+
+subroutine apply_precond_nox( wk2_nox, wk1_nox, xk_size, c_ptr_to_object ) bind(C, name='apply_precond_nox')
+
+  ! Apply preconditioner operator for JFNK solver through NOX: wk2 = P^-1 *wk1
+  ! The preconditioner operator is in fact taken from the Picard solver
+  ! There is a splitting of the v (A matrix) and u (C matrix) equations
+  ! Each component is solved to a loose tolerance (as opposed to Picard)
+  !
+  ! C-interoperable entry point called from the NOX/Trilinos side.
+  ! c_ptr_to_object is a C pointer to the glide_global_type model object;
+  ! it is converted to a Fortran pointer to recover the solver context.
+  !
+  ! NOTE(review): 'pcgsize' is assigned below but not declared locally,
+  ! so it is presumably a module-level variable -- confirm; if so, this
+  ! call mutates module state as a side effect.
+
+  implicit none
+
+! variables coming through from NOX
+  integer(c_int) ,intent(in) ,value :: xk_size
+  real (c_double) ,intent(in) :: wk1_nox(xk_size)
+  real (c_double) ,intent(out) :: wk2_nox(xk_size)
+  type(glide_global_type) ,pointer :: fptr=>NULL()
+  type(c_ptr) ,intent(inout) :: c_ptr_to_object
+
+  integer :: nu1, nu2, whichsparse
+  integer :: iter
+  type(sparse_matrix_type) :: matrixA, matrixC
+  real(dp), dimension(xk_size) :: wk1
+  real(dp), dimension(xk_size) :: wk2
+  real(dp), allocatable, dimension(:) :: answer, vectp
+  real(dp) :: err
+
+  call c_f_pointer(c_ptr_to_object,fptr) ! convert C ptr to F ptr= model
+
+  ! Local copies of the Picard operator blocks and solver options.
+  matrixA = fptr%solver_data%matrixA
+  matrixC = fptr%solver_data%matrixC
+  whichsparse = fptr%options%which_ho_sparse
+  pcgsize = fptr%solver_data%pcgsize
+
+  ! nu1 = size of one velocity component; nu2 = both components stacked.
+  nu1 = pcgsize(1)
+  nu2 = 2*pcgsize(1)
+  allocate ( answer(nu1) )
+  allocate ( vectp(nu1) )
+  wk1 = wk1_nox
+
+! ID as a test
+! wk2_nox = wk1
+
+! precondition v component (first nu1 entries)
+
+  answer = 0.d0 ! initial guess
+  vectp(:) = wk1(1:nu1) ! rhs for precond v
+  call t_startf("nox_precond_v")
+  if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+     call sparse_easy_solve(matrixA, vectp, answer, err, iter, whichsparse, nonlinear_solver = nonlinear)
+#ifdef TRILINOS
+  else
+     call restoretrilinosmatrix(0);
+     call solvewithtrilinos(vectp, answer, linearSolveTime)
+     totalLinearSolveTime = totalLinearSolveTime + linearSolveTime
+! write(*,*) 'Total linear solve time so far', totalLinearSolveTime
+#endif
+  endif
+  call t_stopf("nox_precond_v")
+  wk2(1:nu1) = answer(:)
+
+! precondition u component (entries nu1+1:nu2)
+
+  answer = 0.d0 ! initial guess
+  vectp(:) = wk1(nu1+1:nu2) ! rhs for precond u
+  call t_startf("nox_precond_u")
+  if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+     call sparse_easy_solve(matrixC, vectp, answer, err, iter, whichsparse, nonlinear_solver = nonlinear)
+#ifdef TRILINOS
+  else
+     call restoretrilinosmatrix(1);
+     call solvewithtrilinos(vectp, answer, linearSolveTime)
+     totalLinearSolveTime = totalLinearSolveTime + linearSolveTime
+! write(*,*) 'Total linear solve time so far', totalLinearSolveTime
+#endif
+  endif
+  call t_stopf("nox_precond_u")
+  wk2(nu1+1:nu2) = answer(:)
+
+  ! answer/vectp are local allocatables and are freed automatically on return.
+  wk2_nox = wk2
+
+end subroutine apply_precond_nox
+
+!***********************************************************************
+
+subroutine reset_effstrmin (esm_factor) bind(C, name='reset_effstrmin')
+  use iso_c_binding
+
+  ! Reset the homotopy (continuation) parameter from the continuation
+  ! factor passed in from the C/Trilinos side.
+  !
+  ! Arguments:
+  !   esm_factor - continuation factor; 0 leads to the desired target.
+  !                Valid values are [0,10].
+  !
+  ! Side effect: sets the module-level variable 'homotopy'.
+  real (c_double), intent(in):: esm_factor
+
+  ! (Historical scaling of effstrminsq, retained for reference.)
+! effstrminsq = effstrminsq_target * 10.0**(2.0 * esm_factor)
+
+  ! Homotopy parameter needs to be zero when esm_factor hits zero.
+  ! Double-precision literals are used so all arithmetic matches the
+  ! c_double kind of esm_factor and homotopy (the integer-valued
+  ! constants are exactly representable, so results are unchanged).
+  if (esm_factor > 1.0d-10) then
+     homotopy = 10.0d0**( esm_factor - 9.0d0 )
+  else
+     homotopy = 0.0d0
+  endif
+
+end subroutine reset_effstrmin
+
+!***********************************************************************
+
+!NOTE - There is more repeated code here.
+
+ subroutine calc_F (xtp, F, xk_size, c_ptr_to_object, ispert) bind(C, name='calc_F')
+
+ ! Calculates either F(x) or F(x+epsilon*vect) for the JFNK method
+ ! Recall that x=[v,u]
+ ! xtp is both vtp and utp in one vector
+ !
+ ! On exit, F holds the nonlinear residual for both components:
+ !   F(1:n)    = A(u,v)*v - b(u,v)   (v component)
+ !   F(n+1:2n) = C(u,v)*u - d(u,v)   (u component)
+ ! where n = pcgsize(1). The combined L2 norm of the residual is stored
+ ! back into fptr%solver_data%L2norm, along with the updated matrices.
+
+ use iso_c_binding
+ use glide_types ,only : glide_global_type
+ use parallel
+
+ implicit none
+
+ integer(c_int) ,intent(in) ,value :: xk_size
+! ispert is 0 for base calculations, 1 for perturbed calculations
+ integer(c_int) ,intent(in) ,value :: ispert
+ real(c_double) ,intent(in) :: xtp(xk_size)
+ real(c_double) ,intent(out) :: F(xk_size)
+ type(glide_global_type) ,pointer :: fptr=>NULL()
+ type(c_ptr) ,intent(inout) :: c_ptr_to_object
+
+ integer :: ewn, nsn, upn, counter, whichbabc, whichefvs, i
+ integer ,dimension(2) :: pcgsize
+ integer ,dimension(:) ,allocatable :: gxf ! 0 :reg cell
+ integer ,dimension(:,:) ,allocatable :: ui, um
+ real(dp) :: dew, dns
+ real(dp), dimension(:) ,pointer :: sigma, stagsigma
+ real(dp), dimension(:,:) ,pointer :: thck, dusrfdew, dthckdew, dusrfdns, dthckdns, &
+ dlsrfdew, dlsrfdns, stagthck, lsrf, topg
+
+ real(dp), dimension(:,:) ,pointer :: beta, bwat, mintauf
+ type(glide_basal_physics) :: basal_physics
+ real(dp), pointer :: beta_const
+
+ real(dp), dimension(:,:) ,pointer :: d2usrfdew2, d2thckdew2, d2usrfdns2, d2thckdns2
+ real(dp), dimension(:,:,:) ,pointer :: efvs, btraction
+ real(dp), dimension(:,:,:) ,pointer :: uvel, vvel, flwa
+! real(dp), dimension(:,:,:) ,pointer :: ures, vres, magres !! used for output of residual fields
+ type(sparse_matrix_type) :: matrixA, matrixC
+ real(dp), dimension(:) ,allocatable :: vectx
+ real(dp), dimension(:) ,allocatable :: vectp
+
+ real(dp) :: L2square
+! real(dp), intent(inout):: L2norm
+
+! real(dp) :: Ft(xk_size) !! used for output of residual fields (ures,vres,magres)
+ ! storage for "F" vector when using F to output residual fields for plotting (because
+ ! res_vect_jfnk alters its argument while calculating the residual and unwrapping it,
+ ! and we don't want to alter the actual F vector)
+ real(dp) :: L2norm
+
+ call t_startf("Calc_F")
+ call c_f_pointer(c_ptr_to_object,fptr) ! convert C ptr to F ptr= model
+
+ ! Unpack grid dimensions, options, geometry and derivative fields from
+ ! the model object; pointer associations alias the model's own arrays.
+ ewn = fptr%general%ewn
+ nsn = fptr%general%nsn
+ upn = fptr%general%upn
+ whichbabc = fptr%options%which_ho_babc
+ whichefvs = fptr%options%which_ho_efvs
+ dew = fptr%numerics%dew
+ dns = fptr%numerics%dns
+ sigma => fptr%numerics%sigma(:)
+ stagsigma => fptr%numerics%stagsigma(:)
+ thck => fptr%geometry%thck(:,:)
+ lsrf => fptr%geometry%lsrf(:,:)
+ topg => fptr%geometry%topg (:,:)
+ stagthck => fptr%geomderv%stagthck(:,:)
+ dthckdew => fptr%geomderv%dthckdew(:,:)
+ dthckdns => fptr%geomderv%dthckdns(:,:)
+ dusrfdew => fptr%geomderv%dusrfdew(:,:)
+ dusrfdns => fptr%geomderv%dusrfdns(:,:)
+ dlsrfdew => fptr%geomderv%dlsrfdew(:,:)
+ dlsrfdns => fptr%geomderv%dlsrfdns(:,:)
+ d2thckdew2 => fptr%geomderv%d2thckdew2(:,:)
+ d2thckdns2 => fptr%geomderv%d2thckdns2(:,:)
+ d2usrfdew2 => fptr%geomderv%d2usrfdew2(:,:)
+ d2usrfdns2 => fptr%geomderv%d2usrfdns2(:,:)
+
+ !Note: The beta passed into the solver is equal to model%velocity%beta
+ beta => fptr%velocity%beta(:,:)
+ beta_const => fptr%paramets%ho_beta_const
+ mintauf => fptr%basalproc%mintauf(:,:)
+ bwat => fptr%temper%bwat(:,:)
+ basal_physics = fptr%basal_physics
+
+!intent (inout) terms
+ btraction => fptr%velocity%btraction(:,:,:)
+ flwa => fptr%temper%flwa(:,:,:)
+ efvs => fptr%stress%efvs(:,:,:)
+ uvel => fptr%velocity%uvel(:,:,:)
+ vvel => fptr%velocity%vvel(:,:,:)
+! ures => fptr%velocity%ures(:,:,:) !! used for output of residual fields
+! vres => fptr%velocity%vres(:,:,:) !! used for output of residual fields
+! magres => fptr%velocity%magres(:,:,:) !! used for output of residual fields
+ L2norm = fptr%solver_data%L2norm
+
+ allocate( ui(ewn-1,nsn-1), um(ewn-1,nsn-1) )
+ ui= fptr%solver_data%ui
+ um = fptr%solver_data%um
+
+ pcgsize = fptr%solver_data%pcgsize
+ allocate( gxf(2*pcgsize(1)) )
+
+ gxf = fptr%solver_data%gxf
+! temporary to test JFNK - need to take out
+ counter = 1
+
+ ! NOTE(review): d2usrfdewdns/d2thckdewdns are not declared locally, so
+ ! they are presumably module-level work arrays -- confirm.
+ d2usrfdewdns = fptr%solver_data%d2usrfcross
+ d2thckdewdns = fptr%solver_data%d2thckcross
+
+ matrixA = fptr%solver_data%matrixA
+ matrixC = fptr%solver_data%matrixC
+ allocate( vectp( pcgsize(1)) )
+ ! vectx is only used by the commented-out res_vect_jfnk diagnostics below.
+ allocate( vectx(2*pcgsize(1)) )
+
+ ! Unpack the trial solution xtp into the model's uvel/vvel arrays.
+ call solver_postprocess_jfnk( ewn, nsn, upn, ui, &
+ xtp, vvel, uvel, ghostbvel, pcgsize(1) )
+
+ ! coordinate halos for updated uvel and vvel
+ call t_startf("Calc_F_uvhalo_upd")
+ call staggered_parallel_halo(uvel)
+ call staggered_parallel_halo(vvel)
+ call t_stopf("Calc_F_uvhalo_upd")
+
+ ! Effective viscosity for the current trial velocities.
+ call t_startf("Calc_F_findefvsstr")
+ call findefvsstr(ewn, nsn, upn, &
+ stagsigma, counter, &
+ whichefvs, efvs, &
+ uvel, vvel, &
+ flwa, thck, &
+ dusrfdew, dthckdew, &
+ dusrfdns, dthckdns, &
+ um)
+ call t_stopf("Calc_F_findefvsstr")
+
+!==============================================================================
+! jfl 20100412: residual for v comp: Fv= A(utp,vtp)vtp - b(utp,vtp)
+!==============================================================================
+
+ ! *SFP* calculation of coeff. for stress balance calc.
+ call t_startf("Calc_F_findcoefstr1")
+ call findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ 2, efvs, &
+ vvel, uvel, &
+ thck, dusrfdns, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ ui, um, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ 0 )
+
+ call t_stopf("Calc_F_findcoefstr1")
+
+ rhsx(1:pcgsize(1)) = rhsd ! Fv
+
+ if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+ call t_startf("Calc_F_form_matrix1")
+ call form_matrix ( matrixA ) ! to get A(utp,vtp)
+ call t_stopf("Calc_F_form_matrix1")
+#ifdef TRILINOS
+ else
+ if (ispert == 0) then
+ call t_startf("Calc_F_savetrilinos1")
+ call savetrilinosmatrix(0);
+ call t_stopf("Calc_F_savetrilinos1")
+ endif
+#endif
+ end if
+
+ vectp = xtp(1:pcgsize(1))
+
+ call t_startf("Calc_F_res_vect")
+ call res_vect(matrixA, vectp, rhsd, pcgsize(1), gxf, L2square, whatsparse)
+ call t_stopf("Calc_F_res_vect")
+ ! L2square holds the squared norm of the v residual; the sqrt of the
+ ! combined (v + u) sum is taken after the u component below.
+ L2norm=L2square
+
+ F(1:pcgsize(1)) = vectp(1:pcgsize(1))
+
+!==============================================================================
+! jfl 20100412: residual for u comp: Fu= C(utp,vtp)utp - d(utp,vtp)
+!==============================================================================
+
+ call t_startf("Calc_F_findcoefstr2")
+
+ call findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ 1, efvs, &
+ uvel, vvel, &
+ thck, dusrfdew, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ ui, um, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ 0 )
+
+ call t_stopf("Calc_F_findcoefstr2")
+
+ rhsx(pcgsize(1)+1:2*pcgsize(1)) = rhsd ! Fu (rhs for the u component)
+
+ if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+ call t_startf("Calc_F_form_matrix2")
+ call form_matrix ( matrixC ) ! to get C(utp,vtp)
+ call t_stopf("Calc_F_form_matrix2")
+#ifdef TRILINOS
+ else
+ if (ispert == 0) then
+ call t_startf("Calc_F_savetrilinos2")
+ call savetrilinosmatrix(1);
+ call t_stopf("Calc_F_savetrilinos2")
+ endif
+#endif
+ end if
+
+ vectp(1:pcgsize(1)) = xtp(pcgsize(1)+1:2*pcgsize(1))
+
+ call t_startf("Calc_F_res_vect")
+ call res_vect(matrixC, vectp, rhsd, pcgsize(1), gxf, L2square, whatsparse)
+ call t_stopf("Calc_F_res_vect")
+ ! Combined residual norm: sqrt(|Fv|^2 + |Fu|^2).
+ L2norm = sqrt(L2norm + L2square)
+
+ F(pcgsize(1)+1:2*pcgsize(1)) = vectp(1:pcgsize(1))
+
+!NOTE: Older code that doesn't seem to be needed anymore? Note that "res_vect_jfnk" sits inside of "res_vect.F90"
+! and should NOT be removed. It is still useful, as per below where it can be used during debug/perf. testing to
+! output the 3d residual fields.
+!
+! vectx = xtp
+! call res_vect_jfnk(matrixA, matrixC, vectx, rhsx, pcgsize(1), 2*pcgsize(1), gxf, L2square, whatsparse)
+! L2norm = L2square
+! F = vectx
+
+ call solver_postprocess_jfnk( ewn, nsn, upn, ui, xtp, vvel, uvel, ghostbvel, pcgsize(1) )
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!! This section used and active only if / for output of residual fields !!
+! Ft = F !! need a temp variable to pass in here because "res_vect_jfnk" alters the value of "F"
+! call res_vect_jfnk(matrixA, matrixC, Ft, rhsx, pcgsize(1), 2*pcgsize(1), gxf, L2square, whatsparse)
+! call resvect_postprocess_jfnk( ewn, nsn, upn, ui, pcgsize(1), Ft, vres, ures, magres )
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ ! Store the updated norm and operator blocks back in the model object.
+ fptr%solver_data%L2norm = L2norm
+ fptr%solver_data%matrixA = matrixA
+ fptr%solver_data%matrixC = matrixC
+ call t_stopf("Calc_F")
+
+end subroutine calc_F
+
+!***********************************************************************
+
+subroutine ghost_preprocess( ewn, nsn, upn, uindx, ughost, vghost, &
+                             uk_1, vk_1, uvel, vvel, g_flag)
+
+! Load the velocity fields (plus their top/base ghost values) into the
+! flat solution vectors uk_1 and vk_1, and build the ghost-flag vector.
+! uk_1, vk_1 and g_flag are used by the residual calculation (jfl 20100430).
+
+ use parallel
+
+ implicit none
+
+ integer, intent(in) :: ewn, nsn, upn
+ integer, dimension(:,:), intent(in) :: uindx
+ integer, dimension(:), intent(out) :: g_flag
+ real(dp), dimension(2,ewn-1,nsn-1), intent(in) ::ughost,vghost
+ real(dp), dimension(:,:,:), intent(in) :: uvel, vvel
+ real(dp), dimension(:), intent(out) :: uk_1, vk_1
+
+ integer :: iew, ins, ew_hi, ns_hi
+ integer, dimension(2) :: vrange
+
+ g_flag = 0
+
+ ! loop over locally owned velocity points only (skip halos)
+ ew_hi = size(uindx,1) - staggered_uhalo
+ ns_hi = size(uindx,2) - staggered_uhalo
+
+ do ins = 1+staggered_lhalo, ns_hi
+    do iew = 1+staggered_lhalo, ew_hi
+       if (uindx(iew,ins) == 0) cycle   ! no unknowns at this point
+       vrange = getlocrange(upn, uindx(iew,ins))
+
+       ! interior column values, then the two ghost entries
+       uk_1(vrange(1):vrange(2)) = uvel(:,iew,ins)
+       uk_1(vrange(1)-1) = ughost(1,iew,ins)   ! ghost at top
+       uk_1(vrange(2)+1) = ughost(2,iew,ins)   ! ghost at base
+
+       vk_1(vrange(1):vrange(2)) = vvel(:,iew,ins)
+       vk_1(vrange(1)-1) = vghost(1,iew,ins)   ! ghost at top
+       vk_1(vrange(2)+1) = vghost(2,iew,ins)   ! ghost at base
+
+       ! flag values: 1 = ghost at top, 2 = ghost at base
+       g_flag(vrange(1)-1) = 1
+       g_flag(vrange(2)+1) = 2
+    end do
+ end do
+
+end subroutine ghost_preprocess
+
+!***********************************************************************
+
+ subroutine ghost_preprocess_jfnk( ewn, nsn, upn, uindx, ughost, vghost, &
+                                   xk_1, uvel, vvel, gx_flag, pcg1)
+
+ ! puts vel values in xk_1 (including ghost values) and creates the
+ ! ghost flag vector. xk_1 and the ghost flag vector are used for
+ ! the residual calculation (jfl 20100430), adapted to combine uk, vk (kje 20101002)
+ !
+ ! Layout of xk_1: v unknowns start at index 1; u unknowns are stored
+ ! at offset pcg1 (the size of one velocity component).
+ use parallel
+
+ implicit none
+
+ integer, intent(in) :: ewn, nsn, upn
+ ! pcg1 was previously declared among the locals with no intent;
+ ! declare intent(in) for consistency with ghost_postprocess_jfnk.
+ integer, intent(in) :: pcg1
+ integer, dimension(:,:), intent(in) :: uindx
+ integer, dimension(:), intent(out) :: gx_flag
+ real(dp), dimension(2,ewn-1,nsn-1), intent(in) ::ughost,vghost
+ real(dp), dimension(:,:,:), intent(in) :: uvel, vvel
+ real(dp), dimension(:), intent(out) :: xk_1
+
+ integer :: ew, ns
+ integer, dimension(2) :: loc
+
+ gx_flag = 0
+
+ do ns = 1+staggered_lhalo, size(uindx,2)-staggered_uhalo
+ do ew = 1+staggered_lhalo, size(uindx,1)-staggered_uhalo
+ if (uindx(ew,ns) /= 0) then
+ loc = getlocrange(upn, uindx(ew,ns))
+ xk_1(pcg1+loc(1):pcg1+loc(2)) = uvel(:,ew,ns)
+ xk_1(pcg1+loc(1)-1) = ughost(1,ew,ns) ! ghost at top
+ xk_1(pcg1+loc(2)+1) = ughost(2,ew,ns) ! ghost at base
+
+ xk_1(loc(1):loc(2)) = vvel(:,ew,ns)
+ xk_1(loc(1)-1) = vghost(1,ew,ns) ! ghost at top
+ xk_1(loc(2)+1) = vghost(2,ew,ns) ! ghost at base
+
+! independent of u and v
+ gx_flag(loc(1)-1) = 1 ! ghost at top
+ gx_flag(loc(2)+1) = 2 ! ghost at base
+ end if
+ end do
+ end do
+
+ end subroutine ghost_preprocess_jfnk
+
+!***********************************************************************
+
+subroutine ghost_postprocess( ewn, nsn, upn, uindx, uk_1, vk_1, &
+                              ughost, vghost )
+
+! Copy the ghost values carried in uk_1/vk_1 back into ughost/vghost
+! so they can be reused for the next time step (jfl 20100430).
+! Columns with no unknowns (uindx == 0) get zero ghost values.
+ use parallel
+
+ implicit none
+
+ integer, intent(in) :: ewn, nsn, upn
+ integer, dimension(:,:), intent(in) :: uindx
+ real(dp), dimension(:), intent(in) :: uk_1, vk_1
+ real(dp), dimension(2,ewn-1,nsn-1), intent(out) :: ughost,vghost
+
+ integer :: iew, ins
+ integer, dimension(2) :: vrange
+
+ do ins = 1+staggered_lhalo, size(uindx,2)-staggered_uhalo
+    do iew = 1+staggered_lhalo, size(uindx,1)-staggered_uhalo
+       if (uindx(iew,ins) == 0) then
+          ! no ice unknowns here: zero both ghost pairs
+          ughost(:,iew,ins) = 0.d0
+          vghost(:,iew,ins) = 0.d0
+       else
+          vrange = getlocrange(upn, uindx(iew,ins))
+          ughost(1,iew,ins) = uk_1(vrange(1)-1) ! ghost at top
+          ughost(2,iew,ins) = uk_1(vrange(2)+1) ! ghost at base
+          vghost(1,iew,ins) = vk_1(vrange(1)-1) ! ghost at top
+          vghost(2,iew,ins) = vk_1(vrange(2)+1) ! ghost at base
+       end if
+    end do
+ end do
+end subroutine ghost_postprocess
+
+!***********************************************************************
+
+ subroutine ghost_postprocess_jfnk( ewn, nsn, upn, uindx, xk_1, &
+                                    ughost, vghost, pcg1 )
+
+ ! puts ghost values (which are now in uk_1 and vk_1) into ughost and
+ ! vghost so that they can be used for the next time step (jfl 20100430)
+ ! update to use combined uk and vk = xk (kje 20101003)
+ !
+ ! Layout of xk_1 (mirrors ghost_preprocess_jfnk): v unknowns start at
+ ! index 1; u unknowns start at offset pcg1 (size of one component).
+ ! Columns with no unknowns (uindx == 0) get zero ghost values.
+ use parallel
+
+ implicit none
+
+ integer, intent(in) :: ewn, nsn, upn, pcg1
+ integer, dimension(:,:), intent(in) :: uindx
+ real(dp), dimension(:), intent(in) :: xk_1
+ real(dp), dimension(2,ewn-1,nsn-1), intent(out) :: ughost,vghost
+
+ integer :: ew, ns
+ integer, dimension(2) :: loc
+
+ do ns = 1+staggered_lhalo, size(uindx,2)-staggered_uhalo
+ do ew = 1+staggered_lhalo, size(uindx,1)-staggered_uhalo
+ if (uindx(ew,ns) /= 0) then
+ loc = getlocrange(upn, uindx(ew,ns))
+ ughost(1,ew,ns) = xk_1(pcg1+loc(1)-1) ! ghost at top
+ ughost(2,ew,ns) = xk_1(pcg1+loc(2)+1) ! ghost at base
+ vghost(1,ew,ns) = xk_1(loc(1)-1) ! ghost at top
+ vghost(2,ew,ns) = xk_1(loc(2)+1) ! ghost at base
+ else
+ ughost(1,ew,ns) = 0.d0
+ ughost(2,ew,ns) = 0.d0
+ vghost(1,ew,ns) = 0.d0
+ vghost(2,ew,ns) = 0.d0
+ end if
+ end do
+ end do
+ end subroutine ghost_postprocess_jfnk
+
+!***********************************************************************
+
+subroutine mindcrshstr(pt,whichresid,vel,counter,resid)
+
+  ! Function to perform 'unstable manifold correction' (see Hindmarsh and Payne, 1996,
+  ! "Time-step limits for stable solutions of the ice-sheet equation", Annals of
+  ! Glaciology, 23, p.74-85)
+  !
+  ! Arguments:
+  !   pt         - slot index into the saved arrays usav/corr (selects the
+  !                velocity component being corrected -- confirm mapping
+  !                with callers)
+  !   whichresid - residual-calculation option (HO_RESID_* cases below)
+  !   vel        - velocity field; updated in place with the corrected value
+  !   counter    - nonlinear iteration counter; saved state is reset when
+  !                counter == 1
+  !   resid      - normalized velocity-change residual (output)
+  !
+  ! NOTE(review): usav and corr are not declared here, so they are
+  ! presumably module-level work arrays; together with the saved new/old
+  ! indices below, this routine keeps state between calls and is not
+  ! reentrant.
+  use parallel
+  use glimmer_paramets, only: GLC_DEBUG
+
+  implicit none
+
+  real(dp), intent(inout), dimension(:,:,:) :: vel
+  integer, intent(in) :: counter, pt, whichresid
+
+  real(dp), intent(out) :: resid
+
+!NOTE - critlimit is never used
+!NOTE - SCALING - Does 'small' need a velocity scale factor?
+  real(dp), parameter :: ssthres = 5.d0 * pi / 6.d0, &
+                         critlimit = 10.d0 / (scyr * vel0), &
+                         small = 1.0d-16
+
+  real(dp) :: temp_vel
+
+  ! new/old swap roles each call (double buffering of the correction)
+  integer, dimension(2), save :: new = 1, old = 2
+  !JEFF integer :: locat(3)
+  integer ew, ns, nr
+
+  integer, dimension(size(vel,1),size(vel,2),size(vel,3)) :: vel_ne_0
+  real(dp) :: sum_vel_ne_0
+
+!WHL - debug (to print out intermediate terms in equations)
+!! real(dp) :: alpha, theta
+
+! Note: usav and corr initialized to zero upon allocation; following probably
+! not necessary, but occurs only once (per nonlinear solve)
+  if (counter == 1) then
+     usav(:,:,:,pt) = 0.d0
+     corr(:,:,:,old(pt),pt) = 0.d0
+  end if
+
+  ! RESIDUAL CALCULATION
+
+  select case (whichresid)
+  ! options for residual calculation method, as specified in configuration file
+  ! (see additional notes in "higher-order options" section of documentation)
+  ! case(0): use max of abs( vel_old - vel ) / vel )
+  ! case(1): use max of abs( vel_old - vel ) / vel ) but ignore basal vels
+  ! case(2): use mean of abs( vel_old - vel ) / vel )
+  ! case(3): use max of abs( vel_old - vel ) / vel ) (in addition to L2 norm calculated externally)
+
+  case(HO_RESID_MAXU)
+
+     ! resid = maxval( abs((usav(:,:,:,pt) - vel ) / vel ), MASK = vel /= 0.d0)
+     ! NOTE(review): the ratio divides by vel, not abs(vel); for negative
+     ! velocities the contribution is negative and can never win the max.
+     ! This matches the commented-out maxval form above -- confirm intended.
+     resid = 0.d0
+
+     do ns = 1 + staggered_lhalo, size(vel,3) - staggered_uhalo
+        do ew = 1 + staggered_lhalo, size(vel,2) - staggered_uhalo
+           do nr = 1, size(vel, 1)
+              if (vel(nr,ew,ns) /= 0.d0) then
+                 resid = max(resid, abs(usav(nr,ew,ns,pt) - vel(nr,ew,ns)) / vel(nr,ew,ns))
+              endif
+           enddo
+        enddo
+     enddo
+
+     resid = parallel_reduce_max(resid)
+     !locat is only used in diagnostic print statement below.
+     !locat = maxloc( abs((usav(:,:,:,pt) - vel ) / vel ), MASK = vel /= 0.d0)
+
+  case(HO_RESID_MAXU_NO_UBAS)
+     ! nr = size( vel, dim=1 ) ! number of grid points in vertical ...
+     ! resid = maxval( abs((usav(1:nr-1,:,:,pt) - vel(1:nr-1,:,:) ) / vel(1:nr-1,:,:) ), MASK = vel /= 0.d0)
+     ! Same as HO_RESID_MAXU but the basal level (last vertical index) is skipped.
+     resid = 0.d0
+
+     do ns = 1 + staggered_lhalo, size(vel,3) - staggered_uhalo
+        do ew = 1 + staggered_lhalo, size(vel,2) - staggered_uhalo
+           do nr = 1, size(vel, 1) - 1
+              if (vel(nr,ew,ns) /= 0.d0) then
+                 resid = max(resid, abs(usav(nr,ew,ns,pt) - vel(nr,ew,ns)) / vel(nr,ew,ns))
+              endif
+           enddo
+        enddo
+     enddo
+
+     resid = parallel_reduce_max(resid)
+     !locat = maxloc( abs((usav(1:nr-1,:,:,pt) - vel(1:nr-1,:,:) ) / vel(1:nr-1,:,:) ), &
+     ! MASK = vel /= 0.d0)
+
+  case(HO_RESID_MEANU)
+     call not_parallel(__FILE__, __LINE__)
+     !JEFF This has not been translated to parallel.
+     resid = 0.d0
+     nr = size( vel, dim=1 )
+     vel_ne_0 = 0
+     where ( vel /= 0.d0 ) vel_ne_0 = 1
+
+     ! include basal velocities in resid. calculation when using MEAN
+     ! JEFF Compute sums across nodes in order to compute mean.
+     resid = sum( abs((usav(:,:,:,pt) - vel ) / vel ), &
+                  MASK = vel /= 0.d0)
+
+     resid = parallel_reduce_sum(resid)
+     sum_vel_ne_0 = sum( vel_ne_0 )
+     sum_vel_ne_0 = parallel_reduce_sum(sum_vel_ne_0)
+
+     resid = resid / sum_vel_ne_0
+
+     ! ignore basal velocities in resid. calculation when using MEAN
+     ! resid = sum( abs((usav(1:nr-1,:,:,pt) - vel(1:nr-1,:,:) ) / vel(1:nr-1,:,:) ), &
+     ! MASK = vel /= 0.d0) / sum( vel_ne_0(1:nr-1,:,:) )
+
+     ! NOTE that the location of the max residual is somewhat irrelevant here
+     ! since we are using the mean resid for convergence testing
+     ! locat = maxloc( abs((usav(:,:,:,pt) - vel ) / vel ), MASK = vel /= 0.d0)
+
+  case(HO_RESID_L2NORM)
+
+!! SFP - the L2norm option is handled entirely external to this subroutine. That is, if the L2norm option
+!! for the residul is specified (it is currently the default), the residual is calculated as the L2norm of
+!! the system residul, r = Ax - b (rather than defining the residual according to the velocity update, as
+!! is done in all the parts of this subroutine). If the L2norm option is active, the value of "residual"
+!! passed out of this subroutine is NOT used for determining when to halt iterations on the velocity solution.
+!! The original code that was here for this option has been removed.
+
+  end select
+
+  if (GLC_DEBUG) then
+     ! Additional debugging line, useful when trying to determine if convergence is being consistently
+     ! held up by the residual at one or a few particular locations in the domain.
+     ! print '("* ",i3,g20.6,3i6,g20.6)', counter, resid, locat, vel(locat(1),locat(2),locat(3))*vel0
+  end if
+
+  ! SAVE VELOCITY AND CALCULATE CORRECTION
+
+  corr(:,:,:,new(pt),pt) = vel(:,:,:) - usav(:,:,:,pt) ! changed
+
+! if (counter > 1) then
+! where (acos((corr(:,:,:,new(pt),pt) * corr(:,:,:,old(pt),pt)) / &
+! (abs(corr(:,:,:,new(pt),pt)) * abs(corr(:,:,:,old(pt),pt)) + small)) > &
+! ssthres .and. corr(:,:,:,new(pt),pt) - corr(:,:,:,old(pt),pt) /= 0.d0 )
+! mindcrshstr = usav(:,:,:,pt) + &
+! corr(:,:,:,new(pt),pt) * abs(corr(:,:,:,old(pt),pt)) / &
+! abs(corr(:,:,:,new(pt),pt) - corr(:,:,:,old(pt),pt))
+!! mindcrshstr = vel; ! jfl uncomment this and comment out line above
+!! ! to avoid the unstable manifold correction
+! elsewhere
+! mindcrshstr = vel;
+! end where
+! else
+! mindcrshstr = vel;
+! end if
+! usav(:,:,:,pt) = vel
+! vel = mindcrshstr
+
+  if (counter > 1) then
+
+     ! Replace where clause with explicit, owned variables for each processor.
+     ! Point-wise unstable manifold correction: when the angle between the
+     ! new and previous corrections exceeds ssthres, damp the update.
+
+     do ns = 1 + staggered_lhalo, size(vel,3) - staggered_uhalo
+        do ew = 1 + staggered_lhalo, size(vel,2) - staggered_uhalo
+           do nr = 1, size(vel, 1)
+              temp_vel = vel(nr,ew,ns)
+
+              if (acos((corr(nr,ew,ns,new(pt),pt) * corr(nr,ew,ns,old(pt),pt)) / &
+                  (abs(corr(nr,ew,ns,new(pt),pt)) * abs(corr(nr,ew,ns,old(pt),pt)) + small)) > &
+                  ssthres .and. corr(nr,ew,ns,new(pt),pt) - corr(nr,ew,ns,old(pt),pt) /= 0.d0) then
+
+                 ! theta and alpha are intermediate terms that might be useful to print out
+!! theta = acos((corr(nr,ew,ns,new(pt),pt) * corr(nr,ew,ns,old(pt),pt)) / &
+!! (abs(corr(nr,ew,ns,new(pt),pt)) * abs(corr(nr,ew,ns,old(pt),pt)) + small))
+
+!! alpha = abs(corr(nr,ew,ns,old(pt),pt)) / &
+!! abs(corr(nr,ew,ns,new(pt),pt) - corr(nr,ew,ns,old(pt),pt))
+
+                 vel(nr,ew,ns) = usav(nr,ew,ns,pt) + &
+                      corr(nr,ew,ns,new(pt),pt) * abs(corr(nr,ew,ns,old(pt),pt)) / &
+                      abs(corr(nr,ew,ns,new(pt),pt) - corr(nr,ew,ns,old(pt),pt))
+
+              endif
+
+              ! save the PRE-correction velocity for the next iteration
+              usav(nr,ew,ns,pt) = temp_vel
+           enddo
+        enddo
+     enddo
+  else
+
+     usav(:,:,:,pt) = vel
+
+  end if
+
+  ! UPDATE POINTERS
+
+  !*SFP* Old version
+  ! if (new(pt) == 1) then; old(pt) = 1; new(pt) = 2; else; old(pt) = 1; new(pt) = 2; end if
+
+  !*SFP* correction from Carl Gladdish
+  if (new(pt) == 1) then; old(pt) = 1; new(pt) = 2; else; old(pt) = 2; new(pt) = 1; end if
+
+  return
+
+end subroutine mindcrshstr
+
+!***********************************************************************
+
+!NOTE - There are two mindcrshstr subroutines. Remove one of them?
+
+function mindcrshstr2(pt,whichresid,vel,counter,resid)
+
+  ! Function to perform 'unstable manifold correction' (see Hindmarsh and Payne, 1996,
+  ! "Time-step limits for stable solutions of the ice-sheet equation", Annals of
+  ! Glaciology, 23, p.74-85)
+
+  ! Alternate unstable manifold scheme, based on De Smedt, Pattyn, and De Groen, J. Glaciology 2010
+  ! Written by Carl Gladdish
+  !
+  ! Returns the corrected velocity field as the function result (the
+  ! 'vel' argument is NOT modified, unlike mindcrshstr); also outputs a
+  ! normalized velocity-change residual in 'resid'.
+  !
+  ! NOTE(review): usav, corr and usav_avg are not declared locally, so
+  ! they are presumably module-level work arrays; with the saved new/old
+  ! indices below this routine keeps state between calls.  It is guarded
+  ! by not_parallel(), i.e. serial-only.
+
+  use parallel ! Use of WHERE statements is causing inconsistencies on the halos in parallel. Rewrite like mindcrshstr()
+  implicit none
+
+  real(dp), intent(in), dimension(:,:,:) :: vel
+  integer, intent(in) :: counter, pt, whichresid
+  real(dp), intent(out) :: resid
+
+  real(dp), dimension(size(vel,1),size(vel,2),size(vel,3)) :: mindcrshstr2
+
+  ! start applying the manifold correction from this iteration onward
+  integer, parameter :: start_umc = 3
+  real(dp), parameter :: cvg_accel = 2.d0
+  real(dp), parameter :: small = 1.0d-16
+
+  real(dp) in_prod, len_new, len_old, mean_rel_diff, sig_rel_diff
+  real(dp) :: theta
+
+  integer, dimension(2), save :: new = 1, old = 2
+  integer :: locat(3)
+
+  integer :: nr
+  integer, dimension(size(vel,1),size(vel,2),size(vel,3)) :: vel_ne_0
+  real(dp),dimension(size(vel,1),size(vel,2),size(vel,3)) :: rel_diff
+
+  call not_parallel(__FILE__, __LINE__)
+
+  if (counter == 1) then
+     usav(:,:,:,pt) = 0.d0
+     corr(:,:,:,:,:) = 0.d0
+  end if
+
+  corr(:,:,:,new(pt),pt) = vel - usav(:,:,:,pt)
+
+  if (counter >= start_umc) then
+
+     ! angle between the new and previous correction vectors (global
+     ! inner product over the whole field)
+     in_prod = sum( corr(:,:,:,new(pt),pt) * corr(:,:,:,old(pt),pt) )
+     len_new = sqrt(sum( corr(:,:,:,new(pt),pt) * corr(:,:,:,new(pt),pt) ))
+     len_old = sqrt(sum( corr(:,:,:,old(pt),pt) * corr(:,:,:,old(pt),pt) ))
+
+     theta = acos( in_prod / (len_new * len_old + small) )
+
+     ! small angle: corrections aligned -> accelerate; large angle:
+     ! oscillating -> damp; otherwise take the standard update
+     if (theta < (1.d0/8.d0)*pi) then
+        mindcrshstr2 = usav(:,:,:,pt) + cvg_accel * corr(:,:,:,new(pt),pt)
+! print *, theta/pi, 'increased correction'
+     else if(theta < (19.d0/20.d0)*pi) then
+        mindcrshstr2 = vel
+! print *, theta/pi, 'standard correction'
+     else
+        mindcrshstr2 = usav(:,:,:,pt) + (1.0/cvg_accel) * corr(:,:,:,new(pt),pt)
+! print *, theta/pi, 'decreasing correction'
+     end if
+
+  else
+
+     mindcrshstr2 = vel;
+     ! print *, 'Not attempting adjustment to correction'
+
+  end if
+
+
+  ! now swap slots for storing the previous correction
+  if (new(pt) == 1) then
+     old(pt) = 1; new(pt) = 2
+  else
+     old(pt) = 2; new(pt) = 1
+  end if
+
+  if (counter == 1) then
+     usav_avg = 1.d0
+  else
+     usav_avg(1) = sum( abs(usav(:,:,:,1)) ) / size(vel) ! a x-dir transport velocity scale
+     usav_avg(2) = sum( abs(usav(:,:,:,2)) ) / size(vel) ! a y-dir transport velocity scale
+  end if
+
+! print *, 'usav_avg(1)',usav_avg(1),'usav_avg(2)',usav_avg(2)
+
+  select case (whichresid)
+
+  ! options for residual calculation method, as specified in configuration file
+  ! (see additional notes in "higher-order options" section of documentation)
+  ! case(0): use max of abs( vel_old - vel ) / vel )
+  ! case(1): use max of abs( vel_old - vel ) / vel ) but ignore basal vels
+  ! case(2): use mean of abs( vel_old - vel ) / vel )
+
+  case(HO_RESID_MAXU)
+     ! relative change weighted by the component's share of the overall
+     ! velocity scale (usav_avg)
+     rel_diff = 0.d0
+     vel_ne_0 = 0
+     where ( mindcrshstr2 /= 0.d0 )
+        vel_ne_0 = 1
+        rel_diff = abs((usav(:,:,:,pt) - mindcrshstr2) / mindcrshstr2) &
+                   * usav_avg(pt)/sqrt(sum(usav_avg ** 2.0))
+     end where
+
+     resid = maxval( rel_diff, MASK = mindcrshstr2 /= 0.d0 )
+     locat = maxloc( rel_diff, MASK = mindcrshstr2 /= 0.d0 )
+
+! mean_rel_diff = sum(rel_diff) / sum(vel_ne_0)
+! sig_rel_diff = sqrt( sum((rel_diff - mean_rel_diff) ** 2.d0 )/ sum(vel_ne_0) )
+! print *, 'mean', mean_rel_diff, 'sig', sig_rel_diff
+
+     !write(*,*) 'locat', locat
+     !call write_xls('resid1.txt',abs((usav(1,:,:,pt) - mindcrshstr2(1,:,:)) / (mindcrshstr2(1,:,:) + 1e-20)))
+
+  case(HO_RESID_MAXU_NO_UBAS)
+     !**cvg*** should replace vel by mindcrshstr2 in the following lines, I believe
+     nr = size( vel, dim=1 ) ! number of grid points in vertical ...
+     resid = maxval( abs((usav(1:nr-1,:,:,pt) - vel(1:nr-1,:,:) ) / vel(1:nr-1,:,:) ), &
+                     MASK = vel /= 0.d0)
+     locat = maxloc( abs((usav(1:nr-1,:,:,pt) - vel(1:nr-1,:,:) ) / vel(1:nr-1,:,:) ), &
+                     MASK = vel /= 0.d0)
+
+  case(HO_RESID_MEANU)
+     !**cvg*** should replace vel by mindcrshstr2 in the following lines, I believe
+     nr = size( vel, dim=1 )
+     vel_ne_0 = 0
+     where ( vel /= 0.d0 ) vel_ne_0 = 1
+
+     ! include basal velocities in resid. calculation when using MEAN
+     resid = sum( abs((usav(:,:,:,pt) - vel ) / vel ), &
+                  MASK = vel /= 0.d0) / sum( vel_ne_0 )
+
+     ! ignore basal velocities in resid. calculation when using MEAN
+     ! resid = sum( abs((usav(1:nr-1,:,:,pt) - vel(1:nr-1,:,:) ) / vel(1:nr-1,:,:) ), &
+     ! MASK = vel /= 0.d0) / sum( vel_ne_0(1:nr-1,:,:) )
+
+     ! NOTE that the location of the max residual is somewhat irrelevant here
+     ! since we are using the mean resid for convergence testing
+     locat = maxloc( abs((usav(:,:,:,pt) - vel ) / vel ), MASK = vel /= 0.d0)
+
+  end select
+
+  ! save the CORRECTED velocity for the next iteration
+  usav(:,:,:,pt) = mindcrshstr2
+
+  ! Additional debugging line, useful when trying to determine if convergence is being consistently
+  ! held up by the residual at one or a few particular locations in the domain.
+! print '("* ",i3,g20.6,3i6,g20.6)', counter, resid, locat, vel(locat(1),locat(2),locat(3))*vel0
+
+  return
+
+end function mindcrshstr2
+
+!***********************************************************************
+
+subroutine findcoefstr(ewn, nsn, upn, &
+ dew, dns, sigma, &
+ pt, efvs, &
+ thisvel, othervel, &
+ thck, thisdusrfdx, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ stagthck, whichbabc, &
+ uindx, mask, &
+ lsrf, topg, &
+ flwa, &
+ beta, &
+ beta_const, &
+ mintauf, &
+ bwat, &
+ basal_physics, &
+ btraction, &
+ assembly )
+
+ ! Main subroutine for determining coefficients that go into the LHS matrix A
+ ! in the expression Au = b. Calls numerous other subroutines, including boundary
+ ! condition subroutines, which determine "b".
+ !
+ ! Each locally owned velocity point (ew,ns) is classified as
+ ! (1) interior ice, (2) floating (calving) boundary, or (3) a Dirichlet /
+ ! land-margin / thin-ice point; for cases (1) and (2) 'bodyset' assembles
+ ! matrix rows for each vertical level, while case (3) pins the velocity to
+ ! its current value via 'valueset'.
+
+ use glissade_basal_traction, only: calcbeta
+ use parallel
+
+ implicit none
+
+ integer, intent(in) :: ewn, nsn, upn, assembly
+ real(dp), intent(in) :: dew, dns
+ real(dp), dimension(:), intent(in) :: sigma
+
+ real(dp), dimension(:,:,:), intent(in) :: efvs, thisvel, &
+ othervel
+ real(dp), dimension(:,:), intent(in) :: stagthck, thisdusrfdx, &
+ dusrfdew, dthckdew, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdns, dthckdns, &
+ d2usrfdns2, d2thckdns2, &
+ d2usrfdewdns,d2thckdewdns, &
+ dlsrfdew, dlsrfdns, &
+ thck, lsrf, topg
+
+ real(dp), dimension(:,:), intent(inout) :: beta
+ real(dp), dimension(:,:), intent(in) :: mintauf
+ real(dp), intent(in) :: beta_const ! spatially uniform beta (Pa yr/m)
+ real(dp), intent(in), dimension(:,:) :: bwat ! basal water depth
+ type(glide_basal_physics), intent(inout) :: basal_physics ! basal_physics object
+
+ real(dp), dimension(:,:,:), intent(in) :: flwa
+ real(dp), dimension(:,:,:), intent(inout) :: btraction
+
+ integer, dimension(:,:), intent(in) :: mask, uindx
+ integer, intent(in) :: pt, whichbabc
+
+ real(dp) :: flwabar ! depth-averaged flow factor, used in the shelf-front source term
+
+ integer, dimension(6,2) :: loc2
+ integer, dimension(2) :: loc2plusup
+ integer, dimension(3) :: shift
+ integer :: ew, ns, up, up_start
+
+ logical :: comp_bound
+
+ ct_nonzero = 1 ! index to count the number of non-zero entries in the sparse matrix
+
+ if( assembly == 1 )then ! for normal assembly (assembly=0), start vert index at sfc and go to bed
+ up_start = upn ! for boundary traction calc (assembly=1), do matrix assembly on for equations at bed
+ else
+ up_start = 1
+ end if
+
+ ! Note loc2_array is defined only for non-halo ice grid points.
+ ! JEFFLOC returns an array with starting indices into solution vector for each ice grid point.
+
+ allocate(loc2_array(ewn,nsn,2))
+
+!WHL - Using a different procedure depending on whether or not we are using trilinos.
+! This is needed to avoid an error when using the SLAP solver in a
+! single-processor parallel run.
+
+ if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+ loc2_array = getlocationarray(ewn, nsn, upn, mask, uindx, &
+ return_global_IDs = .false.)
+ else
+ loc2_array = getlocationarray(ewn, nsn, upn, mask, uindx)
+ endif
+
+!WHL - debug
+! print*, ' '
+! print*, 'loc2_array(1)'
+! do ns = nsn, 1, -1
+! write(6,'(34i6)') loc2_array(:,ns,1)
+! enddo
+
+! print*, ' '
+! print*, 'loc2_array(2)'
+! do ns = nsn, 1, -1
+! write(6,'(34i6)') loc2_array(:,ns,2)
+! enddo
+
+ ! !!!!!!!!! useful for debugging !!!!!!!!!!!!!!
+ ! print *, 'loc2_array = '
+ ! print *, loc2_array
+ ! pause
+
+ ! Note: With nhalo = 2, efvs has been computed in a layer of halo cells,
+ ! so we have its value in all neighbors of locally owned velocity points.
+
+ ! Compute or prescribe the basal traction coefficient 'beta'
+ ! Note: The initial value of model%velocity%beta can change depending on
+ ! the value of model%options%which_ho_babc.
+
+ ! Note: Arguments must be converted to dimensional units
+
+ beta(:,:) = beta(:,:) * tau0/(vel0*scyr) ! convert to Pa yr/m
+
+ call calcbeta (whichbabc, &
+ dew * len0, dns * len0, & ! m
+ ewn, nsn, &
+ thisvel(upn,:,:) * vel0*scyr, & ! m/yr
+ othervel(upn,:,:) * vel0*scyr, &
+ bwat * thk0, & ! m
+ beta_const * tau0/(vel0*scyr), & ! Pa yr/m
+ mintauf * tau0, & ! Pa
+ basal_physics, &
+ flwa(upn,:,:) * vis0*scyr, &
+ thck, &
+ mask, &
+ beta )
+
+ beta(:,:) = beta(:,:) / (tau0/(vel0*scyr)) ! convert to dimensionless
+
+ do ns = 1+staggered_lhalo, size(mask,2)-staggered_uhalo
+ do ew = 1+staggered_lhalo, size(mask,1)-staggered_uhalo
+
+ !Theoretically, this should just be .false. to remove it from the if statements and let the ghost cells
+ !take over. However, with only one process, this give an exception error when calc_F calls savetrilinosmatrix(0).
+ !Therefore, it will currently revert back to the old BC's when using only one task for now. I am working to
+ !debug and fix this case, but for now, it does no harm for the original BC's.
+
+! comp_bound = ( nslb < 1 .and. ns < staggered_lhalo+1+ghost_shift ) .or. &
+! ( ewlb < 1 .and. ew < staggered_lhalo+1+ghost_shift ) .or. &
+! ( nsub > global_nsn .and. ns > size(mask,2)-staggered_uhalo -ghost_shift ) .or. &
+! ( ewub > global_ewn .and. ew > size(mask,1)-staggered_uhalo -ghost_shift )
+
+ comp_bound = .false.
+
+ ! Calculate the depth-averaged value of the rate factor, needed below when applying an ice shelf
+ ! boundary condition (complicated code so as not to include funny values at boundaries ...
+ ! ... kind of a mess and could be redone or made into a function or subroutine).
+ ! SUM has the definition SUM(ARRAY, DIM, MASK) where MASK is either scalar or the same shape as ARRAY
+ ! JEFFLOC Concerned about the edges at (ew+1, ns), (ew, ns+1), and (ew+1,ns+1)
+
+ !SCALING - The following is OK because flwa*vis0 is equal to the dimensional flow factor.
+ ! The product will still equal the dimensional flow factor when vis0 = 1.
+ ! NOTE(review): the scalar mask 'flwa(1,..)*vis0 < 1.0d-10' includes only columns whose
+ ! level-1 dimensional flow factor is essentially zero; given the stated intent of excluding
+ ! "funny values at boundaries", confirm the comparison direction. Also note the x/x terms
+ ! in the denominator count the selected columns and rely on flwa being nonzero at all
+ ! levels of a selected column (otherwise 0/0 yields NaN).
+ flwabar = ( sum( flwa(:,ew,ns), 1, flwa(1,ew,ns)*vis0 < 1.0d-10 )/real(upn) + &
+ sum( flwa(:,ew,ns+1), 1, flwa(1,ew,ns+1)*vis0 < 1.0d-10 )/real(upn) + &
+ sum( flwa(:,ew+1,ns), 1, flwa(1,ew+1,ns)*vis0 < 1.0d-10 )/real(upn) + &
+ sum( flwa(:,ew+1,ns+1), 1, flwa(1,ew+1,ns+1)*vis0 < 1.0d-10 )/real(upn) ) / &
+ ( sum( flwa(:,ew,ns)/flwa(:,ew,ns), 1, flwa(1,ew,ns)*vis0 < 1.0d-10 )/real(upn) + &
+ sum( flwa(:,ew,ns+1)/flwa(:,ew,ns+1), 1, flwa(1,ew,ns+1)*vis0 < 1.0d-10 )/real(upn) + &
+ sum( flwa(:,ew+1,ns)/flwa(:,ew+1,ns), 1, flwa(1,ew+1,ns)*vis0 < 1.0d-10 )/real(upn) + &
+ sum( flwa(:,ew+1,ns+1)/flwa(:,ew+1,ns+1), 1, flwa(1,ew+1,ns+1)*vis0 < 1.0d-10 )/real(upn) )
+
+ loc2(1,:) = loc2_array(ew,ns,:)
+
+ ! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+ if ( GLIDE_HAS_ICE(mask(ew,ns)) .and. .not. &
+ comp_bound .and. .not. &
+ GLIDE_IS_MARGIN(mask(ew,ns)) .and. .not. &
+ GLIDE_IS_DIRICHLET_BOUNDARY(mask(ew,ns)) .and. .not. &
+ GLIDE_IS_CALVING(mask(ew,ns) ) .and. .not. &
+ GLIDE_IS_THIN(mask(ew,ns) ) ) then
+ ! print *, 'In main body ... ew, ns = ', ew, ns
+ ! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+
+ call calccoeffs( upn, sigma, &
+ stagthck(ew,ns), &
+ dusrfdew(ew,ns), dusrfdns(ew,ns), &
+ dthckdew(ew,ns), dthckdns(ew,ns), &
+ d2usrfdew2(ew,ns), d2usrfdns2(ew,ns), &
+ d2usrfdewdns(ew,ns), &
+ d2thckdew2(ew,ns), d2thckdns2(ew,ns), &
+ d2thckdewdns(ew,ns))
+
+ ! get index of cardinal neighbours
+ loc2(2,:) = loc2_array(ew+1,ns,:)
+ loc2(3,:) = loc2_array(ew-1,ns,:)
+ loc2(4,:) = loc2_array(ew,ns+1,:)
+ loc2(5,:) = loc2_array(ew,ns-1,:)
+
+ ! this loop fills coeff. for all vertical layers at index ew,ns (including sfc. and bed bcs)
+ do up = up_start, upn
+
+ ! Function to adjust indices at sfc and bed so that most correct values of 'efvs' and 'othervel'
+ ! are passed to function. Because of the fact that efvs goes from 1:upn-1 rather than 1:upn
+ ! we simply use the closest values. This could probably be improved upon at some point
+ ! by extrapolating values for efvs at the sfc and bed using one-sided diffs, and it is not clear
+ ! how important this simplfication is.
+ !JEFFLOC indshift() returns three-element shift index for up, ew, and ns respectively.
+ !JEFFLOC It does get passed loc2_array, but it doesn't use it. Further, the shifts can be at most 1 unit in any direction.
+
+ shift = indshift( 0, ew, ns, up, ewn, nsn, upn, loc2_array(:,:,1), stagthck(ew-1:ew+1,ns-1:ns+1) )
+
+ !HALO - Note that ew and ns below are locally owned velocity points.
+ !HALO - This means we need efvs in one layer of halo cells.
+ !JEFFLOC As long as not accessing halo ice points, then won't shift off of halo of size at least 1.
+ !JEFFLOC Completed scan on 11/23. Testing change of definition of loc2_array.
+
+ call bodyset(ew, ns, up, &
+ ewn, nsn, upn, &
+ dew, dns, &
+ pt, loc2_array,&
+ loc2, stagthck, &
+ thisdusrfdx, &
+ dusrfdew, dusrfdns, &
+ dlsrfdew, dlsrfdns, &
+ efvs(up-1+shift(1):up+shift(1),ew:ew+1,ns:ns+1), &
+ othervel(up-1+shift(1):up+1+shift(1), &
+ ew-1+shift(2):ew+1+shift(2), &
+ ns-1+shift(3):ns+1+shift(3)), &
+ thisvel(up-1+shift(1):up+1+shift(1), &
+ ew-1+shift(2):ew+1+shift(2), &
+ ns-1+shift(3):ns+1+shift(3)), &
+ beta(ew,ns), &
+ btraction, &
+ whichbabc, assembly )
+ enddo ! upn
+ ! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+ !NOTE - Not sure COMP_DOMAIN_BND condition is needed
+ elseif ( GLIDE_IS_CALVING( mask(ew,ns) ) .and. .not. &
+ comp_bound .and. .not. &
+ GLIDE_IS_DIRICHLET_BOUNDARY(mask(ew,ns)) .and. .not. &
+ GLIDE_IS_THIN(mask(ew,ns) ) ) then
+ ! print *, 'At a SHELF boundary ... ew, ns = ', ew, ns
+ ! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+
+ call calccoeffs( upn, sigma, &
+ stagthck(ew,ns), &
+ dusrfdew(ew,ns), dusrfdns(ew,ns), &
+ dthckdew(ew,ns), dthckdns(ew,ns), &
+ d2usrfdew2(ew,ns), d2usrfdns2(ew,ns), &
+ d2usrfdewdns(ew,ns), &
+ d2thckdew2(ew,ns), d2thckdns2(ew,ns), &
+ d2thckdewdns(ew,ns))
+
+ do up = up_start, upn
+
+ lateralboundry = .true.
+ shift = indshift( 1, ew, ns, up, &
+ ewn, nsn, upn, &
+ loc2_array(:,:,1), &
+ stagthck(ew-1:ew+1,ns-1:ns+1) )
+
+ call bodyset(ew, ns, up, &
+ ewn, nsn, upn, &
+ dew, dns, &
+ pt, loc2_array,&
+ loc2, stagthck, &
+ thisdusrfdx, &
+ dusrfdew, dusrfdns, &
+ dlsrfdew, dlsrfdns, &
+ efvs(up-1+shift(1):up+shift(1),ew:ew+1,ns:ns+1), &
+ othervel(up-1+shift(1):up+1+shift(1), &
+ ew-1+shift(2):ew+1+shift(2), &
+ ns-1+shift(3):ns+1+shift(3)), &
+ thisvel(up-1+shift(1):up+1+shift(1), &
+ ew-1+shift(2):ew+1+shift(2), &
+ ns-1+shift(3):ns+1+shift(3)), &
+ beta(ew,ns), &
+ btraction, &
+ whichbabc, assembly, &
+ abar=flwabar)
+ enddo
+ lateralboundry = .false.
+ ! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+ !NOTE - Here we deal with cells on the computational domain boundary.
+ ! Currently the velocity is always set to a specified value on this boundary.
+ ! With open (non-Dirichlet) BCs, we might want to solve for these velocities,
+ ! using the code above to compute the matrix elements.
+ elseif ( GLIDE_HAS_ICE(mask(ew,ns)) .and. ( GLIDE_IS_DIRICHLET_BOUNDARY(mask(ew,ns)) .or. &
+ comp_bound ) .or. GLIDE_IS_LAND_MARGIN(mask(ew,ns)) .or. &
+ GLIDE_IS_THIN(mask(ew,ns)) ) then
+ ! print*, ' '
+ ! print*, 'At a NON-SHELF boundary ... ew, ns = ', ew, ns
+ ! print*, 'LAND_MARGIN =', GLIDE_IS_LAND_MARGIN(mask(ew,ns))
+ ! print*, 'MASK(ew,ns) =', mask(ew,ns)
+ ! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+
+ ! Put specified value for vel on rhs. NOTE that this is NOT zero by default
+ ! unless the initial guess is zero. It will be set to whatever the initial value
+ ! for the vel at location up,ew,ns is in the initial array!
+ loc2plusup = loc2(1,:)
+ call valueset(0.d0, loc2plusup)
+
+ loc2plusup = loc2(1,:) + upn + 1
+ call valueset(0.d0, loc2plusup)
+
+ do up = up_start, upn
+ loc2plusup = loc2(1,:) + up
+ call valueset( thisvel(up,ew,ns), loc2plusup ) ! vel at margin set to initial value
+ !call valueset( 0.d0 ) ! vel at margin set to 0
+ enddo
+ endif
+ enddo ! ew
+ enddo ! ns
+
+ deallocate(loc2_array)
+
+end subroutine findcoefstr
+
+!***********************************************************************
+
+subroutine bodyset(ew, ns, up, &
+ ewn, nsn, upn, &
+ dew, dns, &
+ pt, loc2_array, &
+ loc2, stagthck, &
+ thisdusrfdx, &
+ dusrfdew, dusrfdns, &
+ dlsrfdew, dlsrfdns, &
+ local_efvs, &
+ local_othervel, &
+ local_thisvel, &
+ beta, &
+ btraction, &
+ whichbabc, assembly, &
+ abar)
+
+ ! This subroutine does the bulk of the work in calling the appropriate discretiztion routines,
+ ! which determine the values for coefficients that will go into the sparse matrix, for points
+ ! on and inside of the boundaries.
+ !
+ ! Three cases are handled for the point (ew,ns,up), driven by the module flag
+ ! 'lateralboundry' (set by the caller) and the vertical index 'up':
+ ! (1) lateral (floating-ice) boundary: one-sided discretizations plus an
+ ! ocean-pressure source term added to the RHS vector 'rhsd',
+ ! (2) interior levels away from a lateral boundary: the standard centered
+ ! discretization (normal + vertical stress grads on LHS, cross terms on RHS),
+ ! (3) sfc (up==1) and bed (up==upn) levels away from a lateral boundary:
+ ! higher-order free-surface / basal boundary conditions, and, when
+ ! assembly==1, the basal traction diagnostic 'btraction'.
+
+ use glimmer_paramets, only: evs0, evs_scale
+ ! NOTE(review): evs0 and evs_scale do not appear to be referenced in this
+ ! routine -- candidates for removal from the use-only list.
+ implicit none
+
+ integer, intent(in) :: ewn, nsn, upn
+ integer, intent(in) :: ew, ns, up
+ real(dp), intent(in) :: dew, dns
+ integer, intent(in) :: pt, whichbabc, assembly
+ integer, dimension(ewn,nsn,2), intent(in) :: loc2_array
+ integer, dimension(6,2), intent(in) :: loc2
+
+ real(dp), dimension(:,:), intent(in) :: stagthck
+ real(dp), dimension(:,:), intent(in) :: dusrfdew, dusrfdns
+ real(dp), dimension(:,:), intent(in) :: dlsrfdew, dlsrfdns
+ real(dp), dimension(:,:), intent(in) :: thisdusrfdx
+ real(dp), dimension(2,2,2), intent(in) :: local_efvs
+ ! "local_othervel" is the other vel component (i.e. u when v is being calc and vice versa),
+ ! which is taken as a known value (terms involving it are moved to the RHS and treated as sources)
+ real(dp), dimension(3,3,3), intent(in) :: local_othervel, local_thisvel
+ real(dp), intent(in) :: beta
+ real(dp), dimension(:,:,:), intent(inout) :: btraction
+ real(dp), intent(in), optional :: abar
+
+ ! storage space for coefficients that go w/ the discretization at the local point up, ew, ns.
+ ! Note that terms other than 'g' are used for storing particular parts needed for calculation
+ ! of the basal traction vector.
+ real(dp), dimension(3,3,3) :: g, h, g_cros, g_vert, g_norm, g_vel_lhs, g_vel_rhs
+ ! NOTE(review): local 'h' appears to be unused in this routine.
+
+ ! source term for the rhs when using ice shelf lateral boundary condition,
+ ! e.g. source = rho*g*H/(2*Neff) * ( 1 - rho_i / rho_w ) for ice shelf
+ real(dp) :: source
+
+ real(dp) :: slopex, slopey ! local sfc (or bed) slope terms
+
+ ! lateral boundary normal and vector to indicate use of forward
+ ! or bacward one-sided diff. when including specified stress lateral bcs
+ real(dp), dimension(2) :: fwdorbwd, normal
+
+ real(dp) :: nz ! z dir normal vector component at sfc or bed (takes diff value for each)
+
+ integer, dimension(2) :: bcflag ! indicates choice of sfc and basal bcs ...
+
+ real(dp) :: scalebabc
+
+ integer, dimension(2) :: loc2plusup
+
+ logical :: fons, foew ! true when geom. requires using 1st-order one sided diffs. at floating ice boundary
+ ! (default is 2nd-order, which requires larger stencil)
+
+ ! default row/rhs index for this (ew,ns,up) point; adjusted below for sfc/bed bc rows
+ loc2plusup = loc2(1,:) + up
+
+ if( lateralboundry )then
+
+ ! *********************************************************************************************
+ ! lateral boundary conditions
+
+ ! if at sfc or bed, source due to seawater pressure is 0 and bc normal vector
+ ! should contain sfc/bed slope components, e.g. (-ds/dx, -ds/dy, 1) or (db/dx, db/dy, -1)
+ source = 0.d0
+
+ call getlatboundinfo( ew, ns, up, &
+ ewn, nsn, upn, &
+ stagthck(ew-2:ew+2, ns-2:ns+2), &
+ loc2_array(:,:,1), fwdorbwd, normal, &
+ loc_latbc, foew, fons)
+
+ if( up == 1 .or. up == upn )then
+
+ if( up == 1 )then ! specify necessary variables and flags for free sfc
+ bcflag = (/1,0/)
+ loc2plusup = loc2(1,:) + up - 1 ! reverse the sparse matrix / rhs vector row index by 1 ...
+ slopex = -dusrfdew(ew,ns); slopey = -dusrfdns(ew,ns); nz = 1.d0
+ else ! specify necessary variables and flags for basal bc
+
+ if( whichbabc == HO_BABC_NO_SLIP )then
+ bcflag = (/0,0/) ! flag for u=v=0 at bed; doesn't work well so commented out here...
+ ! better to specify very large value for beta below
+ elseif( whichbabc == HO_BABC_CONSTANT .or. whichbabc == HO_BABC_SIMPLE .or. &
+ whichbabc == HO_BABC_YIELD_PICARD .or. whichbabc == HO_BABC_BETA_BWAT .or. &
+ whichbabc == HO_BABC_LARGE_BETA .or. whichbabc == HO_BABC_EXTERNAL_BETA .or. &
+ whichbabc == HO_BABC_POWERLAW .or. whichbabc == HO_BABC_COULOMB_FRICTION) then
+ bcflag = (/1,1/) ! flag for specififed stress at bed: Tau_zx = beta * u_bed,
+ ! where beta is MacAyeal-type traction parameter
+ end if
+
+ loc2plusup = loc2(1,:) + up + 1 ! advance the sparse matrix / rhs row vector index by 1 ...
+ slopex = dlsrfdew(ew,ns); slopey = dlsrfdns(ew,ns); nz = -1.d0
+
+ end if
+
+!NOTE: conduct realistic test cases with and w/o this hack
+! !! Hack to avoid bad sfc and basal bc normal vectors !!
+! slopex = 0.d0; slopey = 0.d0
+
+ ! get coeffs. associated with horiz. normal stresses lateral boundary
+ g = normhorizmainbc_lat(dew, dns, &
+ slopex, slopey, &
+ dsigmadew(up), dsigmadns(up), &
+ pt, 2, &
+ dup(up), local_efvs, &
+ oneorfour, fourorone, &
+ onesideddiff, &
+ normal, fwdorbwd, &
+ foew, fons )
+
+ ! add on coeffs. associated with vertical shear stresses
+ g(:,3,3) = g(:,3,3) &
+ + vertimainbc( stagthck(ew,ns), bcflag, dup(up), &
+ local_efvs, beta, g_vert, nz )
+
+ !! scale basal bc coeffs when using JFNK solver
+ scalebabc = scalebasalbc( g, bcflag, lateralboundry, beta, local_efvs )
+ g = g / scalebabc ! put the coeff. for the b.c. equation in the same place as the prev. equation
+ ! (w.r.t. cols), on a new row ...
+ call fillsprsebndy( g, loc2plusup(1), loc_latbc, up, normal, pt )
+
+
+ ! get coeffs. for horiz shear stress terms, multiply by other vel and put into RHS vector
+
+ ! NOTE that in the following expression, the "-" sign on the crosshoriz terms,
+ ! which results from moving them from the LHS over to the RHS, has been moved
+ ! inside of "croshorizmainbc_lat".
+ rhsd(loc2plusup(2)) = sum( croshorizmainbc_lat(dew, dns, &
+ slopex, slopey, &
+ dsigmadew(up), dsigmadns(up), &
+ pt, 2, &
+ dup(up), local_othervel,&
+ local_efvs, &
+ oneortwo, twoorone, &
+ onesideddiff, &
+ normal, fwdorbwd, &
+ foew, fons ) &
+ * local_othervel ) /scalebabc
+
+ end if ! up = 1 or up = upn (IF at lateral boundary and IF at surface or bed)
+
+ ! If in main body and at ice/ocean boundary, calculate depth-averaged stress
+ ! due to sea water, bc normal vector components should be boundary normal
+ loc2plusup = loc2(1,:) + up
+
+ ! for this bc, the normal vector components are not the sfc/bed slopes but are taken
+ ! from a normal to the shelf front in map view (x,y plane); slopex,slopey are simply renamed here
+ slopex = normal(1)
+ slopey = normal(2)
+
+ ! There are two options here for the source term associated with the boundary condition for
+ ! floating ice:
+ !
+ ! (1) use the 1d solution that involves the rate factor (not accurate for
+ ! 3d domains, but can be more robust and stable)
+ ! (2) use the more general solution that involves the eff. visc. and normal
+ ! vector orientation at lateral boundary
+ !
+ ! Only one of these options should be active at a time (i.e. comment the other lines out)
+ ! The default setting is (2), which is the more general case that should also work for 1d problems.
+
+ ! In some cases, the two options can be used together to improve performance, e.g. for the Ross
+ ! ice shelf experiment, a number of early iterations could use the more simple bc (option 1) and then
+ ! when the solution has converged a bit, we switch to the more realistic implementation (option 2).
+ ! This has the advantage of "conditioning" the eff. visc. in the source term a bit before turning
+ ! the source term dependence on the eff. visc. "on".
+
+ ! NOTE that the newer sfc, basal, and lateral bc subroutines keep the eff. visc. terms with the LHS
+ ! matrix coeffs. In this case, they do not have any affect on the source term for floating ice bcs
+ ! and the considerations in the above paragraph do not apply (w.r.t. adversely affecting the source term).
+
+! ! --------------------------------------------------------------------------------------
+! ! (1) source term (strain rate at shelf/ocean boundary) from Weertman's analytical solution
+! ! This is primarily of use for debugging purposes, e.g. when a 1d test case is run. Also useful
+! ! if one wants to turn "off" the eff. visc. dependence in the matrix coeffs. that go with this
+! ! boundary condition, since this form of it has no eff. visc. terms.
+! ! --------------------------------------------------------------------------------------
+! ! See eq. 2, Pattyn+, 2006, JGR v.111; eq. 8, Vieli&Payne, 2005, JGR v.110). Note that this
+! ! contains the 1d assumption that ice is not spreading lateraly !(assumes dv/dy = 0 for u along flow)
+!
+! source = abar * vis0 * ( 1.d0/4.d0 * rhoi * grav * stagthck(ew,ns)*thk0 * ( 1.d0 - rhoi/rhoo))**3.d0
+!
+! ! multiply by 4 so that case where v=0, du/dy = 0, LHS gives: du/dx = du/dx|_shelf
+! ! (i.e. LHS = 4*du/dx, requires 4*du/dx_shelf)
+! source = source * 4.d0
+!
+! ! split source based on the boundary normal orientation and non-dimensinoalize
+! ! Note that it is not really appropriate to apply option (1) to 2d flow, since terms other than du/dx in
+! ! eff. strain rate are ignored. For 2d flow, should use option (2) below.
+! source = source * normal(pt)
+! source = source * tim0 ! make source term non-dim
+! ! --------------------------------------------------------------------------------------
+
+ ! --------------------------------------------------------------------------------------
+ ! (2) source term (strain rate at shelf/ocean boundary) from MacAyeal depth-ave solution.
+ ! --------------------------------------------------------------------------------------
+
+ source = (rhoi*grav*stagthck(ew,ns)*thk0) / tau0 / 2.d0 * ( 1.d0 - rhoi / rhoo )
+
+ source = source * normal(pt) ! partition according to normal vector at lateral boundary
+ ! NOTE that source term is already non-dim here
+ ! --------------------------------------------------------------------------------------
+
+ ! get matrix coefficients that go with horiz normal stresses at a floating ice boundary
+ g = normhorizmainbc_lat(dew, dns, &
+ slopex, slopey, &
+ dsigmadew(up), dsigmadns(up), &
+ pt, 1, &
+ dup(up), local_efvs, &
+ oneorfour, fourorone, &
+ onesideddiff, &
+ normal, fwdorbwd, &
+ foew, fons )
+
+ ! NOTE that for lateral floating ice boundary, we assume u_sfc ~ u_bed and stress free bc
+ ! at both upper and lower sfc boundaries, so that there are no coeffs. for vert. shear stresses
+
+ ! put the coeff. for the b.c. equation in the same place as the prev. equation
+ ! (w.r.t. cols), on a new row ...
+
+!NOTE: is above comment correct or is this now just a normal scatter of coeffs. into the matrix?
+ call fillsprsebndy( g, loc2plusup(1), loc_latbc, up, normal, pt )
+
+
+ ! get matrix coefficients that go with the horiz shear stresses at a floating ice
+ ! boundary, multiply by their respective "other" velocity and put into RHS vector
+
+ ! NOTE that in the following expression, the "-" sign on the crosshoriz terms,
+ ! which results from moving them from the LHS over to the RHS, has been moved
+ ! inside of "croshorizmainbc_lat".
+ rhsd(loc2plusup(2)) = sum( croshorizmainbc_lat(dew, dns, &
+ slopex, slopey, &
+ dsigmadew(up), dsigmadns(up), &
+ pt, 1, &
+ dup(up), local_othervel, &
+ local_efvs, &
+ oneortwo, twoorone, &
+ onesideddiff, &
+ normal, fwdorbwd, &
+ foew, fons ) &
+ * local_othervel ) + source
+
+ else ! NOT at a lateral boundary
+
+! *********************************************************************************************
+! normal discretization for points inside of lateral boundary and inside main body of ice sheet
+
+ ! This if construct skips the normal discretization for the RHS and LHS for the sfc and basal indices
+ ! because these vertical levels are handled by different subroutines.
+ if( up /= upn .and. up /= 1 )then
+
+ g = normhorizmain(pt,up,local_efvs) ! normal stress grad coeffs
+
+ g(:,2,2) = g(:,2,2) + vertimain(hsum(local_efvs),up) ! add vert stress grad coeffs
+
+ ! NOTE that version of 'fillspremain' for one-sided bcs needs additional argument to specify a
+ ! column shift of coeffs. of rows in LHS matrix. That is the "0" past last here (no shift for internal bcs)
+ call fillsprsemain(g,loc2plusup(1),loc2(:,1),up,pt,0)
+
+ ! NOTE that in the following expression, the "-" sign on the crosshoriz terms,
+ ! which results from moving them from the LHS over to the RHS, is explicit and
+ ! hast NOT been moved inside of "croshorizmin" (as is the case for the analogous
+ ! boundary condition routines).
+ rhsd(loc2plusup(2)) = thisdusrfdx(ew,ns) - & ! shear stress grad coeffs into RHS vector
+ sum(croshorizmain(pt,up,local_efvs) * local_othervel)
+ end if
+
+ ! The follow two if constructs set the ghost cell storage to have ones on the martrix diag and zeros
+ ! on the rhs, enforcing a zero vel bc for the ghost cells. Eventually, the capacity allowing for ghost
+ ! cells can probably be removed but keeping here for now for backward compatibility.
+ if( up == upn )then
+ loc2plusup = loc2(1,:) + upn + 1 ! basal ghost cells
+ call valueset(0.d0, loc2plusup)
+ endif
+ if( up == 1 )then
+ loc2plusup = loc2(1,:) ! sfc ghost cells
+ call valueset(0.d0, loc2plusup)
+ endif
+
+ end if
+
+! *********************************************************************************************
+! higher-order sfc and bed boundary conditions in main body of ice sheet (NOT at lat. boundry)
+
+ if( ( up == upn .or. up == 1 ) .and. .not. lateralboundry) then
+
+ if( up == 1 )then ! specify necessary variables and flags for free sfc
+ bcflag = (/1,0/)
+ loc2plusup = loc2(1,:) + up - 1 ! reverse the sparse matrix / rhs vector row index by 1 ...
+ slopex = -dusrfdew(ew,ns); slopey = -dusrfdns(ew,ns); nz = 1.d0
+ else ! specify necessary variables and flags for basal bc
+
+ if( whichbabc == HO_BABC_NO_SLIP )then
+ bcflag = (/0,0/) ! flag for u=v=0 at bed; doesn't work well so commented out here...
+ ! better to specify very large value for beta below
+
+ elseif( whichbabc == HO_BABC_CONSTANT .or. whichbabc == HO_BABC_SIMPLE .or. &
+ whichbabc == HO_BABC_YIELD_PICARD .or. whichbabc == HO_BABC_BETA_BWAT .or. &
+ whichbabc == HO_BABC_LARGE_BETA .or. whichbabc == HO_BABC_EXTERNAL_BETA .or. &
+ whichbabc == HO_BABC_POWERLAW .or. whichbabc == HO_BABC_COULOMB_FRICTION) then
+ bcflag = (/1,1/) ! flag for specififed stress at bed: Tau_zx = beta * u_bed,
+ ! where beta is MacAyeal-type traction parameter
+ end if
+
+ loc2plusup = loc2(1,:) + up + 1 ! advance the sparse matrix / rhs row vector index by 1 ...
+ slopex = dlsrfdew(ew,ns); slopey = dlsrfdns(ew,ns); nz = -1.d0
+
+ end if
+
+ ! get matrix coefficients that go with normal stresses at sfc or basal boundary
+ g = normhorizmainbcos(dew, dns, &
+ slopex, slopey, &
+ dsigmadew(up), dsigmadns(up), &
+ pt, bcflag, &
+ dup(up), local_efvs, &
+ oneorfour, fourorone)
+
+ g_norm = g ! save these coeffs, as needed for basal traction calculation
+
+
+ ! get matrix coefficients that go with vertical stresses at sfc or basal boundary
+ g(:,2,2) = g(:,2,2) &
+ + vertimainbcos( stagthck(ew,ns),bcflag,dup(up),local_efvs, &
+ beta, g_vert, nz )
+
+ !! scale basal bc coeffs when using JFNK solver
+ scalebabc = scalebasalbc( g, bcflag, lateralboundry, beta, local_efvs )
+ g = g / scalebabc
+
+ loc2plusup = loc2(1,:) + up ! Need to reset this index since we want the bc on the actual row
+ ! coinciding with the boundary at up=1
+
+ ! Replace ghost cells w/ one-sided diffs at sfc/basal indices. This section shifts the LHS matrix coeffs for the sfc
+ ! and basal bcs back on to the main diagonal, as opposed to staggered off the diag, which was necessary for the ghost
+ ! cell implementation.
+ if( up == 1 .or. up == upn )then
+ if( up == 1 )then
+ call fillsprsemain(g,loc2plusup(1),loc2(:,1),up,pt,1)
+ else if( up == upn )then
+ call fillsprsemain(g,loc2plusup(1),loc2(:,1),up,pt,-1)
+ end if
+ end if
+
+ ! calc shear stress coeffs., multiply by other vel and move to RHS vector
+ rhsd(loc2plusup(2)) = sum( croshorizmainbcos(dew, dns, &
+ slopex, slopey, &
+ dsigmadew(up), dsigmadns(up), &
+ pt, bcflag, &
+ dup(up), local_othervel, &
+ local_efvs, &
+ oneortwo, twoorone, g_cros ) &
+ * local_othervel ) / scalebabc
+
+ ! The following calculates the basal traction AFTER an updated solution is obtain by passing the new
+ ! values of uvel, vvel back to the matrix assembly routines, and thus obtaining updated values of the
+ ! relevant coefficients. The if construct allows the assembly routines to be called for only the vert
+ ! layers that are needed to cacluate the basal traction (as opposed to all vert levels 1:upn).
+ if( assembly == 1 )then
+
+ g_vel_lhs = local_thisvel
+ g_vel_rhs = local_othervel
+
+!HALO - Since ew and ns are locally owned velocity points, we will have btraction at all such points.
+ btraction(pt,ew,ns) = sum( (g_norm+g_vert)*g_vel_lhs*thk0/len0 ) &
+ - sum( g_cros*g_vel_rhs*thk0/len0 )
+ end if
+
+ end if ! (up = 1 or up = upn) and lateralboundry = F
+
+ return
+
+end subroutine bodyset
+
+!***********************************************************************
+
+subroutine valueset(local_value, loc2plusup)
+
+ ! Impose a Dirichlet-style row in the linear system A x = rhs:
+ ! a 1 is placed on the matrix diagonal at row loc2plusup(1), and the
+ ! prescribed value is stored in the rhs vector at index loc2plusup(2).
+
+ implicit none
+
+ real(dp), intent(in) :: local_value
+ integer, dimension(2), intent(in) :: loc2plusup
+
+ ! store the prescribed value on the RHS, then set the unit diagonal entry
+ rhsd(loc2plusup(2)) = local_value
+ call putpcgc(1.d0, loc2plusup(1), loc2plusup(1))
+
+end subroutine valueset
+
+!***********************************************************************
+
+subroutine calccoeffsinit (upn, dew, dns)
+
+ ! Precompute the grid-spacing-dependent constants used by the finite-difference
+ ! stencils in 'findcoefst'. The constants combine (1) grid spacing info and
+ ! (2) the numeric factors arising from averaging the eff. viscosity from the
+ ! normal horizontal grid onto the staggered grid. Results are stored in
+ ! module-level variables (cvert, cdxdx, cdsdx, cdsds, cds, cdxdy); the
+ ! 'upn' argument is retained in the interface for compatibility.
+
+ implicit none
+
+ integer, intent(in) :: upn
+ real(dp), intent(in) :: dew, dns
+
+ ! coefficient for vertical second-derivative (d/dsigma(f du/dsigma)) terms
+ cvert(:) = (len0**2) / (4.d0 * thk0**2 * dup**2)
+
+ ! coefficients for horizontal and mixed-derivative terms:
+ ! d/dx(f du/dx), d/dx(f du/dy), d/dsigma(f du/dx), d/dx(f du/dsigma), du/dsigma
+ cdxdy = 0.0625d0 / (dew * dns)
+ cdxdx = (/ 0.25d0 / dew**2, 0.25d0 / dns**2 /)
+ cdsdx(:,1) = 0.0625d0 / (dew * dup)
+ cdsdx(:,2) = 0.0625d0 / (dns * dup)
+ cdsds = 0.25d0 / (dup * dup)
+ cds = 0.0625d0 / dup
+
+end subroutine calccoeffsinit
+
+!***********************************************************************
+
+subroutine calccoeffs(upn, sigma, &
+ stagthck, &
+ dusrfdew, dusrfdns, &
+ dthckdew, dthckdns, &
+ d2usrfdew2, d2usrfdns2, d2usrfdewdns, &
+ d2thckdew2, d2thckdns2, d2thckdewdns)
+
+ ! Column-based precomputation of the sigma-coordinate transformation
+ ! coefficients needed by the finite-difference stencils (called from
+ ! 'findcoefst'). All results are written to module-level arrays/scalars
+ ! (fvert, dsigmadew, dsigmadns, d2sigma*); nothing is returned through
+ ! the argument list.
+
+ implicit none
+
+ integer, intent(in) :: upn
+ real(dp), dimension(:), intent(in) :: sigma
+ real(dp), intent(in) :: stagthck, dusrfdew, dusrfdns, dthckdew, dthckdns, &
+ d2usrfdew2, d2usrfdns2, d2usrfdewdns, &
+ d2thckdew2, d2thckdns2, d2thckdewdns
+
+ ! vertical FD coefficient, scaled by the local (staggered) thickness squared
+ fvert(:) = cvert(:) / stagthck**2
+
+ ! mixed sigma/horizontal metric terms
+ d2sigmadewdsigma = calcd2sigmadxdsigma(dthckdew, stagthck)
+ d2sigmadnsdsigma = calcd2sigmadxdsigma(dthckdns, stagthck)
+
+ ! first derivatives of sigma w.r.t. the two horizontal directions
+ dsigmadew = calcdsigmadx(upn, sigma, dusrfdew, dthckdew, stagthck)
+ dsigmadns = calcdsigmadx(upn, sigma, dusrfdns, dthckdns, stagthck)
+
+ ! second derivatives of sigma w.r.t. the horizontal directions
+ d2sigmadew2 = calcd2sigmadxdy(upn, sigma, &
+ d2usrfdew2, d2thckdew2, &
+ dusrfdew, dusrfdew, &
+ dthckdew, dthckdew, &
+ stagthck)
+
+ d2sigmadns2 = calcd2sigmadxdy(upn, sigma, &
+ d2usrfdns2, d2thckdns2, &
+ dusrfdns, dusrfdns, &
+ dthckdns, dthckdns, &
+ stagthck)
+
+ d2sigmadewdns = calcd2sigmadxdy(upn, sigma, &
+ d2usrfdewdns, d2thckdewdns, &
+ dusrfdew, dusrfdns, &
+ dthckdew, dthckdns, &
+ stagthck)
+
+end subroutine calccoeffs
+
+!***********************************************************************
+
+function calcdsigmadx(upn, sigma, &
+ dusrfdx, dthckdx, &
+ stagthck)
+
+ ! Column of first derivatives of the sigma coordinate w.r.t. a horizontal
+ ! direction x (ew or ns):
+ ! dsigma/dx = (ds/dx - sigma * dH/dx) / H
+ ! where s is the upper surface, H the (staggered) thickness.
+
+ implicit none
+
+ integer, intent(in) :: upn
+ real(dp), dimension(:), intent(in) :: sigma
+ real(dp), intent(in) :: stagthck, dusrfdx, dthckdx
+ real(dp), dimension(upn) :: calcdsigmadx
+
+ integer :: k ! vertical level index
+
+ do k = 1, upn
+ calcdsigmadx(k) = (dusrfdx - sigma(k) * dthckdx) / stagthck
+ end do
+
+end function calcdsigmadx
+
+!***********************************************************************
+
+function calcd2sigmadxdy(upn, sigma, &
+ d2usrfdxdy, d2thckdxdy, &
+ dusrfdx, dusrfdy, &
+ dthckdx, dthckdy, &
+ stagthck)
+
+ ! Column of second derivatives of the sigma coordinate w.r.t. two
+ ! horizontal directions x,y (each ew or ns):
+ ! d2sigma/dxdy = [ H*d2s/dxdy - ds/dx*dH/dy - ds/dy*dH/dx
+ ! + sigma*(2*dH/dx*dH/dy - H*d2H/dxdy) ] / H**2
+
+ implicit none
+
+ integer, intent(in) :: upn
+ real(dp), dimension(:), intent(in) :: sigma
+ real(dp), intent(in) :: d2usrfdxdy, d2thckdxdy, dusrfdx, dusrfdy, &
+ dthckdx, dthckdy, stagthck
+ real(dp), dimension(upn) :: calcd2sigmadxdy
+
+ real(dp) :: sigfree, sigcoef ! sigma-independent and sigma-proportional parts
+ integer :: k
+
+ sigfree = stagthck * d2usrfdxdy - dusrfdx * dthckdy - dusrfdy * dthckdx
+ sigcoef = 2.d0 * dthckdx * dthckdy - stagthck * d2thckdxdy
+
+ do k = 1, upn
+ calcd2sigmadxdy(k) = (sigfree + sigma(k) * sigcoef) / stagthck**2
+ end do
+
+end function calcd2sigmadxdy
+
+!***********************************************************************
+
+function calcd2sigmadxdsigma(dthckdx,stagthck)
+
+ ! Mixed derivative of the sigma coordinate w.r.t. a horizontal direction
+ ! and sigma itself; for the sigma transform this reduces to -(dH/dx)/H.
+
+ implicit none
+
+ real(dp), intent(in) :: dthckdx, stagthck
+ real(dp) :: calcd2sigmadxdsigma
+
+ calcd2sigmadxdsigma = -(dthckdx / stagthck)
+
+end function calcd2sigmadxdsigma
+
+!***********************************************************************
+
+function vertimain(efvs,up)
+
+ ! Three-point stencil coefficients for the vertical (sigma) stress-gradient
+ ! term at level 'up': entries (1) and (3) weight the levels above and below
+ ! with the corresponding eff. viscosities, and the central entry (2) closes
+ ! the stencil so the three coefficients sum to zero.
+
+ implicit none
+
+ real(dp), dimension(2), intent(in) :: efvs
+ integer, intent(in) :: up
+
+ real(dp), dimension(3) :: vertimain
+
+ real(dp) :: fv ! module-level vertical FD coefficient at this layer
+
+ fv = fvert(up)
+
+ vertimain(1) = fv * efvs(1)
+ vertimain(3) = fv * efvs(2)
+ vertimain(2) = -(vertimain(1) + vertimain(3))
+
+end function vertimain
+
+!***********************************************************************
+
+function normhorizmain(which,up,efvs)
+
+ ! Called from 'findcoefst' to calculate normal-stress grad terms
+ ! like: d/dx(f(du/dx)), d/dy(f(dv/dy)), etc.
+ ! ... calls FUNCTIONS: horiztermdxdx, horiztermdsdx, horiztermdxds,
+ ! horiztermdsds, horiztermds
+ ! determines coefficients from d/dx(fdu/dx) and d/dy(fdu/dy)
+ !
+ ! Returns a 3x3x3 stencil of LHS matrix coefficients for the point's
+ ! 27-neighborhood: 'g' accumulates the ew-direction contributions and
+ ! 'h' the ns-direction contributions; they are blended at the end with
+ ! the 4:1 / 1:4 weights selected by 'which'.
+
+ implicit none
+
+ integer, intent(in) :: which, up
+ real(dp), dimension(:,:,:), intent(in) :: efvs
+
+ real(dp), dimension(3,3,3) :: normhorizmain
+ real(dp), dimension(3,3,3) :: g, h
+ real(dp), dimension(2) :: sumefvsup, sumefvsew, sumefvsns
+ real(dp) :: sumefvs
+
+ g = 0.d0
+ h = 0.d0
+
+ ! Partial sums of the 2x2x2 eff. viscosity block, used to average efvs onto
+ ! the staggered stencil positions:
+ ! sumefvsew - summed over ns (dim 3) then sigma (dim 1), leaving an ew pair
+ ! sumefvsns - summed over ew (dim 2) then sigma (dim 1), leaving an ns pair
+ ! sumefvs - sum over the whole block
+ ! sumefvsup - from hsum(); presumably summed over the two horizontal dims,
+ ! leaving a vertical pair -- confirm against hsum's definition
+ sumefvsup = hsum(efvs)
+ sumefvsew = sum(sum(efvs,3),1)
+ sumefvsns = sum(sum(efvs,2),1)
+ sumefvs = sum(efvs)
+
+! for d(f.du/dx)/dx
+
+ g(2,:,2) = horiztermdxdx(sumefvsew,cdxdx(1))
+ g(:,1:3:2,2) = g(:,1:3:2,2) + horiztermdsdx(dsigmadew(up),sumefvsup,cdsdx(up,1))
+ g(1:3:2,:,2) = g(1:3:2,:,2) + horiztermdxds(dsigmadew(up),sumefvsew,cdsdx(up,1))
+ g(:,2,2) = g(:,2,2) + horiztermdsds(dsigmadew(up)**2,sumefvsup,cdsds(up))
+ g(1:3:2,2,2) = g(1:3:2,2,2) + horiztermds(d2sigmadew2(up)+d2sigmadewdsigma*dsigmadew(up),sumefvs,cds(up))
+
+! for d(f.du/dy)/dy
+
+ h(2,2,:) = horiztermdxdx(sumefvsns,cdxdx(2))
+ h(:,2,1:3:2) = h(:,2,1:3:2) + horiztermdsdx(dsigmadns(up),sumefvsup,cdsdx(up,2))
+ h(1:3:2,2,:) = h(1:3:2,2,:) + horiztermdxds(dsigmadns(up),sumefvsns,cdsdx(up,2))
+ h(:,2,2) = h(:,2,2) + horiztermdsds(dsigmadns(up)**2,sumefvsup,cdsds(up))
+ h(1:3:2,2,2) = h(1:3:2,2,2) + horiztermds(d2sigmadns2(up)+d2sigmadnsdsigma*dsigmadns(up),sumefvs,cds(up))
+
+ ! blend with 4:1 (or 1:4) weighting depending on which velocity component
+ ! this assembly is for; fourorone/oneorfour are module-level weight arrays
+ normhorizmain = g * fourorone(which) + h * oneorfour(which)
+
+ return
+
+end function normhorizmain
+
+!***********************************************************************
+
+function croshorizmain(which,up,efvs)
+
+ ! Called from 'findcoefst' to calculate cross-stress grad terms
+ ! like: d/dx(f(du/dy)), d/dy(f(dv/dx)), etc.
+ ! ... calls FUNCTIONS: horiztermdxdy, horiztermdsdx, horiztermdxds,
+ ! horiztermdsds, horiztermds
+ ! determines coefficients from d/dx(fdu/dy) and d/dy(fdu/dx)
+ !
+ ! Returns a 3x3x3 coefficient block (vertical, ew, ns); 'which' selects
+ ! the component being assembled (1=x, 2=y) via the host-associated
+ ! twoorone/oneortwo weights; 'up' is the vertical level.
+
+ implicit none
+
+ integer, intent(in) :: which, up ! component flag (1=x, 2=y); vertical level
+ real(dp), dimension(:,:,:), intent(in) :: efvs ! eff. viscosities around the point
+
+ real(dp), dimension(3,3,3) :: croshorizmain
+ ! FIX: g and h were declared with "= 0.d0" initializers, which in Fortran
+ ! gives procedure locals the implicit SAVE attribute. Both are assigned
+ ! below on every call, so the initializers were redundant and are removed.
+ real(dp), dimension(3,3,3) :: g, h
+ real(dp), dimension(2) :: sumefvsup, sumefvsew, sumefvsns ! partial efvs sums
+ real(dp) :: sumefvs ! sum over all efvs values
+
+ g = 0.d0
+ h = 0.d0
+
+ sumefvsup = hsum(efvs)
+ sumefvsew = sum(sum(efvs,3),1)
+ sumefvsns = sum(sum(efvs,2),1)
+ sumefvs = sum(efvs)
+
+! for d(f.du/dy)/dx
+
+ g(2,:,1:3:2) = horiztermdxdy(sumefvsew,cdxdy)
+ g(:,2,1:3:2) = g(:,2,1:3:2) + horiztermdsdx(dsigmadew(up),sumefvsup,cdsdx(up,2))
+ g(1:3:2,:,2) = g(1:3:2,:,2) + horiztermdxds(dsigmadns(up),sumefvsew,cdsdx(up,1))
+ g(:,2,2) = g(:,2,2) + horiztermdsds(dsigmadew(up)*dsigmadns(up),sumefvsup,cdsds(up))
+ g(1:3:2,2,2) = g(1:3:2,2,2) + horiztermds(d2sigmadewdns(up)+d2sigmadnsdsigma*dsigmadew(up),sumefvs,cds(up))
+
+! for d(f.du/dx)/dy
+
+ h(2,1:3:2,:) = transpose(horiztermdxdy(sumefvsns,cdxdy))
+ h(:,1:3:2,2) = h(:,1:3:2,2) + horiztermdsdx(dsigmadns(up),sumefvsup,cdsdx(up,1))
+ h(1:3:2,2,:) = h(1:3:2,2,:) + horiztermdxds(dsigmadew(up),sumefvsns,cdsdx(up,2))
+ h(:,2,2) = h(:,2,2) + horiztermdsds(dsigmadew(up)*dsigmadns(up),sumefvsup,cdsds(up))
+ h(1:3:2,2,2) = h(1:3:2,2,2) + horiztermds(d2sigmadewdns(up)+d2sigmadewdsigma*dsigmadns(up),sumefvs,cds(up))
+
+ ! weight: factor 2 on the 'which' direction, 1 on the other
+ croshorizmain = g * twoorone(which) + h * oneortwo(which)
+
+ return
+
+end function croshorizmain
+
+!***********************************************************************
+
+! ***************************************************************************
+! start of functions to deal with higher-order boundary conditions at sfc and bed
+! ***************************************************************************
+
+function vertimainbc(thck, bcflag, dup, efvs, beta, g_vert, nz )
+
+! altered form of 'vertimain' that calculates coefficients for higher-order
+! b.c. that go with the 'normhorizmain' term: -(X/H)^2 * dsigma/dzhat * du/dsigma
+!
+! Returns the 3-point vertical coefficient stencil at the sfc/bed boundary
+! and, in 'g_vert', the part of the stencil kept for the later boundary
+! traction calculation (excludes boundary-forcing terms).
+! bcflag(1): 1 -> free sfc / specified-traction form; 0 -> no-slip (u=v=0)
+! bcflag(2): 1 -> also add the basal-traction (beta) source term
+! NOTE(review): len0 and thk0 are host-associated scaling constants.
+
+ implicit none
+
+ real(dp), intent(in) :: dup, thck, beta ! vert. spacing; thickness; basal traction coeff.
+ real(dp), intent(in) :: nz ! sfc normal vect comp in z-dir
+ real(dp), intent(in), dimension(2,2,2) :: efvs ! eff. visc. adjacent to the boundary
+ real(dp), intent(out), dimension(3,3,3) :: g_vert ! stencil kept for traction calc.
+ integer, intent(in), dimension(2) :: bcflag
+
+ real(dp) :: c
+ real(dp), dimension(3) :: vertimainbc
+
+ c = 0.d0
+ g_vert = 0.d0
+ ! FIX: initialize the result so it is well-defined for any bcflag value
+ ! (previously left undefined unless bcflag(1) was exactly 0 or 1)
+ vertimainbc(:) = 0.d0
+
+ ! for higher-order FREE SURFACE B.C. for x ('which'=1) or y ('which'=2) direction ...
+ if( bcflag(1) == 1 )then
+
+ c = nz / thck / (2.d0*dup) * (len0**2 / thk0**2) ! value of coefficient
+
+ vertimainbc(:) = 0.d0
+ vertimainbc(3) = -c
+ vertimainbc(1) = c
+ vertimainbc(2) = vertimainbc(3) + vertimainbc(1) ! should = 0
+
+ ! this is the part of the vertimain coeff. block that we want to keep for calc
+ ! of boundary tractions (note that it DOES NOT include terms from boundary forcing)
+ g_vert(:,2,2) = vertimainbc
+
+ ! for higher-order BASAL B.C. w/ specified basal traction, add on the necessary source term ...
+ if( bcflag(2) == 1 )then
+
+ ! last set of terms is mean visc. of ice nearest to the bed
+ vertimainbc(2) = vertimainbc(2) &
+ + ( beta / ( sum( efvs(2,:,:) ) / 4.d0 ) ) * (len0 / thk0)
+ end if
+
+ ! for higher-order BASAL B.C. U=V=0, in x ('which'=1) or y ('which'=2) direction ...
+ ! NOTE that this is not often implemented, as it is generally sufficient to implement
+ ! an "almost" no slip BC by just making the coeff. for beta very large (and the
+ ! the code converges more quickly/stably in this case than for actual no-slip).
+ else if( bcflag(1) == 0 )then
+
+ ! if u,v set to 0, there are no coeff. assoc. with du/dsigma terms ...
+ vertimainbc(:) = 0.d0
+
+ end if
+
+ return
+
+end function vertimainbc
+
+!***********************************************************************
+
+function vertimainbcos(thck, bcflag, dup, efvs, beta, g_vert, nz )
+
+! altered form of 'vertimain' that calculates coefficients for higher-order
+! b.c. that go with the 'normhorizmain' term: -(X/H)^2 * dsigma/dzhat * du/dsigma
+!
+! "os" = one-sided: the vertical derivative uses a 3-point one-sided
+! difference (coefficients (3,-4,1) at the sfc, (-1,4,-3) at the bed,
+! each over 2*dup) instead of the centred form used by vertimainbc.
+! bcflag as for vertimainbc. len0, thk0 and effstrminsq are host-associated.
+
+ implicit none
+
+ real (dp), intent(in) :: dup, thck, beta ! vert. spacing; thickness; basal traction coeff.
+ real (dp), intent(in) :: nz ! sfc normal vect comp in z-dir
+ real (dp), intent(in), dimension(2,2,2) :: efvs
+ real (dp), intent(out), dimension(3,3,3) :: g_vert
+ integer, intent(in), dimension(2) :: bcflag
+
+ real (dp) :: c
+ real (dp), dimension(3) :: vertimainbcos
+ real (dp) :: bar_sfc, bar_bed, efvsbar_bed, efvsbar_sfc
+
+ ! averaging number for eff. visc. at domain edges.
+ ! FIX: previously computed as sum(efvs/efvs, mask), which evaluates the
+ ! division for every element before masking (0/0 -> NaN / possible FPE
+ ! where efvs is zero); count() yields the same value safely.
+ bar_sfc = real( count( efvs(1,:,:) > effstrminsq ), dp )
+ bar_bed = real( count( efvs(2,:,:) > effstrminsq ), dp )
+
+ ! average visc. to use in coeff. calc.
+ ! NOTE(review): if no element exceeds effstrminsq the divisor is zero and
+ ! the average is non-finite, as in the original code — confirm callers
+ ! guarantee at least one valid efvs value here.
+ efvsbar_sfc = sum( efvs(1,:,:), efvs(1,:,:) > effstrminsq ) / bar_sfc
+ efvsbar_bed = sum( efvs(2,:,:), efvs(2,:,:) > effstrminsq ) / bar_bed
+
+ ! make the following lines active to turn OFF the visc. dependence in the LHS matrix coeffs.
+ !efvsbar_sfc = 1.0d0; efvsbar_bed = 1.0d0
+
+ c = 0.d0
+ g_vert = 0.d0
+ ! FIX: initialize the result so it is well-defined for any bcflag value
+ vertimainbcos(:) = 0.d0
+
+ ! for higher-order FREE SURFACE B.C. for x ('which'=1) or y ('which'=2) direction ...
+ if( bcflag(1) == 1 .and. bcflag(2) == 0 )then
+
+ c = nz / thck / (2.d0*dup) * (len0**2 / thk0**2) * efvsbar_sfc ! value of coefficient
+
+ ! backward (into the ice) one-sided 3-pt difference at the surface
+ vertimainbcos(:) = 0.d0
+ vertimainbcos(1) = 3.d0*c
+ vertimainbcos(2) = -4.d0*c
+ vertimainbcos(3) = c
+
+ ! this is the part of the vertimain coeff. block that we want to keep for calc
+ ! of boundary tractions (note that it DOES NOT include terms from boundary forcing)
+ g_vert(:,2,2) = vertimainbcos
+
+ end if
+
+ ! for higher-order BASAL B.C. w/ specified basal traction, add on the necessary source term ...
+ if( bcflag(1) == 1 .and. bcflag(2) == 1 )then
+
+ c = nz / thck / (2.d0*dup) * (len0**2 / thk0**2) * efvsbar_bed ! value of coefficient
+
+ ! forward (into the ice) one-sided 3-pt difference at the bed
+ vertimainbcos(:) = 0.d0
+ vertimainbcos(1) = -1.d0*c
+ vertimainbcos(2) = 4.d0*c
+ vertimainbcos(3) = -3.d0*c
+
+ ! this is the part of the vertimain coeff. block that we want to keep for calc
+ ! of boundary tractions (note that it DOES NOT include terms from boundary forcing)
+ ! NOTE that here we do this BEFORE adding in the sliding coefficient, as in the standard
+ ! expression for the BC, this term is on the RHS.
+ g_vert(:,2,2) = vertimainbcos
+
+ ! last set of terms is the basal-traction (beta) source contribution
+! vertimainbcos(3) = vertimainbcos(3) &
+! + ( beta / efvsbar_bed ) * (len0 / thk0)
+ vertimainbcos(3) = vertimainbcos(3) &
+ + ( beta ) * (len0 / thk0)
+
+ end if
+
+ ! for higher-order BASAL B.C. U=V=0, in x ('which'=1) or y ('which'=2) direction ...
+ ! NOTE that this is not often implemented, as it is generally sufficient to implement
+ ! an "almost" no slip BC by just making the coeff. for beta very large (and the
+ ! the code converges more quickly/stably in this case than for actual no-slip).
+ if( bcflag(1) == 0 )then
+
+ ! if u,v set to 0, there are no coeff. assoc. with du/dsigma terms ...
+ vertimainbcos(:) = 0.d0
+
+ end if
+
+ return
+
+end function vertimainbcos
+
+!***********************************************************************
+
+function normhorizmainbcos(dew, dns, &
+ dusrfdew, dusrfdns, &
+ dsigmadew, dsigmadns, &
+ which, bcflag, &
+ dup, efvs, &
+ oneorfour, fourorone)
+
+ ! Determines higher-order surface and basal boundary conditions for LHS of equation.
+ ! Gives 3x3x3 coeff. array for either u or v component of velocity, depending on the
+ ! value of the flag 'which'. Example of function call:
+ !
+ ! g = normhorizmainbc(dusrfew(ew,ns),dusrfnx(ew,ns),dsigmadew(up),dsigmadns(up),which,up,bcflag)
+ !
+ ! ... where g is a 3x3x3 array.
+ !
+ ! 'bcflag' is a 1 x 2 vector to indicate (1) which b.c. is being solved for (surface or bed) and
+ ! (2), if solving for the bed b.c., which type of b.c. to use. For example, bcflag = [ 0, 0 ]
+ ! denotes free sfc bc; bcflag = [ 1, 0 ] denotes basal bc w/ u=v=0, etc. (see also subroutine
+ ! "bodyset"). "fourorone" and "oneorfour" are given by vectors: fourorone = [ 4 1 ]; oneorfour = [ 1 4 ].
+ ! A single value is chosen from each vector and applied to the calculation of coefficients below.
+ ! The "correct" value needed to satisfy the expression is chosen based on the "which" flag, which
+ ! takes on a value of 1 for calculations in the x direction and a value of 2 for calculations in
+ ! the y direction.
+ !
+ ! "os" = one-sided: the du/dsigma terms use 3-pt one-sided vertical
+ ! differences — bwd (3,-4,1) at the sfc, fwd (-1,4,-3) at the bed.
+ ! Each coefficient is scaled by the 4-pt mean of the eff. viscosities
+ ! nearest the relevant boundary (efvs(1,:,:) sfc, efvs(2,:,:) bed).
+ ! Only bcflag = (1,0), (1,1) and (0,*) assign coefficients; for any
+ ! other combination the zero-initialized g is returned.
+
+ implicit none
+
+ real (kind = dp), intent(in) :: dew, dns ! horizontal grid spacings
+ real (kind = dp), intent(in) :: dusrfdew, dusrfdns, dsigmadew, dsigmadns, dup
+ real (kind = dp), intent(in), dimension(2) :: oneorfour, fourorone
+ real (kind = dp), dimension(3,3,3) :: normhorizmainbcos
+ real (kind = dp), dimension(3,3,3) :: g ! work block (vertical, ew, ns)
+ real (kind = dp) :: c ! coefficient currently being placed
+
+ integer, intent(in) :: which ! 1 = x-direction, 2 = y-direction
+ integer, intent(in), dimension(2) :: bcflag
+ real (kind = dp), intent(in), dimension(2,2,2) :: efvs
+
+ c = 0.d0
+ g(:,:,:) = 0.d0
+
+ ! for higher-order FREE SURFACE B.C. for x ('which'=1) or y ('which'=2) direction ...
+ ! NOTE that this handles the case for specified stress at the bed as well, as we
+ ! simply pass in a different value for the normal vector (slope) components (still
+ ! called "dusrfdns", "dusrfdew" here, but args passed in are different).
+ if( bcflag(1) == 1 .and. bcflag(2) == 0 )then
+
+
+ ! first, coeff. that go with du/dsigma, and thus are associated
+ ! with u(1,2,2) and u(3,2,2) ...
+! c = ( fourorone(which) * dusrfdew * dsigmadew &
+! + oneorfour(which) * dusrfdns * dsigmadns )/(2.d0*dup)
+ c = ( fourorone(which) * dusrfdew * dsigmadew &
+ + oneorfour(which) * dusrfdns * dsigmadns )/(2.d0*dup) * ( sum( efvs(1,:,:) ) / 4.d0 )
+
+ ! bwd one-sided vertical difference anchored at the surface level
+ g(1,2,2) = 3.d0*c
+ g(2,2,2) = -4.d0*c
+ g(3,2,2) = c
+
+ ! next, coeff. that go with du/dxhat and du/dyhat terms ...
+! c = fourorone(which) * dusrfdew / (2*dew)
+ c = fourorone(which) * dusrfdew / (2*dew) * ( sum( efvs(1,:,:) ) / 4.d0 )
+ g(1,3,2) = c
+ g(1,1,2) = -c
+
+! c = oneorfour(which) * dusrfdns / (2*dns)
+ c = oneorfour(which) * dusrfdns / (2*dns) * ( sum( efvs(1,:,:) ) / 4.d0 )
+ g(1,2,3) = c
+ g(1,2,1) = -c
+
+ end if
+
+ ! higher-order, specified traction basal bc, must use fwd rather than bwd one-sided
+ ! diff in vertical direction
+ if( bcflag(1) == 1 .and. bcflag(2) == 1 )then
+
+ ! first, coeff. that go with du/dsigma, and thus are associated
+ ! with u(1,2,2) and u(3,2,2) ...
+! c = ( fourorone(which) * dusrfdew * dsigmadew &
+! + oneorfour(which) * dusrfdns * dsigmadns )/(2*dup)
+ c = ( fourorone(which) * dusrfdew * dsigmadew &
+ + oneorfour(which) * dusrfdns * dsigmadns )/(2*dup) * ( sum( efvs(2,:,:) ) / 4.d0 )
+
+ ! fwd one-sided vertical difference anchored at the basal level
+ g(1,2,2) = -1.d0*c
+ g(2,2,2) = 4.d0*c
+ g(3,2,2) = -3.d0*c
+
+ ! next, coeff. that go with du/dxhat and du/dyhat terms ...
+! c = fourorone(which) * dusrfdew / (2*dew)
+ c = fourorone(which) * dusrfdew / (2.d0*dew) * ( sum( efvs(2,:,:) ) / 4.d0 )
+ g(3,3,2) = c
+ g(3,1,2) = -c
+
+! c = oneorfour(which) * dusrfdns / (2*dns)
+ c = oneorfour(which) * dusrfdns / (2.d0*dns) * ( sum( efvs(2,:,:) ) / 4.d0 )
+ g(3,2,3) = c
+ g(3,2,1) = -c
+
+ end if
+
+ ! for higher-order BASAL B.C. U=V=0, in x ('which'=1) or y ('which'=2) direction ...
+ ! note that this requires that rhs(up) be set to 0 as well ...
+ if( bcflag(1) == 0 )then
+
+ ! identity row: pins the boundary velocity to the RHS value (0)
+ g(:,:,:) = 0.d0
+ g(2,2,2) = 1.d0;
+
+ end if
+
+ normhorizmainbcos = g
+
+ return
+
+end function normhorizmainbcos
+
+!***********************************************************************
+
+function croshorizmainbcos(dew, dns, &
+ dusrfdew, dusrfdns, &
+ dsigmadew, dsigmadns, &
+ which, bcflag, &
+ dup, local_othervel, &
+ efvs, &
+ oneortwo, twoorone, &
+ g_cros, velbc )
+
+ ! As described for "normhorizmainbc" above. The vectors "twoorone" and
+ ! "oneortwo" are given by: twoorone = [ 2 1 ]; oneortwo = [ 1 2 ];
+ !
+ ! Cross-term ("os" = one-sided) companion to normhorizmainbcos: assembles
+ ! the 3x3x3 coefficient block that multiplies the OTHER velocity component
+ ! (local_othervel) at the sfc/bed boundary. Also returns in 'g_cros' the
+ ! block WITHOUT any bed coefficient, for the basal-traction estimate.
+ !
+ ! NOTE(review): 'velbc' is optional but is referenced without a present()
+ ! guard in the bcflag(1)==0 branch below — callers must supply it whenever
+ ! that branch can be taken; confirm at call sites.
+
+ implicit none
+
+ integer, intent(in) :: which ! 1 = x-direction, 2 = y-direction
+ integer, intent(in), dimension(:) :: bcflag
+
+ real (kind = dp), intent(in) :: dew, dns ! horizontal grid spacings
+ real (kind = dp), intent(in), dimension(:) :: oneortwo, twoorone
+ real (kind = dp), intent(in) :: dusrfdew, dusrfdns, dsigmadew, dsigmadns, dup
+ real (kind = dp), intent(in), dimension(:,:,:) :: local_othervel
+ real (kind = dp), intent(in), dimension(:,:,:) :: efvs
+ real (kind = dp), intent(in), optional :: velbc
+ real (kind = dp), intent(out),dimension(:,:,:) :: g_cros
+
+
+ real (kind = dp), dimension(3,3,3) :: g, croshorizmainbcos
+ real (kind = dp) :: c
+ ! number of nonzero entries of local_othervel (integer count; the real
+ ! sum assigned to it below is always a whole number)
+ integer :: nz
+
+ c = 0.d0
+ g(:,:,:) = 0.d0
+ g_cros = g
+ nz = 0
+
+ ! for higher-order FREE SURFACE B.C. for x ('which'=1) or y ('which'=2) direction ...
+ ! NOTE that this handles the case for specified stress at the bed as well, as we
+ ! simply pass in a different value for the normal vector (slope) components (still
+ ! called "dusrfdns", "dusrfdew" here, but args passed in are different).
+ if( bcflag(1) == 1 .and. bcflag(2) == 0 )then
+
+ ! first, coeff. that go with du/dsigma, and thus are associated
+ ! with u(1,2,2) and u(3,2,2) ...
+! c = ( - twoorone(which) * dusrfdew * dsigmadns &
+! - oneortwo(which) * dusrfdns * dsigmadew )/(2.d0*dup)
+ c = ( - twoorone(which) * dusrfdew * dsigmadns &
+ - oneortwo(which) * dusrfdns * dsigmadew )/(2.d0*dup) * ( sum( efvs(1,:,:) ) / 4.d0 )
+
+ ! bwd one-sided vertical difference anchored at the surface level
+ g(1,2,2) = 3.d0*c
+ g(2,2,2) = -4.d0*c
+ g(3,2,2) = c
+
+ ! next, coeff. that go with du/dxhat and du/dyhat terms ...
+! c = - oneortwo(which) * dusrfdns / (2*dew)
+ c = - oneortwo(which) * dusrfdns / (2.d0*dew) * ( sum( efvs(1,:,:) ) / 4.d0 )
+ g(1,3,2) = c
+ g(1,1,2) = -c
+
+! c = - twoorone(which) * dusrfdew / (2*dns)
+ c = - twoorone(which) * dusrfdew / (2.d0*dns) * ( sum( efvs(1,:,:) ) / 4.d0 )
+ g(1,2,3) = c
+ g(1,2,1) = -c
+
+ end if
+
+ ! higher-order, specified traction basal bc, must use fwd rather than bwd one-sided
+ ! diff in vertical direction
+ if( bcflag(1) == 1 .and. bcflag(2) == 1 )then
+
+ ! first, coeff. that go with du/dsigma, and thus are associated
+ ! with u(1,2,2) and u(3,2,2) ...
+! c = ( - twoorone(which) * dusrfdew * dsigmadns &
+! - oneortwo(which) * dusrfdns * dsigmadew )/(2*dup)
+ c = ( - twoorone(which) * dusrfdew * dsigmadns &
+ - oneortwo(which) * dusrfdns * dsigmadew )/(2.d0*dup) * ( sum( efvs(2,:,:) ) / 4.d0 )
+
+ ! fwd one-sided vertical difference anchored at the basal level
+ g(1,2,2) = -1.d0*c
+ g(2,2,2) = 4.d0*c
+ g(3,2,2) = -3.d0*c
+
+ ! next, coeff. that go with du/dxhat and du/dyhat terms ...
+! c = - oneortwo(which) * dusrfdns / (2*dew)
+ c = - oneortwo(which) * dusrfdns / (2.d0*dew) * ( sum( efvs(2,:,:) ) / 4.d0 )
+ g(3,3,2) = c
+ g(3,1,2) = -c
+
+
+! c = - twoorone(which) * dusrfdew / (2*dns)
+ c = - twoorone(which) * dusrfdew / (2.d0*dns) * ( sum( efvs(2,:,:) ) / 4.d0 )
+ g(3,2,3) = c
+ g(3,2,1) = -c
+
+ end if
+
+ ! for higher-order BASAL B.C. U=V=0, in x ('which'=1) or y ('which'=2) direction ...
+ ! This forces the multiplication by 'local_otherval' in the main program
+ ! to result in a value of 1, thus leaving the boundary vel. unchanged
+ ! ... conditional makes sure there is no div by zero if the bc value IS also zero
+ if( bcflag(1) == 0 )then
+
+ g(:,:,:) = 0.d0
+
+ ! count the nonzero entries of local_othervel by summing a 0/1 mask
+ where( local_othervel /= 0.d0 )
+ g = 1.d0
+ elsewhere
+ g = 0.d0
+ endwhere
+
+ nz = sum( g )
+ g(:,:,:) = 0.d0
+
+ ! distribute velbc evenly over the nonzero entries so that
+ ! sum(g * local_othervel) = velbc in the main program
+ ! NOTE(review): if ALL of local_othervel is zero, nz = 0 here; the masked
+ ! assignment should then never use velbc/nz — confirm no div-by-zero is
+ ! possible under the compilers in use.
+ where( local_othervel /= 0.d0 )
+ g = ( velbc / nz ) / local_othervel
+ elsewhere
+ g = 0.d0
+ endwhere
+
+ end if
+
+ ! NOTE: here we define 'g_cros' FIRST, because we want the value w/o the plastic
+ ! bed coeff. included (needed for estimate of basal traction in plastic bed iteration)
+ g_cros = g
+
+ croshorizmainbcos = g
+
+ return
+
+end function croshorizmainbcos
+
+!***********************************************************************
+
+function normhorizmainbc_lat(dew, dns, &
+ dusrfdew, dusrfdns, &
+ dsigmadew, dsigmadns, &
+ which, what, &
+ dup, efvs, &
+ oneorfour, fourorone, &
+ onesideddiff, &
+ normal, fwdorbwd, &
+ foew, fons )
+
+ ! Analogous to "normhorizmainbc" but for the case of lateral stress (ice shelf)
+ ! boundary conditions. Note that the basic form of the equations is the same.
+ ! What changes here is (1) the value of the normal vector that is passed in (at
+ ! the sfc and bed we pass in the surface or basal slopes, while at the boundaries
+ ! we use the normal vector orientation to the boundary in map view) and (2) we to
+ ! to use one sided diffs at the lateral boundaries rather than centerd diffs.
+
+ ! Note that we assume here that du/dz (and thus du/dsigma) is approx. 0 for an ice
+ ! shelf, and also that the sfc/basal slopes of an ice shelf are very flat at/near
+ ! the boundary. Thus, we assume flow is depth independent and we ignore gradients
+ ! in sigma.
+
+ implicit none
+
+ real(dp), intent(in) :: dew, dns ! horizontal grid spacings
+ real(dp), intent(in) :: dusrfdew, dusrfdns, dsigmadew, dsigmadns, dup
+ real(dp), intent(in), dimension(2) :: oneorfour, fourorone, normal, fwdorbwd
+ real(dp), intent(in), dimension(3) :: onesideddiff ! 2nd-order one-sided diff weights
+ real (kind = dp), intent(in), dimension(:,:,:) :: efvs
+
+ integer, intent(in) :: which, what ! component flag (1=x, 2=y); bc selector for whichbc
+
+ logical, intent(in) :: fons, foew ! true when geom. requires 1st-order one sided diffs for shelf bcs
+
+ real(dp), dimension(3,3,3) :: normhorizmainbc_lat
+ real(dp), dimension(3,3,3) :: g ! work block (vertical, ew, ns)
+ real(dp), dimension(2) :: whichbc
+ real(dp) :: c
+ real (kind = dp) :: bar, efvsbar ! no. of valid efvs values; their mean
+
+ c = 0.d0; g(:,:,:) = 0.d0; whichbc = (/ 0.d0, 1.d0 /)
+
+ ! averaging number for eff. visc. at domain edges.
+ ! FIX: previously computed as sum(efvs/efvs, mask), which evaluates the
+ ! division for every element before masking (0/0 -> NaN / possible FPE
+ ! where efvs is zero); count() yields the same value safely.
+ bar = real( count( efvs(:,:,:) > effstrminsq ), dp )
+
+ ! average visc. to use in coeff. calc.
+ ! NOTE(review): if no element exceeds effstrminsq then bar = 0 and the
+ ! average is non-finite, as in the original code — confirm callers
+ ! guarantee at least one valid efvs value here.
+ efvsbar = sum( efvs(:,:,:), efvs(:,:,:) > effstrminsq ) / bar
+
+ ! make the following lines active to turn OFF the visc. dependence in the LHS matrix coeffs.
+ !efvsbar = 1.0d0;
+
+ ! for higher-order FREE SURFACE B.C. for x ('which'=1) or y ('which'=2) direction ...
+ ! (also applies to basal stress bc)
+
+ ! first, coeff. that go with du/dsigma, and thus are associated with u(1,2,2) and u(3,2,2) ...
+ ! ...note that these are stored in an empty column of 'g' (a corner column) so that we don't
+ ! overwrite these values in the case of fwd/bwd horiz. diffs., which require 3 spaces
+ c = ( fourorone(which) * dusrfdew * dsigmadew &
+ + oneorfour(which) * dusrfdns * dsigmadns )/(2*dup) * efvsbar
+ g(3,3,3) = -c * whichbc(what)
+ g(1,3,3) = c * whichbc(what)
+
+ if( normal(1) == 0.d0 )then ! centered in x ...
+
+ c = fourorone(which) * dusrfdew / (2*dew) * efvsbar
+ g(2,3,2) = c
+ g(2,1,2) = -c
+
+ elseif( normal(1) /= 0.d0 )then ! forward/backward in x ...
+
+ ! three stencil points: behind, centre, ahead (relative to fwdorbwd);
+ ! 1st-order 2-pt form when foew, else 2nd-order one-sided weights
+ if( foew )then
+ c = -1.d0 * fwdorbwd(1) * fourorone(which) * dusrfdew / dew * efvsbar
+ else
+ c = fourorone(which) * fwdorbwd(1) * onesideddiff(1) * dusrfdew / (2.d0*dew) * efvsbar
+ endif
+ g(2,2-int(fwdorbwd(1)),2) = c
+
+ if( foew )then
+ c = fwdorbwd(1)*fourorone(which) * dusrfdew / dew * efvsbar
+ else
+ c = fourorone(which) * fwdorbwd(1) * onesideddiff(2) * dusrfdew / (2.d0*dew) * efvsbar
+ endif
+ g(2,2,2) = c
+
+ if( foew )then
+ c = 0.d0
+ else
+ c = fourorone(which) * fwdorbwd(1) * onesideddiff(3) * dusrfdew / (2.d0*dew) * efvsbar
+ endif
+ g(2,2+int(fwdorbwd(1)),2) = c
+
+ end if
+
+ if( normal(2) == 0.d0 ) then ! centered in y ...
+ ! (NOTE that y coeff. are stored in g(1,:,:) )
+
+ c = oneorfour(which) * dusrfdns / (2*dns) * efvsbar
+ g(1,2,3) = c
+ g(1,2,1) = -c
+
+ elseif( normal(2) /= 0.d0) then ! forward/backward in y ...
+
+ if( fons )then
+ c = -1.d0 * fwdorbwd(2) * oneorfour(which) * dusrfdns / dns * efvsbar
+ else
+ c = oneorfour(which) * fwdorbwd(2) * onesideddiff(1) * dusrfdns / (2.d0*dns) * efvsbar
+ endif
+ g(1,2,2-int(fwdorbwd(2))) = c
+
+ if( fons )then
+ c = fwdorbwd(2)*oneorfour(which) * dusrfdns / dns * efvsbar
+ else
+ c = oneorfour(which) * fwdorbwd(2) * onesideddiff(2) * dusrfdns / (2.d0*dns) * efvsbar
+ endif
+ g(1,2,2) = c
+
+ if( fons )then
+ c = 0.d0
+ else
+ c = oneorfour(which) * fwdorbwd(2) * onesideddiff(3) * dusrfdns / (2.d0*dns) * efvsbar
+ endif
+ g(1,2,2+int(fwdorbwd(2))) = c
+
+ end if
+
+ normhorizmainbc_lat = g
+
+ return
+
+end function normhorizmainbc_lat
+
+!***********************************************************************
+
+function croshorizmainbc_lat (dew, dns, &
+ dusrfdew, dusrfdns, &
+ dsigmadew, dsigmadns, &
+ which, what, &
+ dup, local_othervel, &
+ efvs, &
+ oneortwo, twoorone, &
+ onesideddiff, &
+ normal, fwdorbwd, &
+ foew, fons )
+
+ ! Analagous to "normhorizmainbc_lat" but for cross terms. See notes above.
+
+ implicit none
+
+ real(dp), intent(in) :: dew, dns ! horizontal grid spacings
+ real(dp), intent(in), dimension(2) :: oneortwo, twoorone, fwdorbwd, normal
+ real(dp), intent(in), dimension(3) :: onesideddiff ! 2nd-order one-sided diff weights
+ real(dp), intent(in) :: dusrfdew, dusrfdns, dsigmadew, dsigmadns, dup
+ real(dp), intent(in), dimension(3,3,3) :: local_othervel
+ real (kind = dp), intent(in), dimension(:,:,:) :: efvs
+
+ integer, intent(in) :: which, what ! component flag (1=x, 2=y); bc selector for whichbc
+
+ real(dp), dimension(3,3,3) :: g, croshorizmainbc_lat
+ real(dp), dimension(3) :: gvert ! du/dsigma coeffs held separately
+ real(dp), dimension(2) :: whichbc
+ real(dp) :: c
+
+ integer, dimension(2) :: inormal ! boundary normal as integer offsets (-1/0/+1)
+
+ logical, intent(in) :: fons, foew ! true when geom. requires 1st-order one sided diffs for shelf bcs
+
+ real (kind = dp) :: bar, efvsbar ! no. of valid efvs values; their mean
+
+ ! averaging number for eff. visc. at domain edges.
+ ! FIX: previously computed as sum(efvs/efvs, mask), which evaluates the
+ ! division for every element before masking (0/0 -> NaN / possible FPE
+ ! where efvs is zero); count() yields the same value safely.
+ bar = real( count( efvs(:,:,:) > effstrminsq ), dp )
+
+ ! average visc. to use in coeff. calc.
+ ! NOTE(review): non-finite if bar = 0, as in the original code.
+ efvsbar = sum( efvs(:,:,:), efvs(:,:,:) > effstrminsq ) / bar
+
+ ! make the following lines active to turn OFF the visc. dependence in the LHS matrix coeffs.
+! efvsbar = 1.0d0;
+
+ c = 0.d0
+ g(:,:,:) = 0.d0
+ gvert = 0.d0
+ whichbc = (/ 0.d0, 1.d0 /)
+ croshorizmainbc_lat = 0.d0
+
+ ! first, coeff. that go with du/dsigma, and thus are associated with u(1,2,2) and u(3,2,2)
+ ! ... note that these are stored in a separate vector (to avoid being overwritten if stored in normal 'g')
+ c = ( - twoorone(which) * dusrfdew * dsigmadns &
+ - oneortwo(which) * dusrfdns * dsigmadew )/(2.d0*dup) * efvsbar
+ gvert(3) = -c * whichbc(what)
+ gvert(1) = c * whichbc(what)
+
+ if( normal(1) == 0.d0 )then ! centered in x ...
+
+ c = -oneortwo(which) * dusrfdns / (2.d0*dew) * efvsbar
+ g(2,3,2) = c
+ g(2,1,2) = -c
+
+ elseif( normal(1) /= 0.d0 )then ! forward/backward in x ...
+ ! (NOTE that x coeff. are stored in g(2,:,:) )
+
+ if( foew )then
+ c = oneortwo(which) * fwdorbwd(1) * dusrfdns / dew * efvsbar
+ else
+ c = -oneortwo(which) * fwdorbwd(1) * onesideddiff(1) * dusrfdns / (2.d0*dew) * efvsbar
+ endif
+ g(2,2-int(fwdorbwd(1)),2) = c
+
+ if( foew )then
+ c = -oneortwo(which) * fwdorbwd(1) * dusrfdns / dew * efvsbar
+ else
+ c = -oneortwo(which) * fwdorbwd(1) * onesideddiff(2) * dusrfdns / (2.d0*dew) * efvsbar
+ endif
+ g(2,2,2) = c
+
+ if( foew )then
+ c = 0.d0
+ else
+ c = -oneortwo(which) * fwdorbwd(1) * onesideddiff(3) * dusrfdns / (2.d0*dew) * efvsbar
+ endif
+ g(2,2+int(fwdorbwd(1)),2) = c
+
+ end if
+
+ if( normal(2) == 0.d0 )then ! centered in y ...
+ ! (NOTE that y coeff. are stored in g(1,:,:) )
+
+ c = -twoorone(which) * dusrfdew / (2.d0*dns) * efvsbar
+ g(1,2,3) = c
+ g(1,2,1) = -c
+
+ elseif( normal(2) /= 0.d0 )then ! forward/backward in y ...
+
+ if( fons )then
+ c = twoorone(which) * fwdorbwd(2) * dusrfdew / dns * efvsbar
+ else
+ c = -twoorone(which) * fwdorbwd(2) * onesideddiff(1) * dusrfdew / (2.d0*dns) * efvsbar
+ endif
+ g(1,2,2-int(fwdorbwd(2))) = c
+
+ if( fons )then
+ c = -twoorone(which) * fwdorbwd(2) * dusrfdew / dns * efvsbar
+ else
+ c = -twoorone(which) * fwdorbwd(2) * onesideddiff(2) * dusrfdew / (2.d0*dns) * efvsbar
+ endif
+ g(1,2,2) = c
+
+ if( fons )then
+ c = 0.d0
+ else
+ c = -twoorone(which) * fwdorbwd(2) * onesideddiff(3) * dusrfdew / (2.d0*dns) * efvsbar
+ endif
+ g(1,2,2+int(fwdorbwd(2))) = c
+
+ end if
+
+ ! Now rearrange position of coefficients in structure 'g' so that they are multiplied by
+ ! the correct velocity component of 'local_othervel' in 'bodyset' ...
+ ! ... this can be done by using the boundary normal vector to shift the indices of the rows/columns
+ ! in 'g', in the appropriate direction. First, convert the boundary normal to an integer index ...
+ ! FIX: previously computed as int(normal/abs(normal)), which evaluates
+ ! 0/0 (NaN, with undefined int() result) when a normal component is zero;
+ ! explicit sign tests give the intended -1/0/+1 deterministically.
+ inormal = 0
+ if( normal(1) > 0.d0 ) inormal(1) = 1
+ if( normal(1) < 0.d0 ) inormal(1) = -1
+ if( normal(2) > 0.d0 ) inormal(2) = 1
+ if( normal(2) < 0.d0 ) inormal(2) = -1
+
+ croshorizmainbc_lat(2,:,2+inormal(2)) = g(2,:,2) ! move x-coeffs. appropriate amount
+ croshorizmainbc_lat(1,2+inormal(1),:) = g(1,2,:) ! move y-coeffs. appropriate amount
+
+ ! sum coeffs. that are in same column and flatten so that all coeff. are on level (2,:,:)
+ croshorizmainbc_lat(2,:,:) = croshorizmainbc_lat(2,:,:) + croshorizmainbc_lat(1,:,:)
+
+ ! set remaining coeff. on this level to to 0 ...
+ croshorizmainbc_lat(1,:,:) = 0.d0
+
+ ! account for vertical terms stored separately and temporarily in 'gvert'
+ ! NOTE(review): gvert already carries a whichbc(what) factor from above, so
+ ! this multiplies by it twice; harmless since whichbc is 0 or 1 — confirm intended.
+ croshorizmainbc_lat(1,2+inormal(1),2+inormal(2)) = gvert(1) * whichbc(what)
+ croshorizmainbc_lat(3,2+inormal(1),2+inormal(2)) = gvert(3) * whichbc(what)
+
+ return
+
+end function croshorizmainbc_lat
+
+!***********************************************************************
+
+! ---> the following routines are for derivatives in the main body
+
+function horiztermdxdx(efvs,fact) result(coeff)
+
+ ! Coefficients for the d/dx(f.du/dx) and d/dy(f.du/dy) terms: a 3-point
+ ! stencil whose outer entries are the adjacent effective viscosities
+ ! scaled by the grid factor, with a centre entry of minus their sum
+ ! (so the stencil annihilates constants).
+
+ implicit none
+
+ real(dp), dimension(2), intent(in) :: efvs ! eff. visc. on the two adjacent faces
+ real(dp), intent(in) :: fact ! grid-spacing factor
+
+ real(dp), dimension(3) :: coeff
+
+ coeff(1) = efvs(1) * fact
+ coeff(3) = efvs(2) * fact
+ coeff(2) = - coeff(3) - coeff(1)
+
+ return
+
+end function horiztermdxdx
+
+!***********************************************************************
+
+function horiztermdxdy(efvs,fact) result(coeff)
+
+ ! Coefficients for the mixed d/dy(f.du/dx) and d/dx(f.du/dy) terms.
+ ! The 3x2 block is written out directly in its final form (the original
+ ! built it up by incremental updates): each column pairs +/- entries so
+ ! the stencil annihilates constants.
+
+ implicit none
+
+ real(dp), dimension(2), intent(in) :: efvs ! eff. visc. on the two adjacent faces
+ real(dp), intent(in) :: fact ! grid-spacing factor
+
+ real(dp), dimension(3,2) :: coeff
+ real(dp) :: fp, fm ! efvs(2)*fact and efvs(1)*fact
+
+ fp = efvs(2) * fact
+ fm = efvs(1) * fact
+
+ coeff(3,2) = fp
+ coeff(3,1) = -fp
+ coeff(1,2) = -fm
+ coeff(1,1) = fm
+ coeff(2,2) = fp - fm
+ coeff(2,1) = fm - fp
+
+ return
+
+end function horiztermdxdy
+
+!***********************************************************************
+
+function horiztermdsdx(dsigmadxy,efvs,fact) result(coeff)
+
+ ! Coefficients for the d/ds(f.du/dx) and d/ds(f.du/dy) terms.
+ ! Same final-form layout as horiztermdxdy, with each viscosity weight
+ ! additionally scaled by the sigma-coordinate gradient.
+
+ implicit none
+
+ real(dp), dimension(2), intent(in) :: efvs ! eff. visc. on the two adjacent faces
+ real(dp), intent(in) :: dsigmadxy ! dsigma/dx (or /dy) at this level
+ real(dp), intent(in) :: fact ! grid-spacing factor
+
+ real(dp), dimension(3,2) :: coeff
+ real(dp) :: fp, fm ! dsigmadxy*efvs(2)*fact and dsigmadxy*efvs(1)*fact
+
+ fp = dsigmadxy * efvs(2) * fact
+ fm = dsigmadxy * efvs(1) * fact
+
+ coeff(3,2) = fp
+ coeff(3,1) = -fp
+ coeff(1,2) = -fm
+ coeff(1,1) = fm
+ coeff(2,2) = fp - fm
+ coeff(2,1) = fm - fp
+
+ return
+
+end function horiztermdsdx
+
+!***********************************************************************
+
+function horiztermdxds(dsigmadxy,efvs,fact) result(coeff)
+
+ ! Coefficients for the d/dx(f.du/ds) and d/dy(f.du/ds) terms.
+ ! Transposed (2x3) layout of horiztermdsdx, written out directly in its
+ ! final form rather than by incremental updates.
+
+ implicit none
+
+ real(dp), dimension(2), intent(in) :: efvs ! eff. visc. on the two adjacent faces
+ real(dp), intent(in) :: dsigmadxy ! dsigma/dx (or /dy) at this level
+ real(dp), intent(in) :: fact ! grid-spacing factor
+
+ real(dp), dimension(2,3) :: coeff
+ real(dp) :: fp, fm ! dsigmadxy*efvs(2)*fact and dsigmadxy*efvs(1)*fact
+
+ fp = dsigmadxy * efvs(2) * fact
+ fm = dsigmadxy * efvs(1) * fact
+
+ coeff(2,3) = fp
+ coeff(1,3) = -fp
+ coeff(2,1) = -fm
+ coeff(1,1) = fm
+ coeff(2,2) = fp - fm
+ coeff(1,2) = fm - fp
+
+ return
+
+end function horiztermdxds
+
+!***********************************************************************
+
+function horiztermdsds(dsigmadxysq,efvs,fact) result(coeff)
+
+ ! Coefficients for the d/ds(f.du/ds) term: a 3-point vertical stencil
+ ! weighted by the squared sigma gradient, centre entry balancing the
+ ! outer two.
+
+ implicit none
+
+ real(dp), dimension(2), intent(in) :: efvs ! eff. visc. at the two adjacent half-levels
+ real(dp), intent(in) :: dsigmadxysq ! (dsigma/dx)^2 (or /dy) at this level
+ real(dp), intent(in) :: fact ! grid-spacing factor
+
+ real(dp), dimension(3) :: coeff
+
+ coeff(1) = dsigmadxysq * efvs(1) * fact
+ coeff(3) = dsigmadxysq * efvs(2) * fact
+ coeff(2) = - coeff(3) - coeff(1)
+
+ return
+
+end function horiztermdsds
+
+!***********************************************************************
+
+function horiztermds(d2sigmadxy2etc,efvs,fact) result(coeff)
+
+ ! Coefficients for the first-order f.du/ds term: an antisymmetric
+ ! 2-point vertical pair (+/- the same weight).
+
+ implicit none
+
+ real(dp), intent(in) :: efvs ! eff. viscosity (scalar sum)
+ real(dp), intent(in) :: d2sigmadxy2etc ! combined 2nd-derivative sigma factor
+ real(dp), intent(in) :: fact ! grid-spacing factor
+
+ real(dp), dimension(2) :: coeff
+
+ coeff(2) = d2sigmadxy2etc * efvs * fact
+ coeff(1) = - coeff(2)
+
+ return
+
+end function horiztermds
+
+! ---> end of routines for derivatives in the main body
+
+!***********************************************************************
+
+subroutine fillsprsemain(inp,locplusup,ptindx,up,pt,osshift)
+
+ ! scatter coefficients from 3x3x3 block "g" onto sparse matrix row.
+ ! The same 5-point horizontal pattern (centre, ew +/- 1, ns +/- 1) is
+ ! inserted for the current level (inp(2,:,:)), the level with a LARGER
+ ! first index (inp(3,:,:), row up+1, closer to the bed) and the level
+ ! with a SMALLER first index (inp(1,:,:), row up-1, closer to the sfc),
+ ! in that order — preserving the original insertion sequence.
+
+ implicit none
+
+ real(dp), dimension(3,3,3), intent(in):: inp
+ integer, intent(in) :: locplusup, up, pt
+ integer, dimension(6), intent(in) :: ptindx
+ integer, intent(in) :: osshift
+
+ ! vertical slice of inp and matching row offset, in insertion order:
+ ! same level, level below (bedward), level above (sfcward)
+ integer, dimension(3), parameter :: lev = (/ 2, 3, 1 /)
+ integer, dimension(3), parameter :: off = (/ 0, 1, -1 /)
+ ! (ew,ns) indices of the 5-point horizontal stencil within inp,
+ ! matched one-for-one with ptindx(1:5)
+ integer, dimension(5), parameter :: jew = (/ 2, 3, 1, 2, 2 /)
+ integer, dimension(5), parameter :: kns = (/ 2, 2, 2, 3, 1 /)
+
+ integer :: l, p
+
+ do l = 1, 3
+ do p = 1, 5
+ call putpcgc( inp(lev(l),jew(p),kns(p)), &
+ ptindx(p)+up+off(l)+osshift, locplusup, pt )
+ end do
+ end do
+
+ return
+
+end subroutine fillsprsemain
+
+!***********************************************************************
+
+subroutine fillsprsebndy(inp,locplusup,ptindx,up,normal,pt)
+
+  ! scatter coeff. from 3x3x3 block "g" onto sparse matrix row. This subroutine
+  ! is specifically for the boundary conditions, which are handled differently
+  ! than points in the "main" body of the domain (interior to boundaries).
+  !
+  ! inp       - 3x3x3 coefficient block "g" (1st index = level, 2nd/3rd = map plane)
+  ! locplusup - sparse-matrix row being filled
+  ! ptindx    - re-arranged location indices (see 'getlatboundinfo')
+  ! up        - vertical index of the current level
+  ! normal    - outward boundary normal (x,y); components are 0, +/-1 or
+  !             +/-1/sqrt(2), and select the differencing stencil below
+  ! pt        - passed through to 'putpcgc'
+  implicit none
+
+  integer, intent(in) :: locplusup, up, pt
+  integer, dimension(6), intent(in) :: ptindx
+  real(dp), dimension(3,3,3), intent(in) :: inp
+  real(dp), dimension(2), intent(in) :: normal
+
+  ! at points where mixed centered and one-side diffs. would apply
+  if( normal(1) == 0.d0 )then ! at boundary normal to y, centered diffs in x
+     if( normal(2) == -1.d0 )then ! at boundary w/ normal [0,-1]
+        call putpcgc(inp(1,3,3),ptindx(5)+up-1,locplusup,pt)
+        ! NOTE(review): this branch folds inp(1,2,1) into the on-level
+        ! coefficient while the [0,1] branch folds inp(1,2,3) -- looks like
+        ! the intended mirror-image stencil, but confirm against the
+        ! discretization notes
+        call putpcgc( inp(2,3,3)+inp(1,2,1),ptindx(5)+up,locplusup,pt)
+        call putpcgc(inp(3,3,3),ptindx(5)+up+1,locplusup,pt)
+        call putpcgc(inp(1,2,3),ptindx(4)+up,locplusup,pt)
+     else ! at boundary w/ normal [0,1]
+        call putpcgc(inp(1,3,3),ptindx(4)+up-1,locplusup,pt)
+        call putpcgc(inp(2,3,3)+inp(1,2,3),ptindx(4)+up,locplusup,pt)
+        call putpcgc(inp(3,3,3),ptindx(4)+up+1,locplusup,pt)
+        call putpcgc(inp(1,2,1),ptindx(5)+up,locplusup,pt)
+     end if
+     call putpcgc(inp(1,2,2),ptindx(1)+up,locplusup,pt)
+  end if
+
+  if( normal(2) == 0.d0 )then ! at boundary normal to x, centered diffs in y
+     if( normal(1) == -1.d0 )then ! at boundary w/ normal [-1,0]
+        call putpcgc(inp(1,3,3),ptindx(3)+up-1,locplusup,pt)
+        call putpcgc( inp(2,3,3)+inp(2,1,2),ptindx(3)+up,locplusup,pt)
+        call putpcgc(inp(3,3,3),ptindx(3)+up+1,locplusup,pt)
+        call putpcgc(inp(2,3,2),ptindx(2)+up,locplusup,pt)
+     else ! at boundary w/ normal [1,0]
+        call putpcgc(inp(1,3,3),ptindx(2)+up-1,locplusup,pt)
+        call putpcgc( inp(2,3,3)+inp(2,3,2),ptindx(2)+up,locplusup,pt)
+        call putpcgc(inp(3,3,3),ptindx(2)+up+1,locplusup,pt)
+        call putpcgc(inp(2,1,2),ptindx(3)+up,locplusup,pt)
+     end if
+     ! NOTE(review): the central point uses inp(2,2,2) here but inp(1,2,2)
+     ! in the normal(1)==0 branch above -- confirm the asymmetry is intended
+     call putpcgc(inp(2,2,2),ptindx(1)+up,locplusup,pt)
+  end if
+
+  ! at corners where only one-side diffs. apply
+  if( normal(1) > 0.d0 .and. normal(2) /= 0.d0 )then
+     if( normal(2) > 0.d0 )then ! corner w/ normal [ 1/sqrt(2), 1/sqrt(2) ]
+        call putpcgc(inp(1,3,3),ptindx(2)+up-1,locplusup,pt)
+        call putpcgc(inp(3,3,3),ptindx(2)+up+1,locplusup,pt)
+        call putpcgc(inp(2,3,3)+inp(2,3,2)+inp(1,2,3),ptindx(2)+up,locplusup,pt)
+        call putpcgc(inp(2,2,2),ptindx(1)+up,locplusup,pt)
+        call putpcgc(inp(1,2,2),ptindx(6)+up,locplusup,pt)
+        call putpcgc(inp(1,2,1),ptindx(5)+up,locplusup,pt)
+        call putpcgc(inp(2,1,2),ptindx(3)+up,locplusup,pt)
+     else ! corner w/ normal [ 1/sqrt(2), -1/sqrt(2) ]
+        call putpcgc(inp(1,3,3),ptindx(2)+up-1,locplusup,pt)
+        call putpcgc(inp(3,3,3),ptindx(2)+up+1,locplusup,pt)
+        call putpcgc(inp(2,3,3)+inp(1,2,1)+inp(2,3,2),ptindx(2)+up,locplusup,pt)
+        call putpcgc(inp(2,2,2),ptindx(1)+up,locplusup,pt)
+        call putpcgc(inp(2,1,2),ptindx(3)+up,locplusup,pt)
+        call putpcgc(inp(1,2,2),ptindx(6)+up,locplusup,pt)
+        call putpcgc(inp(1,2,3),ptindx(4)+up,locplusup,pt)
+     end if
+  end if
+
+  if( normal(1) < 0.d0 .and. normal(2) /= 0.d0 )then
+     if( normal(2) > 0.d0 )then ! corner w/ normal [ -1/sqrt(2), 1/sqrt(2) ]
+        call putpcgc(inp(1,3,3),ptindx(3)+up-1,locplusup,pt)
+        call putpcgc(inp(3,3,3),ptindx(3)+up+1,locplusup,pt)
+        call putpcgc(inp(2,3,3)+inp(1,2,3)+inp(2,1,2),ptindx(3)+up,locplusup,pt)
+        call putpcgc(inp(2,2,2),ptindx(1)+up,locplusup,pt)
+        call putpcgc(inp(2,3,2),ptindx(2)+up,locplusup,pt)
+        call putpcgc(inp(1,2,2),ptindx(6)+up,locplusup,pt)
+        call putpcgc(inp(1,2,1),ptindx(5)+up,locplusup,pt)
+     else ! corner w/ normal [ -1/sqrt(2), -1/sqrt(2) ]
+        call putpcgc(inp(1,3,3),ptindx(3)+up-1,locplusup,pt)
+        call putpcgc(inp(3,3,3),ptindx(3)+up+1,locplusup,pt)
+        call putpcgc(inp(2,3,3)+inp(2,1,2)+inp(1,2,1),ptindx(3)+up,locplusup,pt)
+        call putpcgc(inp(2,2,2),ptindx(1)+up,locplusup,pt)
+        call putpcgc(inp(1,2,2),ptindx(6)+up,locplusup,pt)
+        call putpcgc(inp(2,3,2),ptindx(2)+up,locplusup,pt)
+        call putpcgc(inp(1,2,3),ptindx(4)+up,locplusup,pt)
+     end if
+  end if
+
+  return
+
+end subroutine fillsprsebndy
+
+!***********************************************************************
+
+subroutine getlatboundinfo( ew, ns, up, ewn, nsn, upn, &
+                            thckin, loc_array, &
+                            fwdorbwd, normal, loc_latbc, &
+                            foew, fons)
+
+  ! Calculate map plane normal vector at 45 deg. increments
+  ! for regions of floating ice, and the associated bookkeeping for the
+  ! lateral boundary condition:
+  !   fons, foew - true where 1st-order one-sided diffs are needed in the
+  !                n/s (resp. e/w) direction
+  !   normal     - outward unit normal of the boundary (x,y components)
+  !   fwdorbwd   - direction (+/-1) of fwd/bwd differencing in x and y
+  !   loc_latbc  - re-arranged location indices used by 'fillsprsebndy'
+  ! NOTE: 'up' and 'upn' are not referenced here; retained so the caller's
+  ! interface is unchanged.
+  implicit none
+
+  integer, intent(in) :: ew, ns, up
+  integer, intent(in) :: ewn, nsn, upn
+  integer, dimension(ewn,nsn), intent(in) :: loc_array
+
+  real(dp), dimension(5,5), intent(in) :: thckin
+
+  real(dp), dimension(2), intent(out) :: fwdorbwd, normal
+  integer, dimension(6), intent(out) :: loc_latbc
+
+  logical, intent(out) :: fons, foew
+
+  real(dp), dimension(3,3) :: mask, maskcorners
+
+  integer, dimension(5,5) :: thckinmask
+
+  real(dp), dimension(3,3) :: thckmask, thck
+  real(dp), dimension(3) :: testvect
+  real(dp) :: phi
+
+  thck(:,:) = thckin(2:4,2:4)
+  thckinmask = 0
+
+  loc_latbc = 0; phi = 0.d0
+  ! 'mask' encodes the angle contributed by an ice-free cell on each edge of
+  ! the 3x3 neighborhood; 'maskcorners' does the same for the corner cells
+  mask(:,1) = (/ 0.d0, 180.d0, 0.d0 /)
+  mask(:,2) = (/ 270.d0, 0.d0, 90.d0 /)
+  mask(:,3) = (/ 0.d0, 360.d0, 0.d0 /)
+  maskcorners(:,1) = (/ 225.d0, 0.d0, 135.d0 /)
+  maskcorners(:,2) = (/ 0.d0, 0.d0, 0.d0 /)
+  maskcorners(:,3) = (/ 315.d0, 0.d0, 45.d0 /)
+
+  !! first section below contains logic to ID where 1st-order one-sided diffs are needed
+  where( thckin /= 0.d0 )
+     thckinmask = 1
+  endwhere
+
+  !! check if 1st-order one sided diffs. are needed in n/s direction
+  if( (thckinmask(3,3)+thckinmask(3,4)+thckinmask(3,5)) < 3 .and. (thckinmask(3,1)+thckinmask(3,2)) < 2 )then
+     fons = .true.
+  elseif( (thckinmask(3,1)+thckinmask(3,2)+thckinmask(3,3)) < 3 .and. (thckinmask(3,4)+thckinmask(3,5)) < 2 )then
+     fons = .true.
+  else
+     fons = .false.
+  endif
+
+  !! check if 1st-order one sided diffs. are needed in e/w direction
+  !! (the old comment said "n/s" here; the row/column pattern below is e/w)
+  if( (thckinmask(3,3)+thckinmask(4,3)+thckinmask(5,3)) < 3 .and. (thckinmask(1,3)+thckinmask(2,3)) < 2 )then
+     foew = .true.
+  elseif( (thckinmask(1,3)+thckinmask(2,3)+thckinmask(3,3)) < 3 .and. (thckinmask(4,3)+thckinmask(5,3)) < 2 )then
+     foew = .true.
+  else
+     foew = .false.
+  endif
+
+  ! specify new value of 'loc' vector such that fwd/bwd diffs. are set up correctly in sparse matrix
+  ! when function 'fillsprsebndy' is called. Also, specify appropriate values for the vectors 'normal'
+  ! and 'fwdorbwd', which specify the orientation of the boundary normal and the direction of forward or
+  ! backward differencing to be done in the lateral boundary condition functions 'normhorizmainbc_lat'
+  ! and 'croshorizmainbc_lat'
+
+  ! following is algorithm for calculating boundary normal at 45 deg. increments, based on arbitray
+  ! boundary shape (based on initial suggestions by Anne LeBrocq)
+  where( thck /= 0.d0 )
+     thckmask = 0.d0
+  elsewhere( thck == 0.d0 )
+     thckmask = 1.d0
+  endwhere
+
+  testvect = sum( thckmask * mask, 1 )
+
+  ! calculate the angle of the normal in cart. (x,y) system w/ 0 deg. at 12 O'clock,
+  ! 90 deg. at 3 O'clock, etc.
+  if( sum( sum( thckmask, 1 ) ) == 1.d0 )then    ! exactly one ice-free (corner) cell
+     phi = sum( sum( thckmask * maskcorners, 1 ) )
+  else
+     if( any( testvect == 360.d0 ) )then
+        if( sum( testvect ) == 450.d0 )then
+           phi = 45.d0
+        elseif( sum( testvect ) == 630.d0 )then
+           phi = 315.d0
+        else
+           phi = 0.d0
+        end if
+     elseif( all( testvect /= 360.d0 ) )then
+        ! mean of the non-zero edge angles. Use count() for the divisor:
+        ! the previous form, sum(testvect/testvect, mask), evaluated 0/0
+        ! for masked-out entries, which is undefined and traps under
+        ! floating-point exception checking.
+        phi = sum( testvect ) / real( count( testvect /= 0.d0 ), kind=dp )
+     end if
+  end if
+
+  ! define normal vectors and change definition of loc_array based on this angle
+  if( phi == 0.d0 )then
+     loc_latbc(1) = loc_array(ew,ns-1); loc_latbc(4) = loc_array(ew,ns); loc_latbc(5) = loc_array(ew,ns-2)
+     loc_latbc(2) = loc_array(ew+1,ns); loc_latbc(3) = loc_array(ew-1,ns)
+     normal = (/ 0.d0, 1.d0 /); fwdorbwd = (/ -1.d0, -1.d0 /)
+  elseif( phi == 45.d0 )then
+     loc_latbc(1) = loc_array(ew-1,ns); loc_latbc(2) = loc_array(ew,ns); loc_latbc(3) = loc_array(ew-2,ns)
+     loc_latbc(6) = loc_array(ew,ns-1); loc_latbc(4) = loc_array(ew,ns); loc_latbc(5) = loc_array(ew,ns-2)
+     normal = (/ 1.d0/sqrt(2.d0), 1.d0/sqrt(2.d0) /); fwdorbwd = (/ -1.d0, -1.d0 /)
+  elseif( phi == 90.d0 )then
+     loc_latbc(1) = loc_array(ew-1,ns); loc_latbc(2) = loc_array(ew,ns); loc_latbc(3) = loc_array(ew-2,ns)
+     loc_latbc(4) = loc_array(ew,ns+1); loc_latbc(5) = loc_array(ew,ns-1)
+     normal = (/ 1.d0, 0.d0 /); fwdorbwd = (/ -1.d0, -1.d0 /)
+  elseif( phi == 135.d0 )then
+     loc_latbc(1) = loc_array(ew-1,ns); loc_latbc(2) = loc_array(ew,ns); loc_latbc(3) = loc_array(ew-2,ns)
+     loc_latbc(6) = loc_array(ew,ns+1); loc_latbc(4) = loc_array(ew,ns+2); loc_latbc(5) = loc_array(ew,ns)
+     normal = (/ 1.d0/sqrt(2.d0), -1.d0/sqrt(2.d0) /); fwdorbwd = (/ -1.d0, 1.d0 /)
+  elseif( phi == 180.d0 )then
+     loc_latbc(1) = loc_array(ew,ns+1); loc_latbc(4) = loc_array(ew,ns+2); loc_latbc(5) = loc_array(ew,ns)
+     loc_latbc(2) = loc_array(ew+1,ns); loc_latbc(3) = loc_array(ew-1,ns)
+     normal = (/ 0.d0, -1.d0 /); fwdorbwd = (/ 1.d0, 1.d0 /)
+  elseif( phi == 225.d0 )then
+     loc_latbc(1) = loc_array(ew+1,ns); loc_latbc(2) = loc_array(ew+2,ns); loc_latbc(3) = loc_array(ew,ns)
+     loc_latbc(6) = loc_array(ew,ns+1); loc_latbc(4) = loc_array(ew,ns+2); loc_latbc(5) = loc_array(ew,ns)
+     normal = (/ -1.d0/sqrt(2.d0), -1.d0/sqrt(2.d0) /); fwdorbwd = (/ 1.d0, 1.d0 /)
+  elseif( phi == 270.d0 )then
+     loc_latbc(1) = loc_array(ew+1,ns); loc_latbc(2) = loc_array(ew+2,ns); loc_latbc(3) = loc_array(ew,ns)
+     loc_latbc(4) = loc_array(ew,ns+1); loc_latbc(5) = loc_array(ew,ns-1)
+     normal = (/ -1.d0, 0.d0 /); fwdorbwd = (/ 1.d0, 1.d0 /)
+  else ! phi == 315.d0
+     loc_latbc(1) = loc_array(ew+1,ns); loc_latbc(2) = loc_array(ew+2,ns); loc_latbc(3) = loc_array(ew,ns)
+     loc_latbc(6) = loc_array(ew,ns-1); loc_latbc(4) = loc_array(ew,ns); loc_latbc(5) = loc_array(ew,ns-2)
+     normal = (/ -1.d0/sqrt(2.d0), 1.d0/sqrt(2.d0) /); fwdorbwd = (/ 1.d0, -1.d0 /)
+  end if
+
+  return
+
+end subroutine getlatboundinfo
+
+!***********************************************************************
+
+function indshift( which, ew, ns, up, ewn, nsn, upn, loc_array, thck )
+
+  ! Rearrange indices slightly at sfc, bed, and lateral boundaries,
+  ! so that values one index inside of the domain are used for, e.g., eff. visc.
+  !
+  ! Function output is the vector (/ upshift, ewshift, nsshift /) of index
+  ! shifts for portions of 'othervel' and 'efvs' extracted near domain
+  ! boundaries. NOTE that this contains duplication of some of the code in
+  ! the subroutine "getlatboundinfo", and the two could be combined at some
+  ! point.
+  !
+  ! 'which' selects the treatment: 0 = interior to lateral boundaries
+  ! (no ew/ns shift), 1 = at a lateral boundary.
+  !
+  !NOTE: Function indshift does not use loc_array. Retained so the caller's
+  ! interface is unchanged.
+
+  implicit none
+
+  integer, intent(in) :: which
+  integer, intent(in) :: ew, ns, up, ewn, nsn, upn
+  integer, dimension(ewn,nsn), intent(in) :: loc_array
+  real(dp), dimension(3,3), intent(in) :: thck
+
+  integer, dimension(3) :: indshift
+
+  ! NOTE: these locals were previously initialized in their declarations,
+  ! which in Fortran gives them the (implicit) save attribute -- stale
+  ! values could then leak between calls when no branch below assigned
+  ! them. They are now reset explicitly on every call.
+  integer :: upshift, ewshift, nsshift
+
+  real(dp), dimension(3,3) :: mask, maskcorners
+  real(dp), dimension(3,3) :: thckmask
+  real(dp), dimension(3) :: testvect
+  real(dp) :: phi
+
+  upshift = 0; ewshift = 0; nsshift = 0
+  phi = 0.d0
+
+  ! edge / corner angle tables, as in 'getlatboundinfo'
+  mask(:,1) = (/ 0.d0, 180.d0, 0.d0 /)
+  mask(:,2) = (/ 270.d0, 0.d0, 90.d0 /)
+  mask(:,3) = (/ 0.d0, 360.d0, 0.d0 /)
+  maskcorners(:,1) = (/ 225.d0, 0.d0, 135.d0 /)
+  maskcorners(:,2) = (/ 0.d0, 0.d0, 0.d0 /)
+  maskcorners(:,3) = (/ 315.d0, 0.d0, 45.d0 /)
+
+  if( up == 1 )then !! first treat bed/sfc, which aren't complicated
+     upshift = 1
+  elseif( up == upn )then
+     upshift = -1
+  else
+     upshift = 0
+  end if
+
+  !NOTE - Remove hardwiring of case numbers?
+  select case(which)
+
+  case(0) !! internal to lateral boundaries; no shift to ew,ns indices
+
+     ewshift = 0; nsshift = 0
+
+  case(1) !! at lateral boundaries; shift to ew,ns may be non-zero
+
+     where( thck /= 0.d0 )
+        thckmask = 0.d0
+     elsewhere( thck == 0.d0 )
+        thckmask = 1.d0
+     endwhere
+
+     testvect = sum( thckmask * mask, 1 )
+
+     ! calculate the angle of the normal in cart. (x,y) system w/ 0 deg. at 12 O'clock, 90 deg. at 3 O'clock, etc.
+     if( sum( sum( thckmask, 1 ) ) == 1.d0 )then
+        phi = sum( sum( thckmask * maskcorners, 1 ) )
+     else
+        if( any( testvect == 360.d0 ) )then
+           if( sum( testvect ) == 450.d0 )then
+              phi = 45.d0
+           elseif( sum( testvect ) == 630.d0 )then
+              phi = 315.d0
+           else
+              phi = 0.d0
+           end if
+        elseif( all( testvect /= 360.d0 ) )then
+           ! mean of the non-zero edge angles; count() avoids evaluating
+           ! 0/0 as the previous sum(testvect/testvect, mask) form did
+           phi = sum( testvect ) / real( count( testvect /= 0.d0 ), kind=dp )
+        end if
+     end if
+
+     ! define shift to indices based on this angle
+     if( phi == 0.d0 )then
+        nsshift = -1; ewshift = 0
+     elseif( phi == 45.d0 )then
+        nsshift = -1; ewshift = -1
+     elseif( phi == 90.d0 )then
+        nsshift = 0; ewshift = -1
+     elseif( phi == 135.d0 )then
+        nsshift = 1; ewshift = -1
+     elseif( phi == 180.d0 )then
+        nsshift = 1; ewshift = 0
+     elseif( phi == 225.d0 )then
+        nsshift = 1; ewshift = 1
+     elseif( phi == 270.d0 )then
+        nsshift = 0; ewshift = 1
+     elseif( phi == 315.d0 )then
+        nsshift = -1; ewshift = 1
+     end if
+
+  end select
+
+  indshift = (/ upshift, ewshift, nsshift /)
+
+  return
+
+end function indshift
+
+!***********************************************************************
+
+function vertintg(upn, sigma, in)
+
+  ! Trapezoidal vertical integral of 'in' over the model levels, weighted
+  ! by the module-level layer spacings 'dups'.
+  ! NOTE: 'sigma' is not referenced in the body; retained for interface
+  ! compatibility (presumably dups encodes the sigma spacing -- confirm).
+
+  implicit none
+
+  integer, intent(in) :: upn
+  real(dp), dimension(:), intent(in) :: sigma
+  real(dp), dimension(:), intent(in) :: in
+  real(dp) :: vertintg
+
+  integer :: up
+
+  vertintg = 0.d0
+
+  ! accumulate interval contributions from the bottom upward (same order
+  ! as before, so floating-point rounding is unchanged)
+  do up = upn-1, 1, -1
+     vertintg = vertintg + ( in(up) + in(up+1) ) * dups(up)
+  end do
+
+  ! single halving at the end completes the trapezoid rule
+  vertintg = 0.5d0 * vertintg
+
+  return
+
+end function vertintg
+
+!***********************************************************************
+
+subroutine geom2derscros(ewn, nsn, &
+                         dew, dns, &
+                         ipvr, stagthck, opvrewns)
+
+  ! geometric (2nd) cross-deriv. for generic input variable 'ipvr', output
+  ! as 'opvrewns'; zeroed where the staggered thickness is zero (no ice).
+  ! Loop-based per-point form (equivalent to the earlier whole-array /
+  ! eoshift formulations, kept as loops for gnu compiler compatibility).
+
+  implicit none
+
+  integer, intent(in) :: ewn, nsn
+  real(dp), intent(in) :: dew, dns
+  real(dp), intent(out), dimension(:,:) :: opvrewns
+  real(dp), intent(in), dimension(:,:) :: ipvr, stagthck
+
+  integer :: ew, ns
+  real(dp) :: dewdns
+
+  dewdns = dew*dns
+
+  do ns = 1, nsn-1
+     do ew = 1, ewn-1
+        if (stagthck(ew,ns) /= 0.d0) then
+           ! standard 4-point cross-derivative stencil on the staggered grid
+           opvrewns(ew,ns) = ( ipvr(ew+1,ns+1) - ipvr(ew+1,ns) &
+                             - ipvr(ew,ns+1) + ipvr(ew,ns) ) / dewdns
+        else
+           opvrewns(ew,ns) = 0.d0
+        end if
+     end do
+  end do
+
+  return
+
+end subroutine geom2derscros
+
+
+!***********************************************************************
+
+subroutine geom2ders(ewn, nsn, &
+                     dew, dns, &
+                     ipvr, stagthck, &
+                     opvrew, opvrns)
+
+  ! geometric 2nd derivs. for generic input variable 'ipvr',
+  ! output as 'opvrew'/'opvrns' (includes 'upwinding' for boundary values).
+  ! (An older header described this as a "1st deriv."; the stencils used in
+  ! 'centerew'/'centerns' are 4-point second-derivative stencils -- see
+  ! those helpers.)
+  ! Interior points get centered stencils; the first/last staggered rows
+  ! and columns get one-sided ("upwinded") stencils via whichway/boundyew/
+  ! boundyns. Points with zero staggered thickness (no ice) are zeroed.
+
+  implicit none
+
+  integer, intent(in) :: ewn, nsn
+  real(dp), intent(in) :: dew, dns
+  real(dp), intent(out), dimension(:,:) :: opvrew, opvrns
+  real(dp), intent(in), dimension(:,:) :: ipvr, stagthck
+
+  integer :: ew, ns
+  real(dp) :: dewsq4, dnssq4       ! 4*dew^2 and 4*dns^2 stencil denominators
+
+  integer :: pt(2)                 ! (direction, start index) from whichway
+
+  dewsq4 = 4.d0 * dew * dew
+  dnssq4 = 4.d0 * dns * dns
+
+  ! interior points: centered stencils in both directions
+  do ns = 2, nsn-2
+     do ew = 2, ewn-2
+        if (stagthck(ew,ns) > 0.d0) then
+           opvrew(ew,ns) = centerew(ew,ns,ipvr,dewsq4)
+           opvrns(ew,ns) = centerns(ew,ns,ipvr,dnssq4)
+        else
+           opvrew(ew,ns) = 0.d0
+           opvrns(ew,ns) = 0.d0
+        end if
+     end do
+  end do
+
+  ! *** 2nd order boundaries using upwinding
+
+!NOTE - If nhalo = 2, then I'm not clear on why upwinding is needed.
+! Where are these values used in the computation?
+! I don't think they should be used for any interior halo cells.
+! Are they needed at the global boundaries? If so, then need to use the correct indices for global boundaries.
+! Would be easier if we could set global halos in a way that gives reasonable 2nd derivs
+! without a special case.
+
+  ! west and east staggered columns (ew = 1 and ew = ewn-1): one-sided in ew
+  do ew = 1, ewn-1, ewn-2
+
+     pt = whichway(ew)
+
+     do ns = 2, nsn-2
+        if (stagthck(ew,ns) > 0.d0) then
+           opvrew(ew,ns) = boundyew(ns,pt,ipvr,dewsq4)
+           opvrns(ew,ns) = centerns(ew,ns,ipvr,dnssq4)
+        else
+           opvrew(ew,ns) = 0.d0
+           opvrns(ew,ns) = 0.d0
+        end if
+     end do
+
+  end do
+
+  ! south and north staggered rows (ns = 1 and ns = nsn-1): one-sided in ns
+  do ns = 1, nsn-1, nsn-2
+
+     pt = whichway(ns)
+
+     do ew = 2, ewn-2
+        if (stagthck(ew,ns) > 0.d0) then
+           opvrew(ew,ns) = centerew(ew,ns,ipvr,dewsq4)
+           opvrns(ew,ns) = boundyns(ew,pt,ipvr,dnssq4)
+        else
+           opvrew(ew,ns) = 0.d0
+           opvrns(ew,ns) = 0.d0
+        end if
+     end do
+
+  end do
+
+  ! the four corners: one-sided in both directions
+  do ns = 1, nsn-1, nsn-2
+     do ew = 1, ewn-1, ewn-2
+        if (stagthck(ew,ns) > 0.d0) then
+           pt = whichway(ew)
+           opvrew(ew,ns) = boundyew(ns,pt,ipvr,dewsq4)
+           pt = whichway(ns)
+           opvrns(ew,ns) = boundyns(ew,pt,ipvr,dnssq4)
+        else
+           opvrew(ew,ns) = 0.d0
+           opvrns(ew,ns) = 0.d0
+        end if
+     end do
+  end do
+
+end subroutine geom2ders
+
+!***********************************************************************
+
+  function centerew(ew, ns, ipvr, dewsq4)
+
+    ! centered 2nd-derivative stencil in the ew direction on the staggered
+    ! grid: pair-sums of ipvr over rows ns,ns+1 at four ew offsets, combined
+    ! as (f(+2) + f(-1) - f(+1) - f(0)) / (4*dew^2)
+
+    implicit none
+
+    integer, intent(in) :: ew, ns
+    real(dp), intent(in) :: ipvr(:,:)
+    real(dp), intent(in) :: dewsq4
+    real(dp) :: centerew
+
+    real(dp) :: pair_e2, pair_w1, pair_e1, pair_c
+
+    pair_e2 = ipvr(ew+2,ns) + ipvr(ew+2,ns+1)
+    pair_w1 = ipvr(ew-1,ns) + ipvr(ew-1,ns+1)
+    pair_e1 = ipvr(ew+1,ns) + ipvr(ew+1,ns+1)
+    pair_c  = ipvr(ew,ns)   + ipvr(ew,ns+1)
+
+    centerew = (pair_e2 + pair_w1 - pair_e1 - pair_c) / dewsq4
+
+    return
+
+  end function centerew
+
+!***********************************************************************
+
+  function centerns(ew, ns, ipvr, dnssq4)
+
+    ! centered 2nd-derivative stencil in the ns direction on the staggered
+    ! grid: pair-sums of ipvr over columns ew,ew+1 at four ns offsets,
+    ! combined as (f(+2) + f(-1) - f(+1) - f(0)) / (4*dns^2)
+
+    implicit none
+
+    integer, intent(in) :: ew, ns
+    real(dp), intent(in) :: ipvr(:,:)
+    real(dp), intent(in) :: dnssq4
+    real(dp) :: centerns
+
+    real(dp) :: pair_n2, pair_s1, pair_n1, pair_c
+
+    pair_n2 = ipvr(ew,ns+2) + ipvr(ew+1,ns+2)
+    pair_s1 = ipvr(ew,ns-1) + ipvr(ew+1,ns-1)
+    pair_n1 = ipvr(ew,ns+1) + ipvr(ew+1,ns+1)
+    pair_c  = ipvr(ew,ns)   + ipvr(ew+1,ns)
+
+    centerns = (pair_n2 + pair_s1 - pair_n1 - pair_c) / dnssq4
+
+    return
+
+  end function centerns
+
+!***********************************************************************
+
+  function boundyew(ns,pt,ipvr,dewsq4)
+
+    ! one-sided ("upwinded") 2nd-derivative stencil in ew at a domain edge.
+    ! pt(1) is the marching direction (+1/-1) and pt(2) the starting ew
+    ! index (see 'whichway'); pair-sums are taken over rows ns and ns+1.
+
+    implicit none
+
+    integer, intent(in) :: ns
+    integer, intent(in) :: pt(2)
+    real(dp), intent(in) :: ipvr(:,:)
+    real(dp), intent(in) :: dewsq4
+    real(dp) :: boundyew
+
+    real(dp) :: s0, s1, s2, s3
+
+    ! pair-sums at the four points marching inward from the edge
+    s0 = sum(ipvr(pt(2),ns:ns+1))
+    s1 = sum(ipvr(pt(2)+pt(1),ns:ns+1))
+    s2 = sum(ipvr(pt(2)+2*pt(1),ns:ns+1))
+    s3 = sum(ipvr(pt(2)+3*pt(1),ns:ns+1))
+
+    boundyew = pt(1) * (3.d0 * s0 - 7.d0 * s1 + 5.d0 * s2 - s3) / dewsq4
+
+    return
+
+  end function boundyew
+
+!***********************************************************************
+
+  function boundyns(ew,pt,ipvr,dnssq4)
+
+    ! one-sided ("upwinded") 2nd-derivative stencil in ns at a domain edge.
+    ! pt(1) is the marching direction (+1/-1) and pt(2) the starting ns
+    ! index (see 'whichway'); pair-sums are taken over columns ew and ew+1.
+
+    implicit none
+
+    integer, intent(in) :: ew
+    integer, intent(in) :: pt(2)
+    real(dp), intent(in) :: ipvr(:,:)
+    real(dp), intent(in) :: dnssq4
+    real(dp) :: boundyns
+
+    real(dp) :: s0, s1, s2, s3
+
+    ! pair-sums at the four points marching inward from the edge
+    s0 = sum(ipvr(ew:ew+1,pt(2)))
+    s1 = sum(ipvr(ew:ew+1,pt(2)+pt(1)))
+    s2 = sum(ipvr(ew:ew+1,pt(2)+2*pt(1)))
+    s3 = sum(ipvr(ew:ew+1,pt(2)+3*pt(1)))
+
+    boundyns = pt(1) * (3.d0 * s0 - 7.d0 * s1 + 5.d0 * s2 - s3) / dnssq4
+
+    return
+
+  end function boundyns
+
+!***********************************************************************
+
+  function whichway(i)
+
+    ! Return the (direction, start index) pair used by the one-sided
+    ! boundary stencils: at the low edge (i == 1) march in the +1 direction
+    ! starting at index 1; otherwise march in the -1 direction starting one
+    ! past i.
+
+    implicit none
+
+    integer, intent(in) :: i
+    integer :: whichway(2)
+
+    if (i == 1) then
+       whichway(1) = 1
+       whichway(2) = 1
+    else
+       whichway(1) = -1
+       whichway(2) = i + 1
+    end if
+
+    return
+
+  end function whichway
+
+
+!***********************************************************************
+
+  function hsum(inp)
+
+    ! Collapse the horizontal (2nd and 3rd) dimensions of a 3-d block by
+    ! summation, returning a vector along the first (vertical) dimension.
+
+    implicit none
+
+    real(dp), dimension(:,:,:), intent(in) :: inp
+    real(dp), dimension(size(inp,dim=1)) :: hsum
+
+    real(dp), dimension(size(inp,dim=1),size(inp,dim=2)) :: colsum
+
+    ! reduce over dim 3 first, then dim 2 -- same reduction order as the
+    ! original nested sum, so rounding behavior is unchanged
+    colsum = sum(inp(:,:,:), dim=3)
+    hsum = sum(colsum, dim=2)
+
+    return
+
+  end function hsum
+
+!***********************************************************************
+
+subroutine putpcgc(value,col,row,pt)
+
+  ! Load a single nonzero coefficient into the sparse matrix, either by
+  ! appending to the Triad (val,col,row) arrays or by inserting directly
+  ! into the Trilinos matrix. Zero-valued entries are skipped.
+  !
+  ! NOTE: the optional 'pt' argument is accepted but not referenced here.
+  !
+  !*SFP* for now, ignoring the possibility of using JFNK w/ Trilinos ...
+
+  implicit none
+
+  integer, intent(in) :: row, col
+  integer, intent(in), optional :: pt
+  real(dp), intent(in) :: value
+
+  ! The former Picard and JFNK branches executed byte-identical statements,
+  ! so they are consolidated into a single guarded path (behavior unchanged).
+  if( nonlinear == HO_NONLIN_PICARD .or. nonlinear == HO_NONLIN_JFNK )then
+
+     if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+        ! Option to load entry into Triad sparse matrix format:
+        ! append the entry and advance the nonzero counter
+        if (value /= 0.d0) then
+           pcgval(ct_nonzero) = value
+           pcgcol(ct_nonzero) = col
+           pcgrow(ct_nonzero) = row
+           ct_nonzero = ct_nonzero + 1
+        end if
+#ifdef TRILINOS
+     else
+        ! Option to load entry directly into Trilinos sparse matrix
+        if (value /= 0.d0) then
+           !AGS: If we find that sparsity changes inside a time step,
+           ! consider adding entry even for value==0.
+           call putintotrilinosmatrix(row, col, value)
+
+           !JEFF: Verify matrix matches for globalIDs case
+           ! call verify_trilinos_rowcolval(row, col, value)
+        end if
+#endif
+     end if ! end of "if using Triad or Trilinos storage format" construct
+
+  end if ! end of "if using Picard or JFNK for nonlinear solve" construct
+
+  return
+
+end subroutine putpcgc
+
+!***********************************************************************
+
+  subroutine distributed_create_partition(ewn, nsn, upstride, indxmask, mySize, myIndices, myX, myY, myZ)
+
+  ! distributed_create_partition builds myIndices ID vector for Trilinos using (ns,ew) coordinates in indxmask
+  ! upstride is the total number of vertical layers including any ghosts
+  ! indxmask is ice mask with non-zero values for cells with ice.
+  ! mySize is number of elements in myIndices
+  ! myIndices is integer vector in which IDs are def
+  ! myX, myY, myZ return nominal node coordinates for each entry of
+  ! myIndices (see scaling note below).
+
+  use parallel
+
+  implicit none
+
+  integer, intent(in) :: ewn, nsn, upstride
+  integer, intent(in), dimension(:,:) :: indxmask
+  integer, intent(in) :: mySize
+  integer, intent(out), dimension(:) :: myIndices
+  real(dp), intent(out), dimension(:) :: myX, myY, myZ
+
+  integer :: ew, ns, pointno
+  integer :: glblID, upindx, slnindx
+
+  ! Step through indxmask, but exclude halo
+
+! SFP: debug line below
+! print *, 'mySize = ', mySize
+
+  do ns = 1+staggered_lhalo, size(indxmask,2)-staggered_uhalo
+    do ew = 1+staggered_lhalo, size(indxmask,1)-staggered_uhalo
+       if ( indxmask(ew,ns) /= 0 ) then
+          pointno = indxmask(ew,ns)  ! Note that pointno starts at value 1. If we step through correctly then consecutive values
+          ! write(*,*) "pointno = ", pointno
+          ! first layer ID is set from parallel_globalID, rest by incrementing through layers
+          glblID = parallel_globalID(ns, ew, upstride)
+          ! write(*,*) "global ID (ew, ns) = (", ew, ",", ns, ") ", glblID
+          upindx = 0
+          do slnindx = (pointno - 1) * upstride + 1, pointno * upstride
+             ! slnindx is offset into myIndices for current ice cell's layers. upindx is offset from current globalID.
+             myIndices(slnindx) = glblID + upindx
+             ! Return coordinates for nodes. Assumes structured grid with dx=1, dy=1.
+             ! NOTE(review): myZ is scaled by 1.0e-6 below although an older
+             ! comment said dz=1.0e6 -- confirm the intended scale.
+             myX(slnindx) = (ewlb+ew) * 1.0
+             myY(slnindx) = (nslb+ns) * 1.0
+             myZ(slnindx) = upindx * 1.0e-6
+             upindx = upindx + 1
+             ! write(*,*) "myIndices offset = ", slnindx
+          end do
+       endif
+    end do
+  end do
+
+  return
+
+  end subroutine distributed_create_partition
+
+!***********************************************************************
+
+  function distributed_globalID_to_localindex(globalID)
+
+  ! distributed_globalID_to_localindex converts a globalID to its position in the solution vector.
+  ! It is a utility function that is not currently used, but retained for future debugging capability.
+  ! The function searches loc2_array(:,:,1) for the globalID closest to the
+  ! given globalID, then uses this difference and loc2_array(:,:,2) for the same ew,ns coordinates
+  ! to calculate (and return) the corresponding index.
+  ! Result is checked using myIndices.
+  ! loc2_array is assumed to be a module-level variable set by the routine getlocationarray.
+  ! myIndices is assumed to be a module-level variable which holds the local processor's ID partition list.
+  ! This function will work for both globalIDs and regular partitions.
+  ! In the latter case it is redundant, because the ID will be at the same index, so it is just an identity function.
+  ! Original implementation using myIndices, and then fast inverse, by JEFF 11/2010 and 11/2011
+  ! Current loc2_array-based implementation by PW 12/2011
+  !
+  ! Without the globalIDs preprocessor flag this is the identity function.
+  ! With it, an exhaustive search finds the largest stored ID not exceeding
+  ! 'globalID'; the offset from that ID plus the stored local index gives
+  ! the answer, which is then verified against myIndices (hard stop on
+  ! mismatch).
+
+  use parallel
+
+  implicit none
+
+  integer, intent(in) :: globalID
+
+  integer :: distributed_globalID_to_localindex
+
+#ifdef globalIDs
+  !JEFF integer :: GlobalIDsGet ! C++ function with return value
+  integer :: ew, ns
+  integer :: minew, minns           ! coordinates of best candidate so far
+  integer :: curdiff, mindiff       ! current / smallest non-negative ID gap
+  integer :: lindex                 ! reconstructed local index
+
+  !LOOP NOTE: Please confirm that these are the correct loop bounds.
+  ! loc2_array-based search
+  minew = 1
+  minns = 1
+  mindiff = globalID
+! do ns = 1+staggered_lhalo,size(loc2_array,2)-staggered_uhalo
+! do ew = 1+staggered_lhalo,size(loc2_array,1)-staggered_uhalo
+  ! loc2_array(:,:,1) defined for all ew,ns,
+  ! while loc2_array(:,:,2) == 0 for halos and ice-free loactions
+  do ns = 1,size(loc2_array,2)
+     do ew = 1,size(loc2_array,1)
+        curdiff = globalID-loc2_array(ew,ns,1)
+        ! keep the closest ID that does not exceed the target
+        if ((curdiff >= 0) .and. (curdiff < mindiff)) then
+           mindiff = globalID-loc2_array(ew,ns,1)
+           minew = ew
+           minns = ns
+        endif
+     enddo
+  enddo
+  lindex = loc2_array(minew,minns,2) + mindiff
+
+  ! cross-check the reconstructed index against the partition list
+  if ( myIndices(lindex) == globalID ) then
+     distributed_globalID_to_localindex = lindex
+     return
+  else
+     write(*,*) "Error in distributed_globalID_to_localindex()."
+     write(*,*) "GlobalID to match = ", globalID
+     write(*,*) "GlobalID found = ", myIndices(lindex), "(lindex = ",lindex,")"
+     stop
+  endif
+
+  ! linear search from beginning of myIndices.
+  ! Inefficient. There could be some ordering of myIndices that would enable us to us a binary search. Not certain at this time.
+  !JEFF do lindex = 1, size(myIndices)
+  !JEFF if ( myIndices(lindex) == globalID ) then
+  !JEFF distributed_globalID_to_localindex = lindex
+  !JEFF return
+  !JEFF endif
+  !JEFF end do
+
+#else
+  distributed_globalID_to_localindex = globalID
+  return
+#endif
+
+  end function distributed_globalID_to_localindex
+
+!***********************************************************************
+
+  subroutine verify_trilinos_rowcolval(row, col, value)
+  ! Translates back globalID row and col values to their original grid values and outputs the set
+  ! For verification of the matrix passed to Trilinos.
+  ! Debug-only helper: writes one line per entry to stdout.
+  ! JEFF November 2010
+  integer, intent(in) :: row, col
+  real(dp), intent(in) :: value
+  integer :: locrow, loccol     ! local (translated) row/col indices
+
+#ifdef globalIDs
+  ! with globalIDs, map the IDs back to local solution-vector positions
+  locrow = distributed_globalID_to_localindex(row)
+  loccol = distributed_globalID_to_localindex(col)
+#else
+  ! without globalIDs, indices are already local
+  locrow = row
+  loccol = col
+#endif
+
+  write (*,*) "Row = ", locrow, " Col = ", loccol, " Value = ", value
+  end subroutine verify_trilinos_rowcolval
+
+!***********************************************************************
+
+function scalebasalbc( coeffblock, bcflag, lateralboundry, beta, efvs )
+
+  ! *SFP* This function is used to scale the matrix coeffs and rhs vector coeff
+  ! of the basal boundary condition when using JFNK for the nonlinear iteration
+  ! (iteration on viscosity).
+  !
+  ! Returns 1 unless the JFNK path is active (see the hardwired case number
+  ! below) and bcflag(1)==1, in which case the magnitude of the coefficient
+  ! associated with the vertical stress gradients is returned.
+  implicit none
+
+  integer, dimension(2), intent(in) :: bcflag
+  logical, intent(in) :: lateralboundry   ! fixed: intent attribute was missing
+  real(dp), dimension(:,:,:), intent(in) :: coeffblock
+  real(dp), dimension(:,:,:), intent(in) :: efvs   ! only used by the commented-out 'scale' formula below
+  real(dp), intent(in) :: beta !NOTE - Remove? Commented out in computation below
+
+  real(dp) :: scale, scalebasalbc
+
+  !NOTE - hardwired case number: presumably should be the named constant for
+  ! the JFNK scheme (compare HO_NONLIN_JFNK in 'putpcgc') -- confirm before
+  ! replacing.
+  if( nonlinear == 1 )then
+     if( bcflag(1) == 1 )then
+
+        ! use the dominant terms in the coeff associated with the velocity under consideration
+        !scale = beta / ( sum( efvs(2,:,:) ) / 4.d0 ) * (len0 / thk0)
+
+        ! Use the magnitude of the coeff associated with the vert stress gradients.
+        ! NOTE that relevant coeffs are stored in diff parts of block depending
+        ! on type of boundary
+        if( lateralboundry )then
+           scale = abs( coeffblock(3,3,3) )
+        else
+           scale = abs( coeffblock(3,2,2) )
+        end if
+
+        ! guard: never scale by zero (would wipe out the matrix row)
+        if( scale <= 0.d0 )then
+           scale = 1.d0
+        end if
+
+     else
+        scale = 1.d0
+     end if
+
+  else
+     scale = 1.d0
+  end if
+
+  scalebasalbc = scale
+
+  return
+
+end function scalebasalbc
+
+!***********************************************************************
+
+subroutine assign_resid(model, uindx, umask, &
+                        d2thckdewdns, d2usrfdewdns, pcgsize, gx_flag, matrixA, matrixC, L2norm, ewn, nsn)
+
+  ! Stash the residual-evaluation inputs in model%solver_data so the
+  ! nonlinear (JFNK) residual routines can retrieve them later.
+
+  use iso_c_binding
+  use glide_types, only : glide_global_type
+  use glimmer_sparse_type, only : sparse_matrix_type
+
+  implicit none
+
+  type(glide_global_type) ,intent(inout) :: model
+  type(sparse_matrix_type) ,intent(in) :: matrixA, matrixC
+
+  integer ,intent(in) :: ewn, nsn
+  integer, dimension(2) ,intent(in) :: pcgsize
+  integer ,intent(in) :: gx_flag(2*pcgsize(1)) ! 0 :reg cell
+  integer ,intent(in) :: uindx(ewn-1,nsn-1), umask(ewn-1,nsn-1)
+  real(dp) ,intent(in) :: L2norm
+  real(dp) ,intent(in) :: d2thckdewdns(ewn-1,nsn-1), d2usrfdewdns(ewn-1,nsn-1)
+
+  ! whole-array section assignments replace the original element loops
+  model%solver_data%ui(1:ewn-1,1:nsn-1) = uindx
+  model%solver_data%um(1:ewn-1,1:nsn-1) = umask
+  model%solver_data%d2thckcross(1:ewn-1,1:nsn-1) = d2thckdewdns
+  model%solver_data%d2usrfcross(1:ewn-1,1:nsn-1) = d2usrfdewdns
+
+  model%solver_data%pcgsize = pcgsize
+  model%solver_data%gxf(1:2*pcgsize(1)) = gx_flag
+  model%solver_data%L2norm = L2norm
+  model%solver_data%matrixA = matrixA
+  model%solver_data%matrixC = matrixC
+
+end subroutine assign_resid
+
+!-------------------------------------------------------------------
+
+! uvec is either u^k-1 or v^k-1 on input and Av-b or Cu-d on output
+
+subroutine res_vect ( matrix, uvec, bvec, nu, g_flag, L2square, whatsparse)
+
+! Compute the residual for one velocity component:
+!   uvec <- matrix*uvec - bvec   (uvec is u^k-1 or v^k-1 on input,
+!                                 Av-b or Cu-d on output)
+! and return in L2square the (globally reduced) squared L2 norm of the
+! residual, with ghost-cell entries down-weighted by scale_ghosts.
+
+use parallel
+
+use glimmer_paramets, only : dp
+use glimmer_sparse_type
+use glimmer_sparse
+use glide_mask
+use profile
+
+implicit none
+
+integer, intent(in) :: nu                      ! size of uvec and bvec (fixed: intent was missing)
+integer, intent(in) :: whatsparse              ! matrix storage scheme in use (fixed: intent was missing)
+integer, dimension(nu), intent(in) :: g_flag   ! 0 :reg cell
+                                               ! 1 :top ghost, 2 :base ghost
+
+type(sparse_matrix_type), intent(in) :: matrix
+
+real(dp), dimension(nu), intent(in) :: bvec
+real(dp), dimension(nu), intent(inout) :: uvec
+real(dp), dimension(nu) :: Au_b_wig
+real(dp), intent(out) :: L2square
+
+! weight for ghost-cell contributions to the residual norm. Now a
+! parameter: it was an initialized local, which in Fortran carries an
+! implicit 'save'; it is never modified, so behavior is unchanged.
+real(dp), parameter :: scale_ghosts = 0.0d0
+
+integer :: i, j, nele
+
+! calculate residual vector of the u OR v component
+
+ Au_b_wig = 0d0 ! regular+ghost cells
+
+call t_startf("res_vect_matvec")
+ if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+
+    ! triad-format matvec: accumulate val*uvec(col) into the row entry
+    do nele = 1, matrix%nonzeros
+
+       i = matrix%row(nele)
+       j = matrix%col(nele)
+       Au_b_wig(i) = Au_b_wig(i) + matrix%val(nele) * uvec(j)
+
+    enddo
+
+#ifdef TRILINOS
+ else
+    call matvecwithtrilinos(uvec, Au_b_wig);
+#endif
+ endif
+call t_stopf("res_vect_matvec")
+
+ do i = 1, nu
+    Au_b_wig(i) = Au_b_wig(i) - bvec(i)
+ enddo
+
+ ! return the residual in place of the input velocity
+ uvec = Au_b_wig
+
+! AGS: Residual norm includes scaling to decrease importance of ghost values
+! By calling it a redefinition of an inner product, it is kosher.
+ L2square = 0.d0
+ do i = 1, nu
+    if (g_flag(i) == 0) then
+       L2square = L2square + Au_b_wig(i) * Au_b_wig(i)
+    else
+       L2square = L2square + scale_ghosts * Au_b_wig(i) * Au_b_wig(i)
+    endif
+ end do
+
+ !JEFF Sum L2square across nodes
+call t_startf("res_vect_reduce")
+ L2square = parallel_reduce_sum(L2square)
+call t_stopf("res_vect_reduce")
+
+ return
+
+end subroutine res_vect
+
+!-------------------------------------------------------------------
+
+subroutine res_vect_jfnk ( matrixA, matrixC, uvec, bvec, nu1, nu2, g_flag, L2square, whatsparse)
+
+! similar to res_vect, but state vector uvec and rhs vector bvec now hold both
+! velocity components: uvec(1:nu1) = u, uvec(nu1+1:nu2) = v.
+! A and C matrices are separate, but eventually could be combined.
+! On output uvec is overwritten with the stacked residual [A*u-b ; C*v-d].
+! NOTE(review): unlike res_vect, no parallel_reduce_sum is applied to
+! L2square here -- confirm this is intended for the JFNK path.
+
+use glimmer_paramets, only : dp
+use glimmer_sparse_type
+use glimmer_sparse
+use glide_mask
+
+implicit none
+
+integer, intent(in) :: nu1 ! size of each component block (u or v)
+integer, intent(in) :: nu2 ! total size of uvec and bvec (= 2*nu1)
+integer, intent(in) :: whatsparse ! which sparse solver is in use
+
+type(sparse_matrix_type), intent(in) :: matrixA, matrixC
+
+integer, dimension(nu2), intent(in) :: g_flag ! 0=reg cell, 1: top ghost, 2: base ghost
+real(dp), dimension(nu2), intent(in) :: bvec
+real(dp), dimension(nu2), intent(inout) :: uvec ! in: [u ; v]; out: residual
+real(dp), intent(out) :: L2square ! ghost-weighted squared L2 norm of residual
+
+real(dp), dimension(nu1) :: Au_b_wig, Cv_d_wig ! component residuals incl. ghosts
+integer :: i, j, nele
+
+! weight applied to ghost-cell contributions in the norm (0 => ignore ghosts)
+real(dp), parameter :: scale_ghosts = 0.0d0
+
+! calculate residual vector of the u and v component
+
+ Au_b_wig = 0d0 ! regular+ghost cells
+ Cv_d_wig = 0d0 ! regular+ghost cells
+
+ if (whatsparse /= STANDALONE_TRILINOS_SOLVER) then
+
+ ! A acts on the u block (COO sparse matvec)
+ do nele = 1, matrixA%nonzeros
+ i = matrixA%row(nele)
+ j = matrixA%col(nele)
+ Au_b_wig(i) = Au_b_wig(i) + matrixA%val(nele) * uvec(j)
+ enddo
+
+ ! C acts on the v block, stored in the upper half of uvec
+ do nele = 1, matrixC%nonzeros
+ i = matrixC%row(nele)
+ j = matrixC%col(nele)
+ Cv_d_wig(i) = Cv_d_wig(i) + matrixC%val(nele) * uvec(nu1+j)
+ enddo
+
+#ifdef TRILINOS
+ else
+
+ call matvecwithtrilinos(uvec(1:nu1), Au_b_wig);
+ call matvecwithtrilinos(uvec(nu1+1:nu2), Cv_d_wig);
+#endif
+ endif
+
+! subtract the right-hand-side blocks
+ do i = 1, nu1
+ Au_b_wig(i) = Au_b_wig(i) - bvec(i)
+ Cv_d_wig(i) = Cv_d_wig(i) - bvec(nu1+i)
+ enddo
+
+! to do: combine A and C
+
+! overwrite uvec with the stacked residual
+ do i = 1, nu1
+ uvec(i) = Au_b_wig(i)
+ uvec(nu1+i) = Cv_d_wig(i)
+ enddo
+
+! AGS: Residual norm includes scaling to decrease importance of ghost values
+! By calling it a redefinition of an inner product, it is kosher.
+! (An earlier variant accumulated the norm from Au_b_wig/Cv_d_wig separately;
+! see version history -- it was reported to give wrong convergence.)
+!
+! BUG FIX: L2square is intent(out) and therefore undefined on entry, but the
+! accumulation below previously read it without initialization. Zero it first.
+ L2square = 0.d0
+ do i = 1, nu2
+ if (g_flag(i) == 0) then
+ L2square = L2square + uvec(i) * uvec(i)
+ else
+ L2square = L2square + scale_ghosts * uvec(i) * uvec(i)
+ endif
+ end do
+
+end subroutine res_vect_jfnk
+
+!-------------------------------------------------------------------
+
+subroutine slapsolve(xk_1, xk_size, c_ptr_to_object, NL_tol, pcgsize)
+
+! Newton (JFNK) driver: solves the nonlinear system F(x) = 0 for the stacked
+! velocity vector x = [v; u] using Newton iterations, where each linear Newton
+! step J*dx = -F is solved by the reverse-communication FGMRES routine below.
+! The Jacobian is never formed: J*w is approximated by a finite difference
+! of F, and the Picard linear solver serves as the preconditioner.
+!
+! xk_1 : in/out, stacked velocity unknowns (updated in place each Newton step)
+! xk_size : number of unknowns, as passed through the C interface
+! c_ptr_to_object : C pointer to the glide model instance (holds solver_data)
+! NL_tol : nonlinear (Newton) tolerance used to build the stopping target
+! pcgsize : pcgsize(1) = number of unknowns per velocity component
+
+ use iso_c_binding
+ use glimmer_paramets, only : dp
+ use glide_types ,only : glide_global_type
+ use parallel
+
+ implicit none
+
+ real(dp), dimension(:), intent(out) :: xk_1
+ integer(c_int) ,intent(in) ,value :: xk_size
+ type(c_ptr) ,intent(inout) :: c_ptr_to_object
+ real(dp) ,intent(in) :: NL_tol
+ integer, dimension(2) :: pcgsize
+
+ type(glide_global_type) ,pointer :: fptr=>NULL()
+
+ real(dp), dimension(:), allocatable :: xk_1_plus, vectx
+ real(dp), dimension(:), allocatable :: dx, F, F_plus
+ real(dp), dimension(:), allocatable :: wk1, wk2, rhs
+ real(dp), dimension(:,:), allocatable :: vv, wk ! FGMRES Krylov basis / work space
+ real(dp) :: L2norm_wig, tol, gamma_l, epsilon, NL_target
+ integer :: tot_its, itenb, maxiteGMRES, iout, icode
+ ! img = FGMRES restart (Krylov subspace) size; kmax = max Newton iterations
+ integer, parameter :: img = 20, img1 = img+1, kmax = 500
+ integer :: k
+
+ type(sparse_matrix_type) :: matrixA, matrixC
+ real(dp) :: L2norm
+
+ allocate( vectx(2*pcgsize(1)), xk_1_plus(2*pcgsize(1)) )
+ allocate( F(2*pcgsize(1)), F_plus(2*pcgsize(1)), dx(2*pcgsize(1)) )
+ allocate( wk1(2*pcgsize(1)), wk2(2*pcgsize(1)), rhs(2*pcgsize(1)) )
+ allocate( vv(2*pcgsize(1),img1), wk(2*pcgsize(1),img) )
+
+! Iteration loop
+
+ do k = 1, kmax
+
+ ! evaluate nonlinear residual F(x^k-1); flag 0 => (re)assemble matrices
+ call calc_F (xk_1, F, xk_size, c_ptr_to_object, 0)
+
+ call c_f_pointer(c_ptr_to_object,fptr) ! convert C ptr to F ptr
+ L2norm = fptr%solver_data%L2norm
+ ! NOTE(review): matrixA/matrixC are copied out here but not referenced
+ ! again in this routine -- confirm whether these copies are needed.
+ matrixA = fptr%solver_data%matrixA
+ matrixC = fptr%solver_data%matrixC
+
+! calcoffdiag = .false. ! next time calling calc_F, DO NOT save off diag matrix components
+
+ L2norm_wig = sqrt(DOT_PRODUCT(F,F)) ! with ghost
+
+!==============================================================================
+! -define nonlinear target (if k=1)
+! -check at all k if target is reached
+!==============================================================================
+
+ if (k == 1) NL_target = NL_tol * (L2norm_wig + 1.0e-2)
+
+ print *, 'L2 w/ghost (k)= ',k,L2norm_wig,L2norm
+
+ if (L2norm_wig < NL_target) exit ! nonlinear convergence criterion
+
+!==============================================================================
+! solve J(u^k-1,v^k-1)dx = -F(u^k-1,v^k-1) with fgmres, dx = [dv, du]
+!==============================================================================
+
+ rhs = -1.d0*F
+
+ dx = 0.d0 ! initial guess
+
+ ! inexact-Newton forcing term: linear tolerance tightens as Newton converges
+ call forcing_term (k, L2norm_wig, gamma_l)
+
+ tol = gamma_l * L2norm_wig ! setting the tolerance for fgmres
+
+ epsilon = 1.d-07 ! for J*vector approximation
+
+ maxiteGMRES = 300
+
+ iout = 0 ! set higher than 0 to have res(ite)
+
+ icode = 0
+
+ 10 CONTINUE
+! icode = 0 means that fgmres has finished and sol contains the app. solution
+! Reverse-communication loop: fgmres returns here asking for either a
+! preconditioner application (icode==1) or a Jacobian-vector product (icode>=2).
+
+ call fgmres (2*pcgsize(1),img,rhs,dx,itenb,vv,wk,wk1,wk2, &
+ tol,maxiteGMRES,iout,icode,tot_its)
+
+ IF ( icode == 1 ) THEN ! precond step: use of Picard linear solver
+ ! wk2 = P^-1*wk1
+ call apply_precond_nox( wk2, wk1, xk_size, c_ptr_to_object )
+ GOTO 10
+ ELSEIF ( icode >= 2 ) THEN ! matvec step: Jacobian free approach
+ ! J*wk1 ~ wk2 = (F_plus - F)/epsilon
+
+! form v^k-1_plus = v^k-1 + epsilon*wk1v. We use solver_postprocess to
+! transform vk_1_plus from a vector to a 3D field. (same idea for u^k-1_plus)
+ vectx(:) = wk1(1:2*pcgsize(1)) ! for v and u
+ xk_1_plus = xk_1 + epsilon*vectx
+
+! form F(x + epsilon*wk1) = F(u^k-1 + epsilon*wk1u, v^k-1 + epsilon*wk1v)
+ call calc_F (xk_1_plus, F_plus, xk_size, c_ptr_to_object, 1)
+
+! put approximation of J*wk1 in wk2
+
+ wk2 = ( F_plus - F ) / epsilon
+
+ GOTO 10
+ ENDIF
+
+!------------------------------------------------------------------------
+! End of FGMRES method
+!------------------------------------------------------------------------
+ if (tot_its == maxiteGMRES) then
+ print *,'WARNING: FGMRES has not converged'
+ stop
+ endif
+
+!------------------------------------------------------------------------
+! Update solution vectors (x^k = x^k-1 + dx) and 3D fields
+!------------------------------------------------------------------------
+ xk_1 = xk_1 + dx(1:2*pcgsize(1))
+
+ end do ! k = 1, kmax
+
+ deallocate(dx, vectx, xk_1_plus)
+ deallocate(F, F_plus, rhs)
+ deallocate(wk1, wk2)
+ deallocate(vv, wk)
+
+end subroutine slapsolve
+
+!-----------------------------------------------------------------------
+
+ subroutine fgmres (n,im,rhs,sol,i,vv,w,wk1, wk2, &
+ eps,maxits,iout,icode,its)
+
+! JFL to be removed
+
+!-----------------------------------------------------------------------
+! jfl Dec 1st 2006. We modified the routine so that it is double precision.
+! Here are the modifications:
+! 1) implicit real (a-h,o-z) becomes implicit real*8 (a-h,o-z)
+! 2) real becomes real*8
+! 3) subroutine scopy.f has been changed for dcopy.f
+! 4) subroutine saxpy.f has been changed for daxpy.f
+! 5) function sdot.f has been changed for ddot.f
+! 6) 1e-08 becomes 1d-08
+!
+! Be careful with the dcopy, daxpy and ddot code...there is a slight
+! difference with the single precision versions (scopy, saxpy and sdot).
+! In the single precision versions, the array are declared slightly differently.
+! It is written for single precision:
+!
+! modified 12/3/93, array(1) declarations changed to array(*)
+!-----------------------------------------------------------------------
+
+ implicit double precision (a-h,o-z) !jfl modification
+ integer n, im, maxits, iout, icode
+ double precision rhs(*), sol(*), vv(n,im+1),w(n,im)
+ double precision wk1(n), wk2(n), eps
+!-----------------------------------------------------------------------
+! flexible GMRES routine. This is a version of GMRES which allows a
+! a variable preconditioner. Implemented with a reverse communication
+! protocol for flexibility -
+! DISTRIBUTED VERSION (USES DISTDOT FOR DDOT)
+! explicit (exact) residual norms for restarts
+! written by Y. Saad, modified by A. Malevsky, version February 1, 1995
+!-----------------------------------------------------------------------
+! This Is A Reverse Communication Implementation.
+!-------------------------------------------------
+! USAGE: (see also comments for icode below). FGMRES
+! should be put in a loop and the loop should be active for as
+! long as icode is not equal to 0. On return fgmres will
+! 1) either be requesting the new preconditioned vector applied
+! to wk1 in case icode==1 (result should be put in wk2)
+! 2) or be requesting the product of A applied to the vector wk1
+! in case icode==2 (result should be put in wk2)
+! 3) or be terminated in case icode == 0.
+! on entry always set icode = 0. So icode should be set back to zero
+! upon convergence.
+!-----------------------------------------------------------------------
+! Here is a typical way of running fgmres:
+!
+! icode = 0
+! 1 continue
+! call fgmres (n,im,rhs,sol,i,vv,w,wk1, wk2,eps,maxits,iout,icode)
+!
+! if (icode == 1) then
+! call precon(n, wk1, wk2) <--- user's variable preconditioning
+! goto 1
+! else if (icode >= 2) then
+! call matvec (n,wk1, wk2) <--- user's matrix vector product.
+! goto 1
+! else
+! ----- done ----
+! .........
+!-----------------------------------------------------------------------
+! list of parameters
+!-------------------
+!
+! n == integer. the dimension of the problem
+! im == size of Krylov subspace: should not exceed 50 in this
+! version (can be reset in code. looking at comment below)
+! rhs == vector of length n containing the right hand side
+! sol == initial guess on input, approximate solution on output
+! vv == work space of size n x (im+1)
+! w == work space of length n x im
+! wk1,
+! wk2, == two work vectors of length n each used for the reverse
+! communication protocol. When on return (icode /= 0)
+! the user should call fgmres again with wk2 = precon * wk1
+! and icode untouched. When icode==0 then it means that
+! convergence has taken place.
+!
+! eps == tolerance for stopping criterion. process is stopped
+! as soon as ( ||.|| is the euclidean norm):
+! || current residual||/||initial residual|| <= eps
+!
+! maxits== maximum number of iterations allowed
+!
+! iout == output unit number number for printing intermediate results
+! if (iout <= 0) no statistics are printed.
+!
+! icode = integer. indicator for the reverse communication protocol.
+! ON ENTRY : icode should be set to icode = 0.
+! ON RETURN:
+! * icode == 1 value means that fgmres has not finished
+! and that it is requesting a preconditioned vector before
+! continuing. The user must compute M**(-1) wk1, where M is
+! the preconditioing matrix (may vary at each call) and wk1 is
+! the vector as provided by fgmres upon return, and put the
+! result in wk2. Then fgmres must be called again without
+! changing any other argument.
+! * icode == 2 value means that fgmres has not finished
+! and that it is requesting a matrix vector product before
+! continuing. The user must compute A * wk1, where A is the
+! coefficient matrix and wk1 is the vector provided by
+! upon return. The result of the operation is to be put in
+! the vector wk2. Then fgmres must be called again without
+! changing any other argument.
+! * icode == 0 means that fgmres has finished and sol contains
+! the approximate solution.
+! comment: typically fgmres must be implemented in a loop
+! with fgmres being called as long icode is returned with
+! a value \= 0.
+!-----------------------------------------------------------------------
+! local variables -- !jfl modif
+ double precision hh(201,200),c(200),s(200),rs(201),t,ro,ddot,sqrt
+!
+!-------------------------------------------------------------
+! arnoldi size should not exceed 50 in this version..
+! to reset modify sizes of hh, c, s, rs
+!-------------------------------------------------------------
+
+! 'save' keeps all local state alive across reverse-communication returns;
+! epsmac guards the Givens-rotation denominator against exact zero.
+ save
+ data epsmac/1.d-16/
+
+ !WHL - added integer declarations
+ integer :: i, its, i1, ii, j, jj, k, k1, n1
+!
+! computed goto
+! dispatch on resumption state: icode=0 -> 100 (fresh start),
+! icode=1 -> 200 (preconditioned vector returned in wk2),
+! icode=2 -> 300 (matvec result returned in wk2),
+! icode=3 -> 11 (initial matvec A*sol returned in wk2)
+!
+ goto (100,200,300,11) icode +1
+ 100 continue
+ n1 = n + 1
+ its = 0
+!-------------------------------------------------------------
+! ** outer loop starts here..
+!--------------compute initial residual vector --------------
+! 10 continue
+ call dcopy (n, sol, 1, wk1, 1) !jfl modification
+ icode = 3
+ return
+ 11 continue
+! initial residual r0 = rhs - A*sol (A*sol was returned in wk2)
+ do j=1,n
+ vv(j,1) = rhs(j) - wk2(j)
+ enddo
+! (re)start: ro = ||residual||; normalize to get first Krylov vector
+ 20 ro = ddot(n, vv, 1, vv,1) !jfl modification
+ ro = sqrt(ro)
+ if (ro == 0.0d0) goto 999
+ t = 1.0d0/ ro
+ do j=1, n
+ vv(j,1) = vv(j,1)*t
+ enddo
+ if (its == 0) eps1=eps
+ if (its == 0) r0 = ro
+ if (iout > 0) write(*, 199) its, ro!&
+! print *,'chau',its, ro !write(iout, 199) its, ro
+!
+! initialize 1-st term of rhs of hessenberg system..
+!
+ rs(1) = ro
+ i = 0
+! Arnoldi loop over Krylov directions i = 1..im
+ 4 i=i+1
+ its = its + 1
+ i1 = i + 1
+ do k=1, n
+ wk1(k) = vv(k,i)
+ enddo
+!
+! return
+!
+ icode = 1
+
+ return
+ 200 continue
+! store the preconditioned direction z_i = M^-1 v_i (FGMRES keeps each z_i)
+ do k=1, n
+ w(k,i) = wk2(k)
+ enddo
+!
+! call matvec operation
+!
+ icode = 2
+ call dcopy(n, wk2, 1, wk1, 1) !jfl modification
+!
+! return
+!
+ return
+ 300 continue
+!
+! first call to ope corresponds to initialization; go back to 11.
+!
+! if (icode == 3) goto 11
+ call dcopy (n, wk2, 1, vv(1,i1), 1) !jfl modification
+!
+! modified gram - schmidt...
+!
+ do j=1, i
+ t = ddot(n, vv(1,j), 1, vv(1,i1), 1) !jfl modification
+ hh(j,i) = t
+ call daxpy(n, -t, vv(1,j), 1, vv(1,i1), 1) !jfl modification
+ enddo
+ t = sqrt(ddot(n, vv(1,i1), 1, vv(1,i1), 1)) !jfl modification
+ hh(i1,i) = t
+ if (t == 0.0d0) goto 58
+ t = 1.0d0 / t
+ do k=1,n
+ vv(k,i1) = vv(k,i1)*t
+ enddo
+!
+! done with modified gram schmidt and arnoldi step.
+! now update factorization of hh
+!
+ 58 if (i == 1) goto 121
+!
+! perform previous transformations on i-th column of h
+!
+ do k=2,i
+ k1 = k-1
+ t = hh(k1,i)
+ hh(k1,i) = c(k1)*t + s(k1)*hh(k,i)
+ hh(k,i) = -s(k1)*t + c(k1)*hh(k,i)
+ enddo
+ 121 gam = sqrt(hh(i,i)**2 + hh(i1,i)**2)
+ if (gam == 0.0d0) gam = epsmac
+!-----------#determine next plane rotation #-------------------
+ c(i) = hh(i,i)/gam
+ s(i) = hh(i1,i)/gam
+ rs(i1) = -s(i)*rs(i)
+ rs(i) = c(i)*rs(i)
+!
+! determine res. norm. and test for convergence-
+!
+ hh(i,i) = c(i)*hh(i,i) + s(i)*hh(i1,i)
+ ro = abs(rs(i1))
+ if (iout > 0) &
+ write(*, 199) its, ro
+ if (i < im .and. (ro > eps1)) goto 4
+!
+! now compute solution. first solve upper triangular system.
+!
+ rs(i) = rs(i)/hh(i,i)
+ do ii=2,i
+ k=i-ii+1
+ k1 = k+1
+ t=rs(k)
+ do j=k1,i
+ t = t-hh(k,j)*rs(j)
+ enddo
+ rs(k) = t/hh(k,k)
+ enddo
+!
+! done with back substitution..
+! now form linear combination to get solution
+! (FGMRES update uses the stored preconditioned directions w, not vv)
+!
+ do j=1, i
+ t = rs(j)
+ call daxpy(n, t, w(1,j), 1, sol,1) !jfl modification
+ enddo
+!
+! test for return
+!
+ if (ro <= eps1 .or. its >= maxits) goto 999
+!
+! else compute residual vector and continue..
+!
+! goto 10
+
+! cheap restart: recover the residual from the rotations instead of a
+! fresh matvec, accumulate it into vv(:,1), then restart at label 20
+ do j=1,i
+ jj = i1-j+1
+ rs(jj-1) = -s(jj-1)*rs(jj)
+ rs(jj) = c(jj-1)*rs(jj)
+ enddo
+ do j=1,i1
+ t = rs(j)
+ if (j == 1) t = t-1.0d0
+ call daxpy (n, t, vv(1,j), 1, vv, 1)
+ enddo
+!
+! restart outer loop.
+!
+ goto 20
+! converged, breakdown, or maxits reached: signal completion to the caller
+ 999 icode = 0
+
+! (note: the 'fmgres' spelling below is in the runtime output string and is
+! left unchanged here)
+ 199 format(' -- fmgres its =', i4, ' res. norm =', d26.16)
+!
+ return
+
+ end subroutine fgmres
+!-----------------------------------------------------------------------
+
+!***********************************************************************************************
+!BELOW here are deprecated boundary condition subroutines that have been replaced by newer
+! ones (using one sided differences) or slightly altered ones.
+!***********************************************************************************************
+
+!***********************************************************************************************
+!NOTE: This subroutine has been deprecated because it is has been replaced by
+! 'normhorizmainbcos', where the "os" stands for one-sided difference.
+function normhorizmainbc(dew, dns, &
+ dusrfdew, dusrfdns, &
+ dsigmadew, dsigmadns, &
+ which, bcflag, &
+ dup, &
+ oneorfour, fourorone)
+
+ ! DEPRECATED (superseded by 'normhorizmainbcos', which uses one-sided
+ ! differences). Builds the 3x3x3 stencil of coefficients enforcing a
+ ! higher-order boundary condition on one velocity component.
+ !
+ ! which : 1 for the x-direction equation, 2 for the y-direction equation;
+ ! selects the entry used from fourorone = [4 1] / oneorfour = [1 4].
+ ! bcflag(1) : 1 -> free-surface (or specified basal stress) condition;
+ ! 0 -> basal no-slip (u = v = 0), which also requires the
+ ! corresponding rhs entry to be zero.
+ ! Any other bcflag(1) value yields an all-zero stencil.
+
+ implicit none
+
+ real(dp), intent(in) :: dew, dns
+ real(dp), intent(in) :: dusrfdew, dusrfdns, dsigmadew, dsigmadns, dup
+ real(dp), intent(in), dimension(2) :: oneorfour, fourorone
+ real(dp), dimension(3,3,3) :: normhorizmainbc
+ real(dp) :: fac
+
+ integer, intent(in) :: which
+ integer, intent(in), dimension(2) :: bcflag
+
+ ! assemble directly into the result; unset entries stay zero
+ normhorizmainbc(:,:,:) = 0.d0
+
+ if( bcflag(1) == 0 )then
+
+ ! basal no-slip: identity row pins the velocity at this point
+ normhorizmainbc(2,2,2) = 1.d0
+
+ else if( bcflag(1) == 1 )then
+
+ ! free-surface b.c. (also covers specified basal stress, since the
+ ! caller may pass different slope components in dusrfdew/dusrfdns)
+
+ ! du/dsigma contribution: couples the vertical neighbors (1,2,2)/(3,2,2)
+ fac = ( fourorone(which) * dusrfdew * dsigmadew &
+ + oneorfour(which) * dusrfdns * dsigmadns )/(2*dup)
+ normhorizmainbc(3,2,2) = -fac
+ normhorizmainbc(1,2,2) = fac
+
+ ! du/dxhat contribution: couples the east-west neighbors
+ fac = fourorone(which) * dusrfdew / (2*dew)
+ normhorizmainbc(2,3,2) = fac
+ normhorizmainbc(2,1,2) = -fac
+
+ ! du/dyhat contribution: couples the north-south neighbors
+ fac = oneorfour(which) * dusrfdns / (2*dns)
+ normhorizmainbc(2,2,3) = fac
+ normhorizmainbc(2,2,1) = -fac
+
+ end if
+
+ return
+
+end function normhorizmainbc
+
+!***********************************************************************
+!NOTE: This subroutine has been deprecated because it is has been replaced by
+! 'croshorizmainbcos', where the "os" stands for one-sided difference.
+function croshorizmainbc(dew, dns, &
+ dusrfdew, dusrfdns, &
+ dsigmadew, dsigmadns, &
+ which, bcflag, &
+ dup, local_othervel, &
+ efvs, &
+ oneortwo, twoorone, &
+ g_cros, velbc )
+
+ ! As described for "normhorizmainbc" above. The vectors "twoorone" and
+ ! "oneortwo" are given by: twoorone = [ 2 1 ]; oneortwo = [ 1 2 ];
+ !
+ ! DEPRECATED (superseded by 'croshorizmainbcos'). Builds the 3x3x3
+ ! cross-term stencil applied to the *other* velocity component.
+ ! Returns the stencil as the function result; g_cros receives the same
+ ! values before any further modification by the caller.
+ !
+ ! NOTE(review): 'efvs' is declared but never referenced in this body.
+ ! NOTE(review): 'velbc' is optional but is used without a present() guard
+ ! in the bcflag(1)==0 branch -- callers taking that branch must supply it;
+ ! confirm all call sites do.
+
+ implicit none
+
+ integer, intent(in) :: which
+ integer, intent(in), dimension(:) :: bcflag
+
+ real(dp), intent(in) :: dew, dns
+ real(dp), intent(in), dimension(:) :: oneortwo, twoorone
+ real(dp), intent(in) :: dusrfdew, dusrfdns, dsigmadew, dsigmadns, dup
+ real(dp), intent(in), dimension(:,:,:) :: local_othervel
+ real(dp), intent(in), dimension(:,:,:) :: efvs
+ real(dp), intent(in), optional :: velbc
+ real(dp), intent(out),dimension(:,:,:) :: g_cros
+
+ real(dp), dimension(3,3,3) :: g, croshorizmainbc
+ real(dp) :: c
+ integer :: nz
+
+ c = 0.d0
+ g(:,:,:) = 0.d0
+ g_cros = g
+ nz = 0
+
+ ! for higher-order FREE SURFACE B.C. for x ('which'=1) or y ('which'=2) direction ...
+ ! NOTE that this handles the case for specified stress at the bed as well, as we
+ ! simply pass in a different value for the normal vector (slope) components (still
+ ! called "dusrfdns", "dusrfdew" here, but args passed in are different).
+ if( bcflag(1) == 1 )then
+
+ ! first, coeff. that go with du/dsigma, and thus are associated
+ ! with u(1,2,2) and u(3,2,2) ...
+ c = ( - twoorone(which) * dusrfdew * dsigmadns &
+ - oneortwo(which) * dusrfdns * dsigmadew )/(2*dup)
+ g(3,2,2) = -c
+ g(1,2,2) = c
+
+ ! next, coeff. that go with du/dxhat and du/dyhat terms ...
+ c = - oneortwo(which) * dusrfdns / (2*dew)
+ g(2,3,2) = c
+ g(2,1,2) = -c
+
+ c = - twoorone(which) * dusrfdew / (2*dns)
+ g(2,2,3) = c
+ g(2,2,1) = -c
+
+ ! for higher-order BASAL B.C. U=V=0, in x ('which'=1) or y ('which'=2) direction ...
+ ! This forces the multiplication by 'local_otherval' in the main program
+ ! to result in a value of 1, thus leaving the boundary vel. unchanged
+ ! ... conditional makes sure there is no div by zero if the bc value IS also zero
+ else if( bcflag(1) == 0 )then
+
+ g(:,:,:) = 0.d0
+
+ ! mark stencil entries where the other velocity component is nonzero
+ where( local_othervel /= 0.d0 )
+ g = 1
+ elsewhere
+ g = 0.d0
+ endwhere
+
+ ! nz = number of nonzero entries (sum of exact 0/1 reals, so the
+ ! real-to-integer conversion is exact)
+ nz = sum( g )
+ g(:,:,:) = 0.d0
+
+ ! distribute velbc evenly over the nonzero entries so that the later
+ ! product with local_othervel reproduces velbc
+ where( local_othervel /= 0.d0 )
+ g = ( velbc / nz ) / local_othervel
+ elsewhere
+ g = 0.d0
+ endwhere
+
+ end if
+
+ ! NOTE: here we define 'g_cros' FIRST, because we want the value w/o the plastic
+ ! bed coeff. included (needed for estimate of basal traction in plastic bed iteration)
+ g_cros = g
+
+ croshorizmainbc = g
+
+ return
+
+end function croshorizmainbc
+
+!***********************************************************************************************
+!ABOVE here are deprecated boundary condition subroutines that have been replaced by newer
+! ones (using one sided differences) or slightly altered ones.
+!***********************************************************************************************
+
+
+end module glam_strs2
+
+!!!***********************************************************************
diff --git a/components/cism/glimmer-cism/libglide/glam_velo.F90 b/components/cism/glimmer-cism/libglide/glam_velo.F90
new file mode 100644
index 0000000000..ae7977be7b
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glam_velo.F90
@@ -0,0 +1,340 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glam_velo.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <https://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+#include "glide_nan.inc"
+#include "glide_mask.inc"
+
+!NOTE - What is shapedbg?
+#define shapedbg(x) write(*,*) "x", shape(x)
+
+module glam_velo
+
+ use parallel
+ use glimmer_global, only : dp
+
+ ! Driver for glam higher-order velocity solver
+
+ implicit none
+
+ private
+ public :: glam_velo_driver, glam_basal_friction
+
+contains
+
+  subroutine glam_velo_driver(model)
+
+    ! Glissade higher-order velocity driver
+    !
+    ! Steps: (1) update staggered geometry fields and their derivatives,
+    ! (2) build the staggered-grid mask and fold in kinematic b.c. info,
+    ! (3) run the Picard (glam_velo_solver) or JFNK velocity solver as
+    !     selected by model%options%which_ho_nonlinear,
+    ! (4) compute internal stresses and optional point diagnostics.
+
+    use glimmer_log
+    use glide_types
+    use glam_strs2, only: glam_velo_solver, JFNK_velo_solver
+!!sp    use glissade_basal_traction, only: calcbeta
+    use glam_grid_operators, only: glam_geometry_derivs, df_field_2d_staggered
+    use glide_grid_operators, only: stagvarb
+    use glide_mask
+    use glide_stress
+    use glimmer_paramets, only: tau0, vel0
+    use glimmer_physcon, only: scyr
+
+    type(glide_global_type),intent(inout) :: model
+
+    logical, parameter :: verbose_glam_velo = .false.
+    integer :: i, j, k
+
+    !-------------------------------------------------------------------
+    ! Velocity prep; compute geometry info.
+    !-------------------------------------------------------------------
+
+    !NOTE - The next chunk of code needs work. Several calls are repeated.
+    !       We should work out which calls are actually needed.
+
+    ! ------------------------------------------------------------------------
+    ! Now that geometry (thck, topg, lsrf, usrf) is finalized for the time step,
+    ! calculate derivatives that may be needed for the velocity solve.
+    ! ------------------------------------------------------------------------
+
+    !NOTE - Make sure these geometry derivs are computed everywhere they are needed
+    !       (all locally owned velocity points?)
+
+
+    !NOTE - The subroutine glam_geometry_derivs calls subroutine stagthickness to compute stagthck.
+    !       Similarly for dthckdew/ns and dusrfdew/ns
+    !       I don't know why we need to call the next three subroutines as well as glam_geometry_derivs.
+    !       This calculation of stagthck differs from that in glam_geometry_derivs which calls stagthickness()
+    !       in glide_grids.F90  Which do we want to use?
+    !       stagthickness() seems to be noisier but there are notes in there about some issue related to margins.
+
+    ! SFP: not sure if these are all needed here or not. Halo updates for usrf and thck are needed in order
+    ! for periodic bcs to work. Otherwise, global halos do not contain correct values and, presumably, the gradients
+    ! calculated below are incorrect in and near the global halos.
+    ! Calls were added here for other staggered variables (stagusrf, stagtopg, and staglsrf), first providing halo
+    ! updates to the non-stag vars, then calc. their stag values. This was done because debug lines show that these
+    ! stag fields did not have the correct values in their global halos. This may be ok if they are not used at all
+    ! by the dycores called here, but I added them for consistency. More testing needed to determine if they are
+    ! essential or not.
+
+    ! SFP: for consistency, I added these calls, so that all scalars interpolated to the stag mesh
+    ! first have had their global halos updated. As w/ above calls to halo updates, these may be better
+    ! placed elsewhere. The only call originally here was the one to calc stagthck.
+
+    !NOTE - Should we replace these with calls to df_field_2d_staggered?
+
+    ! interpolate scalar geometry fields to the staggered (velocity) grid
+    call stagvarb(model%geometry%usrf, model%geomderv%stagusrf,&
+                  model%general%ewn,   model%general%nsn)
+
+    call stagvarb(model%geometry%lsrf, model%geomderv%staglsrf,&
+                  model%general%ewn,   model%general%nsn)
+
+    call stagvarb(model%geometry%topg, model%geomderv%stagtopg,&
+                  model%general%ewn,   model%general%nsn)
+
+    call stagvarb(model%geometry%thck, model%geomderv%stagthck,&  ! SFP: this call was already here. Calls to calc
+                  model%general%ewn,   model%general%nsn)         ! stagusrf, staglsrf, and stagtopg were added
+
+
+    ! horizontal derivatives of usrf and thck on the staggered grid
+    call df_field_2d_staggered(model%geometry%usrf, &
+                               model%numerics%dew,      model%numerics%dns, &
+                               model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+                               model%geometry%thck,     model%numerics%thklim )
+
+    call df_field_2d_staggered(model%geometry%thck, &
+                               model%numerics%dew,      model%numerics%dns, &
+                               model%geomderv%dthckdew, model%geomderv%dthckdns, &
+                               model%geometry%thck,     model%numerics%thklim )
+
+    !SFP: W.r.t WHL comment below, I went the other route above - that is, did halo updates for the non-stag
+    !fields first, then did the subroutine calls to calc. fields on the unstag mesh. I think this makes sure
+    !you are not populating the stag field global halos with bad information that may have been sitting in the
+    !associated non-stag field halos in the case that you forgot to update them. Maybe?
+
+    !NOTE - Not sure halo updates are needed for dusrfdew, etc.
+    !Halo updates required for inputs to glide_stress?
+    call staggered_parallel_halo (model%geomderv%dusrfdew)
+    call staggered_parallel_halo (model%geomderv%dusrfdns)
+    call staggered_parallel_halo (model%geomderv%dthckdew)
+    call staggered_parallel_halo (model%geomderv%dthckdns)
+    !       call parallel_halo(model%geometry%thkmask) in earlier glide_set_mask call
+
+    ! Compute the new geometry derivatives for this time step
+    ! NOTE Merge glam_geometry_derivs with the above calculation.
+
+    !SFP: For some reason, this next call IS needed. It does not affect the results of the periodic ismip-hom test case either
+    ! way (that is, if it is active or commented out), or the dome test case. But for some reason, if it is not active, it
+    ! messes up both shelf test cases. There must be some important derivs being calculated within this call that are NOT
+    ! being explicitly calculated above.
+
+    ! Compute stagthck, staglsrf, stagtopg, dusrfdew/dns, dthckdew/dns, dlsrfdew/dns, d2thckdew2/dns2, d2usrfdew2/dns2
+
+    call glam_geometry_derivs(model)
+
+    !WHL - This is the end of the geometry calculations that need to be streamlined.
+
+    !NOTE - Verify that glide_set_mask works correctly when the input field is on the velo grid.
+    !       Would be safer to call a set_mask_staggered subroutine?
+
+    !Compute the "geometry mask" (type of square) for the staggered grid
+    ! (note the ewn-1 x nsn-1 extents: the staggered grid has one fewer
+    ! point in each horizontal direction)
+
+    call glide_set_mask(model%numerics,                                &
+                        model%geomderv%stagthck, model%geomderv%stagtopg, &
+                        model%general%ewn-1,     model%general%nsn-1,     &
+                        model%climate%eus,       model%geometry%stagmask)
+
+    ! call stag_parallel_halo (model%geometry%stagmask)
+
+    !Augment masks with kinematic boundary condition info
+    call augment_kinbc_mask(model%geometry%thkmask, model%velocity%kinbcmask)
+    call augment_kinbc_mask(model%geometry%stagmask, model%velocity%kinbcmask)
+
+    ! save the final mask to 'dynbcmask' for exporting to netCDF output file
+    model%velocity%dynbcmask = model%geometry%stagmask
+
+    !-------------------------------------------------------------------
+    ! Compute the velocity field
+    !-------------------------------------------------------------------
+
+    if (model%options%which_ho_nonlinear == HO_NONLIN_PICARD ) then ! Picard (standard solver)
+
+       call t_startf('glam_velo_solver')
+       call glam_velo_solver( model%general%ewn,       model%general%nsn,                 &
+                              model%general%upn,                                         &
+                              model%numerics%dew,      model%numerics%dns,               &
+                              model%numerics%sigma,    model%numerics%stagsigma,         &
+                              model%geometry%thck,     model%geometry%usrf,              &
+                              model%geometry%lsrf,     model%geometry%topg,              &
+                              model%geomderv%dthckdew, model%geomderv%dthckdns,          &
+                              model%geomderv%dusrfdew, model%geomderv%dusrfdns,          &
+                              model%geomderv%dlsrfdew, model%geomderv%dlsrfdns,          &
+                              model%geomderv%stagthck, model%temper%flwa,                &
+                              model%velocity%btraction,                                  &
+                              model%geometry%stagmask,                                   &
+                              model%options%which_ho_babc,                               &
+                              model%options%which_ho_efvs,                               &
+                              model%options%which_ho_resid,                              &
+                              model%options%which_ho_nonlinear,                          &
+                              model%options%which_ho_sparse,                             &
+                              model%velocity%beta,                                       &
+                              model%paramets%ho_beta_const,                              &
+                              model%basalproc%mintauf,                                   &
+                              model%temper%bwat,                                         &
+                              model%basal_physics,                                       &
+                              model%velocity%uvel, model%velocity%vvel,                  &
+                              model%velocity%uflx, model%velocity%vflx,                  &
+                              model%stress%efvs )
+       call t_stopf('glam_velo_solver')
+
+    else if ( model%options%which_ho_nonlinear == HO_NONLIN_JFNK ) then  ! JFNK
+
+       ! noxsolve could eventually go here
+       !NOTE - Remove model%geometry%stagmask from argument list; just pass in model
+       !      (model%geometry%stagmask used to be called geom_mask_stag, which was not part of model derived type)
+
+       call t_startf('JFNK_velo_solver')
+       call JFNK_velo_solver (model, model%geometry%stagmask)
+       call t_stopf('JFNK_velo_solver')
+
+    else
+       call write_log('Invalid which_ho_nonlinear option.',GM_FATAL)
+    end if   ! which_ho_nonlinear
+
+    ! Compute internal stresses
+    call glide_calcstrsstr(model)
+
+    !WHL - debug - output internal stresses and velocity at a diagnostic point
+    if (verbose_glam_velo .and. this_rank==model%numerics%rdiag_local) then
+       i = model%numerics%idiag_local
+       j = model%numerics%jdiag_local
+       print*, ' '
+       print*, ' '
+       print*, 'i, j =', i, j
+       print*, 'k, tau_xz, tau_yz, tau_xx, tau_yy, tau_xy, tau_eff:'
+       do k = 1, model%general%upn-1
+          print*, k, tau0*model%stress%tau%xz(k,i,j), tau0*model%stress%tau%yz(k,i,j), &
+                     tau0*model%stress%tau%xx(k,i,j), tau0*model%stress%tau%yy(k,i,j), &
+                     tau0*model%stress%tau%xy(k,i,j), tau0*model%stress%tau%scalar(k,i,j)
+       enddo
+       print*, 'New velocity: rank, i, j =', this_rank, i, j
+       print*, 'k, uvel, vvel:'
+       do k = 1, model%general%upn
+          print*, k, vel0*scyr*model%velocity%uvel(k,i,j), vel0*scyr*model%velocity%vvel(k,i,j)
+       enddo
+    endif
+
+  end subroutine glam_velo_driver
+
+!=======================================================================
+
+ subroutine glam_basal_friction (ewn, nsn, &
+ ice_mask, floating_mask, &
+ ubas, vbas, &
+ btraction, bfricflx)
+
+ ! Compute frictional heat source due to sliding at the bed
+ ! Based on a subroutine that used to be in glissade_temp.F90
+ ! but now is used only by Glam
+ !
+ ! For each interior temperature point (ew,ns), the heat source is the
+ ! average of tau_b . u_b over the four surrounding staggered velocity
+ ! points, applied only where the ice is grounded (ice_mask = 1 and
+ ! floating_mask = 0) and all four neighbors are sliding.
+
+ use glimmer_paramets, only: vel0, vel_scale
+
+ !-----------------------------------------------------------------
+ ! Input/output arguments
+ !-----------------------------------------------------------------
+
+ integer, intent(in) :: ewn, nsn ! grid dimensions
+ integer, dimension(:,:), intent(in) :: &
+ ice_mask, & ! = 1 if thck > thklim, else = 0
+ floating_mask ! = 1 if ice is floating, else = 0
+ real(dp), dimension(:,:), intent(in) :: ubas, vbas ! basal velocity
+ real(dp), dimension(:,:,:), intent(in) :: btraction ! basal traction
+ real(dp), dimension(:,:), intent(out) :: bfricflx ! basal friction heat flux (W m-2)
+
+ !-----------------------------------------------------------------
+ ! Local arguments
+ !-----------------------------------------------------------------
+
+ real(dp) :: slterm ! sliding friction
+ integer :: ew, ns, i, j
+ integer :: slide_count ! number of neighbor cells with nonzero sliding
+
+ ! initialize the full output field; boundary rows/columns stay zero
+ bfricflx(:,:) = 0.d0
+
+ ! compute heat source due to basal friction
+ ! Note: slterm and bfricflx are defined to be >= 0
+
+ do ns = 2, nsn-1
+ do ew = 2, ewn-1
+
+ slterm = 0.d0
+ slide_count = 0
+
+ ! Note: btraction is computed in glam_strs2.F90
+
+ !WHL - Using thklim instead of thklim_temp because ice thinner than thklim
+ ! is assumed to be at rest.
+
+ if (ice_mask(ew,ns)==1 .and. floating_mask(ew,ns)==0) then
+ ! loop over the four staggered (velocity) neighbors of this point
+ do j = ns-1,ns
+ do i = ew-1,ew
+
+ !SCALING - WHL: Multiplied ubas by vel0/vel_scale so we get the same result in these two cases:
+ ! (1) With scaling: vel0 = vel_scale = 500/scyr, and ubas is non-dimensional
+ ! (2) Without scaling: vel0 = 1, vel_scale = 500/scyr, and ubas is in m/s.
+
+!!! if (abs(ubas(i,j)) > 1.0d-6 .or. &
+!!! abs(vbas(i,j)) > 1.0d-6) then
+ if ( abs(ubas(i,j))*(vel0/vel_scale) > 1.0d-6 .or. &
+ abs(vbas(i,j))*(vel0/vel_scale) > 1.0d-6 ) then
+ slide_count = slide_count + 1
+ slterm = slterm + btraction(1,i,j)*ubas(i,j) + btraction(2,i,j)*vbas(i,j)
+ end if
+ end do
+ end do
+
+ endif ! ice_mask = 1, floating_mask = 0
+
+ ! include sliding contrib only if temperature node is surrounded by sliding velo nodes
+ !NOTE - This may result in non-conservation of energy.
+
+ if (slide_count == 4) then
+ ! average tau_b . u_b over the four neighbors
+ slterm = 0.25d0 * slterm
+ else
+ slterm = 0.0d0
+ end if
+
+ bfricflx(ew,ns) = slterm
+
+ enddo ! ns
+ enddo ! ew
+
+ end subroutine glam_basal_friction
+
+!===============================================================================
+
+end module glam_velo
+
+!===============================================================================
diff --git a/components/cism/glimmer-cism/libglide/glide.F90 b/components/cism/glimmer-cism/libglide/glide.F90
new file mode 100644
index 0000000000..51cbfeb684
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide.F90
@@ -0,0 +1,1053 @@
+! WJS (1-30-12): The following (turning optimization off) is needed as a workaround for an
+! xlf compiler bug, at least in IBM XL Fortran for AIX, V12.1 on bluefire
+#ifdef CPRIBM
+@PROCESS OPT(0)
+#endif
+
+!CLEANUP - glide.F90
+! Moved higher-order computations to a new module, glissade.F90.
+! Simplified glide.F90 to include only SIA computations.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+
+!=======================================================================
+
+module glide
+
+ ! Driver for Glide (serial, SIA) dynamical core
+
+ use glide_types
+ use glide_stop
+ use glide_io
+ use glide_lithot
+ use glide_profile
+ use glimmer_config
+ use glimmer_global, only: dp
+
+ use glimmer_paramets, only: oldglide
+
+ implicit none
+
+ integer, private, parameter :: dummyunit=99
+
+!WHL - debug
+ logical, parameter :: verbose_glide = .false.
+
+contains
+
+!=======================================================================
+
+ subroutine glide_config(model,config,fileunit)
+
+ ! Read glide configuration from file and print it to the log
+ !
+ ! Parses the main config sections, optional sigma levels, and map
+ ! projection, then reads the netCDF I/O parameters (either from the
+ ! main config or from a separate file named by model%funits%ncfile).
+
+ use glide_setup
+ use isostasy
+ use glimmer_ncparams
+ use glimmer_config
+ use glimmer_map_init
+ use glimmer_filenames
+
+ implicit none
+
+ type(glide_global_type), intent(inout) :: model ! model instance
+ type(ConfigSection), pointer :: config ! structure holding sections of configuration file
+ integer, intent(in), optional :: fileunit ! fileunit for reading config file
+
+ type(ConfigSection), pointer :: ncconfig
+ integer :: unit ! defaults to 99 if fileunit is absent
+
+ unit = 99
+ if (present(fileunit)) then
+ unit = fileunit
+ endif
+
+ ! read configuration file
+ call glide_readconfig (model,config)
+ call glide_printconfig(model)
+
+ ! read sigma levels from config file, if present
+ call glide_read_sigma(model,config)
+
+ !WHL - Moved isostasy configuration to glide_setup
+! call isos_readconfig(model%isos,config)
+! call isos_printconfig(model%isos)
+
+ ! read mapping from config file
+ ! **** Use of dew and dns here is an ugly fudge that
+ ! **** allows the use of old [GLINT projection] config section
+ ! **** for backwards compatibility. It will be deleted soon.
+ ! **** (You have been warned!)
+ ! **** N.B. Here, dew and dns are unscaled - i.e. real distances in m
+
+ call glimmap_readconfig(model%projection, config, &
+ model%numerics%dew, model%numerics%dns)
+
+ ! netCDF I/O
+ ! If no separate netCDF config file was given, reuse the main config;
+ ! otherwise read the named file into ncconfig first.
+ if (trim(model%funits%ncfile) == '') then
+ ncconfig => config
+ else
+ call ConfigRead(process_path(model%funits%ncfile), ncconfig, unit)
+ end if
+
+ call glimmer_nc_readparams(model, ncconfig)
+
+ end subroutine glide_config
+
+!=======================================================================
+
+ subroutine glide_initialise(model)
+
+ ! Initialise Glide model instance
+ !
+ ! Order of operations: set up scales and grids, allocate arrays, read
+ ! input files, initialise isostasy/temperature/hydrology/thickness,
+ ! then derive the initial mask and surfaces. Must be called after
+ ! glide_config and before any glide_tstep_* routine.
+
+ use glide_setup
+ use glimmer_ncio
+ use glide_velo, only: init_velo
+ use glide_thck
+ use glide_temp
+ use glimmer_log
+ use glimmer_scales
+ use glide_mask
+ use isostasy
+ use glimmer_map_init
+ use glimmer_coordinates, only: coordsystem_new
+ use glide_diagnostics, only: glide_init_diag
+ use glide_bwater
+
+ use parallel, only: distributed_grid
+
+ type(glide_global_type), intent(inout) :: model ! model instance
+
+!TODO - Is glimmer_version_char still used?
+! Old Glide does not include this variable.
+ character(len=100), external :: glimmer_version_char
+
+ integer, parameter :: my_nhalo = 0 ! no halo layers for Glide dycore
+
+!!!Old Glide has this:
+!!! call write_log(glimmer_version)
+
+ call write_log(trim(glimmer_version_char()))
+
+ ! initialise scales
+ call glimmer_init_scales
+
+ ! scale parameters (some conversions to SI units)
+ call glide_scale_params(model)
+
+ ! set up coordinate systems
+
+ ! Note: nhalo = 0 is included in call to distributed_grid to set other halo
+ ! variables (lhalo, uhalo, etc.) to 0 instead of default values
+
+!WHL - distributed_grid is not in old glide
+
+ call distributed_grid(model%general%ewn, model%general%nsn, &
+ nhalo_in=my_nhalo)
+
+ ! unstaggered (ice) grid: ewn x nsn points with origin at (0,0)
+ model%general%ice_grid = coordsystem_new(0.d0, 0.d0, &
+ model%numerics%dew, model%numerics%dns, &
+ model%general%ewn, model%general%nsn)
+
+ ! staggered (velocity) grid: (ewn-1) x (nsn-1) points offset by half a cell
+ model%general%velo_grid = coordsystem_new(model%numerics%dew/2.d0, model%numerics%dns/2.d0, &
+ model%numerics%dew, model%numerics%dns, &
+ model%general%ewn-1, model%general%nsn-1)
+
+ ! allocate arrays
+ call glide_allocarr(model)
+
+!TODO - Eliminate the bed softness parameter and set btrc to model%velowk%btrac_const in glide_velo?
+ ! initialise bed softness to uniform parameter
+ model%velocity%bed_softness = model%velowk%btrac_const
+
+ ! set uniform basal heat flux (positive down)
+ !NOTE: This value will be overridden if we read bheatflx from an input file
+ ! (model%options%gthf = 1) or compute it (model%options%gthf = 2)
+ model%temper%bheatflx = model%paramets%geot
+
+ ! compute sigma levels or load from external file
+ ! (if not already read from config file)
+ call glide_load_sigma(model,dummyunit)
+
+ ! open all input files and forcing files
+ call openall_in(model)
+
+ ! read first time slice
+ call glide_io_readall(model,model)
+
+ ! write projection info to log
+ call glimmap_printproj(model%projection)
+
+ !WHL - Should have been read from glide_io_readall
+ ! read lithot if required
+!! if (model%options%gthf > 0) then
+! if (model%options%gthf == GTHF_COMPUTE) then
+! call glide_lithot_io_readall(model,model)
+! end if
+
+ ! handle relaxed/equilibrium topo
+ ! Initialise isostasy first
+ call init_isostasy(model)
+
+ select case(model%options%whichrelaxed)
+
+ case(RELAXED_TOPO_INPUT) ! Supplied topography is relaxed
+ model%isostasy%relx = model%geometry%topg
+ case(RELAXED_TOPO_COMPUTE) ! Supplied topography is in equilibrium
+ !TODO - test case RELAXED_TOPO_COMPUTE
+ call isos_relaxed(model)
+ end select
+
+ ! open all output files
+ call openall_out(model)
+
+ ! create glide variables
+ call glide_io_createall(model, model)
+
+!WHL - debug
+! print*, ' '
+! print*, 'Created Glide variables'
+! print*, 'max, min bheatflx (W/m2)=', maxval(model%temper%bheatflx), minval(model%temper%bheatflx)
+
+ ! If a 2D bheatflx field is present in the input file, it will have been written
+ ! to model%temper%bheatflx. For the case model%options%gthf = 0, we want to use
+ ! a uniform heat flux instead.
+ ! If no bheatflx field is present in the input file, then we default to the
+ ! prescribed uniform value, model%paramets%geot.
+
+ if (model%options%gthf == GTHF_UNIFORM) then
+
+ ! Check to see if this flux was present in the input file
+ ! (by checking whether the flux is nonuniform over the domain)
+ if (abs(maxval(model%temper%bheatflx) - minval(model%temper%bheatflx)) > 1.d-6) then
+ call write_log('Setting uniform prescribed geothermal flux')
+ call write_log('(Set gthf = 1 to read geothermal flux field from input file)')
+ endif
+
+ ! set uniform basal heat flux (positive down)
+ model%temper%bheatflx = model%paramets%geot
+
+!WHL - debug
+! print*, ' '
+! print*, 'Use uniform bheatflx'
+! print*, 'max, min bheatflx (W/m2)=', maxval(model%temper%bheatflx), minval(model%temper%bheatflx)
+
+ endif
+
+ !TODO - Change subroutine names to glide_init_velo, glide_init_thck
+
+ ! initialise velocity calc
+ call init_velo(model)
+
+!WHL - old glide has a call to init_temp, which is similar to glide_init_temp
+! but does not set the temperature or compute flwa until later call to timeevoltemp
+!WHL - In old glide I added artm as a hotstart variable
+
+ ! Initialize temperature field - this needs to happen after input file is
+ ! read so we can assign artm (which could possibly be read in) if temp has not been input.
+ !
+ ! Note: If the temperature field has not been read already from an input or restart file,
+ ! then temperature is initialized by this subroutine based on model%options%temp_init.
+ ! If the temperature has been read already, this subroutine will *not* overwrite it.
+
+ call glide_init_temp(model)
+
+ ! Initialize basal hydrology model, if enabled
+ call bwater_init(model)
+
+ ! initialise thickness evolution calc
+ call init_thck(model)
+
+ if (model%options%gthf == GTHF_COMPUTE) then
+!! call glide_lithot_io_createall(model) !WHL - Variables should have been created by glide_io_createall
+ call init_lithot(model)
+ end if
+
+!WHL - This call will set the ice column temperature to artm as in old glide,
+! regardless of the value of model%options%temp_init
+! Commented out at least for now. To reproduce results of old_glide, make sure
+! model%options%temp_init = TEMP_INIT_ARTM.
+!! if (oldglide) then
+!! if (model%options%hotstart.ne.1) then
+!! ! initialise Glen's flow parameter A using an isothermal temperature distribution
+!! call glide_temp_driver(model,0)
+!! endif
+!! endif ! oldglide
+
+!WHL - This option is disabled for now.
+ ! *mb* added; initialization of basal proc. module
+!! if (model%options%which_bproc == BAS_PROC_FULLCALC .or. &
+!! model%options%which_bproc == BAS_PROC_FASTCALC) then
+!! call Basal_Proc_init (model%general%ewn, model%general%nsn,model%basalproc, &
+!! model%numerics%dttem)
+!! end if
+
+ ! compute the initial ice mask (and integrated area/volume diagnostics)
+ call glide_set_mask(model%numerics, &
+ model%geometry%thck, model%geometry%topg, &
+ model%general%ewn, model%general%nsn, &
+ model%climate%eus, model%geometry%thkmask, &
+ model%geometry%iarea, model%geometry%ivol)
+
+ ! calculate lower and upper ice surface
+ call glide_calclsrf(model%geometry%thck, model%geometry%topg, model%climate%eus,model%geometry%lsrf)
+
+ model%geometry%usrf = model%geometry%thck + model%geometry%lsrf
+
+ ! initialise thckwk variables; used in timeders subroutine
+ model%thckwk%olds(:,:,1) = model%geometry%thck(:,:)
+ model%thckwk%olds(:,:,2) = model%geometry%usrf(:,:)
+
+ ! initialise standard glide profiling
+ call glide_prof_init(model)
+
+ !TODO - Unclear on how subroutine register_model is used - Is it needed for serial code?
+ ! register the newly created model so that it can be finalised in the case
+ ! of an error without needing to pass the whole thing around to every
+ ! function that might cause an error
+
+ call register_model(model)
+
+ ! initialise model diagnostics
+
+ call glide_init_diag(model)
+
+!WHL - debug
+! print*, 'After glide_initialise:'
+! print*, 'max, min thck (m)=', maxval(model%geometry%thck)*thk0, minval(model%geometry%thck)*thk0
+! print*, 'max, min usrf (m)=', maxval(model%geometry%usrf)*thk0, minval(model%geometry%usrf)*thk0
+! print*, 'max, min artm =', maxval(model%climate%artm), minval(model%climate%artm)
+! print*, 'max, min temp =', maxval(model%temper%temp), minval(model%temper%temp)
+! print*, 'max, min flwa =', maxval(model%temper%flwa), minval(model%temper%flwa)
+
+! print*, ' '
+! print*, 'thck:'
+! do j = model%general%nsn, 1, -1
+! write(6,'(30f5.0)') thk0 * model%geometry%thck(:,j)
+! enddo
+! print*, ' '
+! print*, 'temp, k = 2:'
+! do j = model%general%nsn+1, 0, -1
+! write(6,'(32f5.0)') model%temper%temp(2,:,j)
+! enddo
+! print*, 'basal temp:'
+! do j = model%general%nsn+1, 0, -1
+! write(6,'(32f5.0)') model%temper%temp(model%general%upn,:,j)
+! enddo
+
+ end subroutine glide_initialise
+
+!=======================================================================
+
+ subroutine glide_init_state_diagnostic(model)
+
+ ! Calculate diagnostic variables for the initial model state
+ ! This provides calculation of output fields at time 0
+ ! This is analagous to glissade_diagnostic_variable_solve but is only
+ ! called from init. The glide tstep routines take care of these calculations
+ ! during time stepping.
+ ! Note that none of this is needed on a restart - this code ensures a complete
+ ! set of diagnostic output fields for the initial state.
+ !
+ ! Cold-start path: apply marine limit, rebuild mask/surfaces/derivatives,
+ ! then solve for diffusivity, basal traction, and velocity.
+ ! Restart path: only copy basal velocity from the restarted uvel/vvel.
+
+ use glide_thck
+ use glide_velo
+ use glide_mask
+ use glimmer_paramets, only: tim0
+ use glimmer_physcon, only: scyr
+ use glide_ground, only: glide_marinlim
+ use glide_bwater, only: calcbwat
+ use glide_temp, only: glide_calcbmlt, glide_calcbpmp
+ use glide_grid_operators
+
+ type(glide_global_type), intent(inout) :: model ! model instance
+
+ integer :: i, j
+
+ if (model%options%is_restart == RESTART_TRUE) then
+ ! On a restart, just assign the basal velocity from uvel/vvel (which are restart variables)
+ ! to ubas/vbas which are used by the temperature solver to calculate basal heating.
+ ! During time stepping ubas/vbas are calculated by slipvelo during thickness evolution or below on a cold start.
+ model%velocity%ubas = model%velocity%uvel(model%general%upn,:,:)
+ model%velocity%vbas = model%velocity%vvel(model%general%upn,:,:)
+
+ else
+ ! Only make the calculations on a cold start.
+
+ ! ------------------------------------------------------------------------
+ ! ***Part 1: Make geometry consistent with calving law, if necessary
+ ! ------------------------------------------------------------------------
+
+ ! ------------------------------------------------------------------------
+ ! Remove ice which is either floating, or is present below prescribed
+ ! depth, depending on value of whichmarn
+ ! ------------------------------------------------------------------------
+
+ ! On a cold start, marinlim needs the mask to be calculated, but a call to
+ ! glide_set_mask occurs in glide_initialise, so we should be set here without calling it again.
+
+ call glide_marinlim(model%options%whichmarn, &
+ model%geometry%thck, &
+ model%isostasy%relx, &
+ model%geometry%topg, &
+ model%geometry%thkmask, &
+ model%numerics%mlimit, &
+ model%numerics%calving_fraction, &
+ model%climate%eus, &
+ model%climate%calving, &
+ model%ground, &
+ model%numerics%dew, &
+ model%numerics%dns, &
+ model%general%nsn, &
+ model%general%ewn)
+
+ ! We now need to recalculate the mask because marinlim may have modified the geometry.
+ call glide_set_mask(model%numerics, &
+ model%geometry%thck, model%geometry%topg, &
+ model%general%ewn, model%general%nsn, &
+ model%climate%eus, model%geometry%thkmask, &
+ model%geometry%iarea, model%geometry%ivol)
+
+ ! Compute total areas of grounded and floating ice
+ call calc_iareaf_iareag(model%numerics%dew, model%numerics%dns, &
+ model%geometry%thkmask, &
+ model%geometry%iareaf, model%geometry%iareag)
+
+ ! ------------------------------------------------------------------------
+ ! ***Part 2: Calculate geometry related fields
+ ! ------------------------------------------------------------------------
+
+ ! ------------------------------------------------------------------------
+ ! calculate upper and lower ice surface
+ ! ------------------------------------------------------------------------
+
+ call glide_calclsrf(model%geometry%thck, model%geometry%topg, &
+ model%climate%eus, model%geometry%lsrf)
+
+ model%geometry%usrf = max(0.d0,model%geometry%thck + model%geometry%lsrf)
+
+ ! ------------------------------------------------------------------------
+ ! Calculate various derivatives
+ !
+ ! This call is needed here to make sure stagthck is calculated
+ ! the same way as in thck_lin_evolve/thck_nonlin_evolve
+ ! ------------------------------------------------------------------------
+
+ call glide_prof_start(model,model%glide_prof%geomderv)
+
+ call glide_geometry_derivs(model) ! stagvarb, geomders as in old Glide
+
+ call glide_prof_stop(model,model%glide_prof%geomderv)
+
+ call glide_prof_start(model,model%glide_prof%ice_mask1)
+
+ !TREY This sets local values of dom, mask, totpts, and empty
+ !EIB! call veries between lanl and gc2, this is lanl version
+ !magi a hack, someone explain what whichthck=5 does
+
+!WHL - Modified this subroutine so that ice can accumulate in regions with
+! a small positive mass balance.
+
+ call glide_thck_index(model%geometry% thck, &
+ model%climate% acab, &
+ model%geometry% thck_index, &
+ model%geometry% totpts, &
+ .true., &
+ model%geometry% empty)
+
+ call glide_prof_stop(model,model%glide_prof%ice_mask1)
+
+
+ ! ------------------------------------------------------------------------
+ ! Part 3: Solve velocity
+ ! ------------------------------------------------------------------------
+
+ ! initial value for flwa should already be calculated as part of glide_init_temp()
+ ! calculate the part of the vertically averaged velocity field which solely depends on the temperature
+
+ call velo_integrate_flwa(model%velowk, &
+ model%geomderv%stagthck, &
+ model%temper%flwa)
+
+ ! Calculate diffusivity
+
+ call velo_calc_diffu(model%velowk, &
+ model%geomderv%stagthck, &
+ model%geomderv%dusrfdew, &
+ model%geomderv%dusrfdns, &
+ model%velocity%diffu)
+
+ ! If necessary, compute staggered variables required for basal traction calculation
+
+ if (model%options%whichbtrc == BTRC_CONSTANT_BWAT) then
+
+ !TODO - I think the next two calls are not needed, given that bwat should be in restart file for this option.
+
+ ! Calculate basal melt rate --------------------------------------------------
+ ! Note: For the initial state, we won't have values for ubas/vbas (unless they were
+ ! supplied in the input file) to get an initial guess of sliding heating.
+ ! We could iterate on this, but for simplicity that is not done.
+
+ call glide_calcbmlt(model, &
+!! model%options%which_bmelt, &
+ model%temper%temp, &
+ model%geometry%thck, &
+ model%geomderv%stagthck, &
+ model%geomderv%dusrfdew, &
+ model%geomderv%dusrfdns, &
+ model%velocity%ubas, &
+ model%velocity%vbas, &
+ model%temper%bmlt, &
+ GLIDE_IS_FLOAT(model%geometry%thkmask))
+
+ ! Note: calcbwat computes stagbwat
+ call calcbwat(model, &
+ model%options%whichbwat, &
+ model%temper%bmlt, &
+ model%temper%bwat, &
+ model%temper%bwatflx, &
+ model%geometry%thck, &
+ model%geometry%topg, &
+ model%temper%temp(model%general%upn,:,:), &
+ GLIDE_IS_FLOAT(model%geometry%thkmask), &
+ model%tempwk%wphi)
+
+
+ ! This call is redundant for now, but is needed if the call to calcbwat is removed
+ call stagvarb(model%temper%bwat, &
+ model%temper%stagbwat ,&
+ model%general%ewn, &
+ model%general%nsn)
+
+ elseif (model%options%whichbtrc == BTRC_CONSTANT_TPMP) then
+
+ ! stagger the basal temperature, pressure melting point, and its stagger
+ call stagvarb(model%temper%temp(model%general%upn,1:model%general%ewn,1:model%general%nsn), &
+ model%temper%stagbtemp ,&
+ model%general% ewn, &
+ model%general% nsn)
+
+ call glide_calcbpmp(model, &
+ model%geometry%thck, &
+ model%temper%bpmp)
+
+ call stagvarb(model%temper%bpmp, &
+ model%temper%stagbpmp ,&
+ model%general% ewn, &
+ model%general% nsn)
+
+ endif ! whichbtrc
+
+ !------------------------------------------------------------------------
+ ! Calculate basal traction factor
+ !------------------------------------------------------------------------
+
+ !NOTE(review): this loop overwrites stagbtemp (set above only for the
+ ! BTRC_CONSTANT_TPMP case) for all whichbtrc options, using stagbpmp where
+ ! stagthck < 1000 m and -20 C elsewhere - confirm this is intentional and
+ ! not leftover debug code.
+ do j = 1, model%general%nsn-1
+ do i = 1, model%general%ewn-1
+ if (model%geomderv%stagthck(i,j)*thk0 < 1000.d0) then
+ model%temper%stagbtemp(i,j) = model%temper%stagbpmp(i,j)
+ else
+ model%temper%stagbtemp(i,j) = -20.d0
+ endif
+ enddo
+ enddo
+
+ call calc_btrc(model, &
+ model%options%whichbtrc, &
+ model%velocity%btrc)
+
+ ! basal sliding velocity from the traction factor (flag 0 path)
+ call slipvelo(model, &
+ 0, &
+ model%velocity%btrc, &
+ model%velocity%ubas, &
+ model%velocity%vbas)
+
+
+ ! Calculate velocity
+ call velo_calc_velo(model%velowk, model%geomderv%stagthck, &
+ model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+ model%temper%flwa, model%velocity%diffu, &
+ model%velocity%ubas, model%velocity%vbas, &
+ model%velocity%uvel, model%velocity%vvel, &
+ model%velocity%uflx, model%velocity%vflx, &
+ model%velocity%velnorm)
+
+ endif ! if a restart
+
+
+ ! MJH: I have left these calls outside of the restart if-construct so that there will
+ ! always be a velnorm field calculated, which can be helpful for debugging.
+
+ ! ------------------------------------------------------------------------
+ ! Part 4: Calculate other diagnostic fields that depend on velocity
+ ! ------------------------------------------------------------------------
+
+ ! ------------------------------------------------------------------------
+ ! basal shear stress calculation
+ ! ------------------------------------------------------------------------
+
+ call calc_basal_shear(model%geomderv%stagthck, &
+ model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+ model%velocity%tau_x, model%velocity%tau_y)
+
+ ! velocity norm
+ model%velocity%velnorm = sqrt(model%velocity%uvel**2 + model%velocity%vvel**2)
+
+!WHL - debug
+ if (verbose_glide) then
+
+ print*, ' '
+ print*, 'stagthck:'
+ do i = 1, model%general%ewn-1
+ write(6,'(i7)',advance='no') i
+ enddo
+ print*, ' '
+ do j = model%general%nsn-1, 1, -1
+ write(6,'(i3)',advance='no') j
+ do i = 1, model%general%ewn-1
+ write(6,'(f7.2)',advance='no') model%geomderv%stagthck(i,j)*thk0
+ enddo
+ print*, ' '
+ enddo
+
+ print*, ' '
+ print*, 'diffu (m^2/yr):'
+ do i = 1, model%general%ewn-1
+ write(6,'(i8)',advance='no') i
+ enddo
+ print*, ' '
+ do j = model%general%nsn-1, 1, -1
+ write(6,'(i3)',advance='no') j
+ do i = 1, model%general%ewn-1
+ write(6,'(f8.0)',advance='no') -model%velocity%diffu(i,j) * vel0*len0*scyr
+ enddo
+ print*, ' '
+ enddo
+
+ print*, ' '
+ print*, 'ubas:'
+ do i = 1, model%general%ewn-1
+ write(6,'(i7)',advance='no') i
+ enddo
+ print*, ' '
+ do j = model%general%nsn-1, 1, -1
+ write(6,'(i4)',advance='no') j
+ do i = 1, model%general%ewn-1
+ write(6,'(f7.2)',advance='no') model%velocity%uvel(model%general%upn,i,j)*(vel0*scyr)
+ enddo
+ print*, ' '
+ enddo
+
+ print*, ' '
+ print*, 'vbas:'
+ do i = 1, model%general%ewn-1
+ write(6,'(i7)',advance='no') i
+ enddo
+ print*, ' '
+ do j = model%general%nsn-1, 1, -1
+ write(6,'(i4)',advance='no') j
+ do i = 1, model%general%ewn-1
+ write(6,'(f7.2)',advance='no') model%velocity%vvel(model%general%upn,i,j)*(vel0*scyr)
+ enddo
+ print*, ' '
+ enddo
+
+ print*, ' '
+ print*, 'uvel, k = 1:'
+ do i = 1, model%general%ewn-1
+ write(6,'(i8)',advance='no') i
+ enddo
+ print*, ' '
+ do j = model%general%nsn-1, 1, -1
+ write(6,'(i4)',advance='no') j
+ do i = 1, model%general%ewn-1
+ write(6,'(f8.2)',advance='no') model%velocity%uvel(1,i,j) * (vel0*scyr)
+ enddo
+ print*, ' '
+ enddo
+
+ print*, ' '
+ print*, 'u=vvel, k = 1:'
+ do i = 1, model%general%ewn-1
+ write(6,'(i8)',advance='no') i
+ enddo
+ print*, ' '
+ do j = model%general%nsn-1, 1, -1
+ write(6,'(i4)',advance='no') j
+ do i = 1, model%general%ewn-1
+ write(6,'(f8.2)',advance='no') model%velocity%vvel(1,i,j) * (vel0*scyr)
+ enddo
+ print*, ' '
+ enddo
+
+ endif ! verbose_glide
+
+ end subroutine glide_init_state_diagnostic
+
+!=======================================================================
+
+ subroutine glide_tstep_p1(model,time)
+
+ ! Perform first part of time-step of an ice model instance:
+ ! temperature advection, vertical conduction, and internal dissipation.
+ !
+ ! Also updates geometry derivatives, the lithosphere heat flux (if
+ ! computed), basal hydrology (when temperature is updated), and the
+ ! basal traction factor used later in the thickness evolution step.
+
+ use glide_thck
+ use glide_velo
+ use glide_temp
+ use glide_mask
+ use glimmer_paramets, only: tim0
+ use glimmer_physcon, only: scyr
+ use glide_grid_operators
+ use glide_bwater
+
+ type(glide_global_type), intent(inout) :: model ! model instance
+ real(dp), intent(in) :: time ! current time in years
+
+ ! Update internal clock
+ model%numerics%time = time
+ model%temper%newtemps = .false. ! set .true. below if temperature is recomputed
+
+ ! previous time = current time minus one timestep (dt converted to years)
+ model%thckwk%oldtime = model%numerics%time - (model%numerics%dt * tim0/scyr)
+
+ call glide_prof_start(model,model%glide_prof%geomderv)
+
+ ! Update geometric quantities: stagthck, dusrfdew/dns, dthckdew/dns
+
+ call glide_geometry_derivs(model) ! compute stagthck, dusrfdew/dns, dthckdew/dns
+
+ call glide_prof_stop(model,model%glide_prof%geomderv)
+
+ call glide_prof_start(model,model%glide_prof%ice_mask1)
+
+ !WHL - Modified this subroutine so that ice can accumulate in regions with
+ ! a small positive mass balance.
+
+ call glide_thck_index(model%geometry% thck, &
+ model%climate% acab, &
+ model%geometry% thck_index, &
+ model%geometry% totpts, &
+ .true., &
+ model%geometry% empty)
+
+ call glide_prof_stop(model,model%glide_prof%ice_mask1)
+
+ ! ------------------------------------------------------------------------
+ ! calculate geothermal heat flux
+ ! ------------------------------------------------------------------------
+
+ if (model%options%gthf == GTHF_COMPUTE) then
+ call calc_lithot(model)
+ end if
+
+ ! ------------------------------------------------------------------------
+ ! Calculate temperature evolution and Glen's A, if necessary
+ ! (only when the current time falls on a temperature step, i.e. a
+ ! multiple of dttem to within the output interval tinc)
+ ! ------------------------------------------------------------------------
+
+ ! Note: These times have units of years.
+ ! dttem has scaled units, so multiply by tim0/scyr to convert to years
+
+ if ( model%numerics%tinc > mod(model%numerics%time,model%numerics%dttem*tim0/scyr)) then
+
+ call glide_prof_start(model,model%glide_prof%temperature)
+
+ if (oldglide) then ! compute vertical velocity in glide_tstep_p1
+ ! In new glide, this is called in glide_tstep_p3
+
+ call glide_velo_vertical(model)
+
+ endif ! oldglide = T
+
+ ! temperature advection, vertical conduction, and internal dissipation
+
+ call glide_temp_driver(model, model%options%whichtemp)
+
+ model%temper%newtemps = .true.
+
+ call glide_prof_stop(model,model%glide_prof%temperature)
+
+ ! Update hydrology, if needed ------------------------------------------------
+ call calcbwat(model, &
+ model%options%whichbwat, &
+ model%temper%bmlt, &
+ model%temper%bwat, &
+ model%temper%bwatflx, &
+ model%geometry%thck, &
+ model%geometry%topg, &
+ model%temper%temp(model%general%upn,:,:), &
+ GLIDE_IS_FLOAT(model%geometry%thkmask), &
+ model%tempwk%wphi)
+
+ end if
+
+ ! ------------------------------------------------------------------------
+ ! Calculate basal traction factor
+ ! ------------------------------------------------------------------------
+
+ call calc_btrc(model, &
+ model%options%whichbtrc, &
+ model%velocity%btrc)
+
+
+!WHL - debug
+! print*, ' '
+! print*, 'After glide_tstep_p1:'
+! print*, 'max, min temp =', maxval(model%temper%temp), minval(model%temper%temp)
+! print*, 'max, min flwa =', maxval(model%temper%flwa), minval(model%temper%flwa)
+
+! print*, ' '
+! print*, 'temp, k = 2:'
+! do j = model%general%nsn+1, 0, -1
+! write(6,'(14f12.7)') model%temper%temp(2,3:16,j)
+! enddo
+! print*, 'basal temp:'
+! do j = model%general%nsn+1, 0, -1
+! write(6,'(14f12.7)') model%temper%temp(model%general%upn,3:16,j)
+! enddo
+
+ end subroutine glide_tstep_p1
+
+!=======================================================================
+
+  subroutine glide_tstep_p2(model)
+
+    ! Perform second part of time-step of an ice model instance:
+    ! thickness evolution by one of several methods.
+    !
+    ! Order of operations:
+    !   1. evolve ice thickness (method selected by model%options%whichevol)
+    !   2. recompute the thickness mask
+    !   3. remove floating/deep ice via glide_marinlim (calving)
+    !   4. recompute the mask and floating/grounded areas (new glide only)
+    !   5. update the ice/water load for isostasy, if due
+    !   6. compute basal shear stress and a velocity-norm diagnostic
+
+    use glide_thck
+    use glide_velo
+    use glide_temp
+    use glide_mask
+    use isostasy
+    use glide_ground, only: glide_marinlim
+
+    type(glide_global_type), intent(inout) :: model   ! model instance
+
+    ! ------------------------------------------------------------------------
+    ! Calculate flow evolution by various different methods
+    ! ------------------------------------------------------------------------
+
+    call glide_prof_start(model,model%glide_prof%ice_evo)
+
+    ! newtemps (set by glide_tstep_p1) tells the evolution scheme whether
+    ! the temperature/flow-factor fields were updated this step.
+    select case(model%options%whichevol)
+
+    case(EVOL_PSEUDO_DIFF)   ! Use precalculated uflx, vflx -----------------------------------
+
+       call thck_lin_evolve(model,model%temper%newtemps)
+
+    case(EVOL_ADI)   ! Use explicit leap frog method with uflx,vflx -------------------
+
+       call stagleapthck(model,model%temper%newtemps)
+
+    case(EVOL_DIFFUSION)   ! Use non-linear calculation that incorporates velocity calc -----
+
+       call thck_nonlin_evolve(model,model%temper%newtemps)
+
+    end select
+
+    call glide_prof_stop(model,model%glide_prof%ice_evo)
+
+    ! ------------------------------------------------------------------------
+    ! get new mask
+    ! Note: A call to glide_set_mask is needed before glide_marinlim.
+    ! ------------------------------------------------------------------------
+
+    call glide_prof_start(model,model%glide_prof%ice_mask2)
+
+    !TODO - Calculate area and vol separately from glide_set_mask?
+
+    call glide_set_mask(model%numerics, &
+                        model%geometry%thck, model%geometry%topg, &
+                        model%general%ewn, model%general%nsn, &
+                        model%climate%eus, model%geometry%thkmask, &
+                        model%geometry%iarea, model%geometry%ivol)
+
+    call glide_prof_stop(model,model%glide_prof%ice_mask2)
+
+    ! ------------------------------------------------------------------------
+    ! Remove ice which is either floating, or is present below prescribed
+    ! depth, depending on value of whichmarn
+    ! ------------------------------------------------------------------------
+
+    !TODO - Some arguments for glide_marinlim may not be needed.
+    !       Old glide includes only arguments through model%climate%calving.
+
+    call glide_marinlim(model%options%whichmarn, &
+                        model%geometry%thck, &
+                        model%isostasy%relx, &
+                        model%geometry%topg, &
+                        model%geometry%thkmask, &
+                        model%numerics%mlimit, &
+                        model%numerics%calving_fraction, &
+                        model%climate%eus, &
+                        model%climate%calving, &
+                        model%ground, &
+                        model%numerics%dew, &
+                        model%numerics%dns, &
+                        model%general%nsn, &
+                        model%general%ewn)
+
+    ! Recalculate the mask following calving
+    ! Note - This call to glide_set_mask is not in old Glide, but should have been.
+
+    if (.not. oldglide) then   ! recalculate the thickness mask after calving
+       call glide_set_mask(model%numerics, &
+                           model%geometry%thck, model%geometry%topg, &
+                           model%general%ewn, model%general%nsn, &
+                           model%climate%eus, model%geometry%thkmask, &
+                           model%geometry%iarea, model%geometry%ivol)
+    endif   ! oldglide = F
+
+    if (.not. oldglide) then   ! calculate area of floating and grounded ice
+       call calc_iareaf_iareag(model%numerics%dew, model%numerics%dns, &
+                               model%geometry%thkmask, &
+                               model%geometry%iareaf, model%geometry%iareag)
+    endif   ! oldglide = F
+
+    ! ------------------------------------------------------------------------
+    ! update ice/water load if necessary
+    ! ------------------------------------------------------------------------
+
+    call glide_prof_start(model,model%glide_prof%isos_water)
+
+    ! The load is recomputed only when the isostasy calculation is due
+    ! (next_calc), then flagged via new_load for isos_compute in tstep_p3.
+    if (model%options%isostasy == ISOSTASY_COMPUTE) then
+       if (model%numerics%time >= model%isostasy%next_calc) then
+          model%isostasy%next_calc = model%isostasy%next_calc + model%isostasy%period
+          call isos_icewaterload(model)
+          model%isostasy%new_load = .true.
+       end if
+    end if
+
+    call glide_prof_stop(model,model%glide_prof%isos_water)
+
+    ! ------------------------------------------------------------------------
+    ! basal shear stress calculation
+    ! ------------------------------------------------------------------------
+
+    ! Old glide just passes 'model'
+
+    call calc_basal_shear(model%geomderv%stagthck, &
+                          model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+                          model%velocity%tau_x, model%velocity%tau_y)
+
+    ! not in old glide, but this is a useful diagnostic
+
+    ! velocity norm: elementwise speed from the horizontal components
+    model%velocity%velnorm = sqrt(model%velocity%uvel**2 + model%velocity%vvel**2)
+
+!WHL - debug
+!    print*, ' '
+!    print*, 'After tstep_p2:'
+!    print*, 'max, min thck (m)=', maxval(model%geometry%thck)*thk0, minval(model%geometry%thck)*thk0
+!    print*, 'max, min usrf (m)=', maxval(model%geometry%usrf)*thk0, minval(model%geometry%usrf)*thk0
+!    print*, 'max uvel, vvel =', maxval(model%velocity%uvel), maxval(model%velocity%vvel)
+
+!    print*, ' '
+!    print*, 'thck:'
+!    do j = model%general%nsn, 1, -1
+!       write(6,'(14f12.7)') thk0 * model%geometry%thck(3:16,j)
+!    enddo
+!    print*, 'sfc uvel:'
+!    do j = model%general%nsn-1, 1, -1
+!       write(6,'(14f12.7)') model%velocity%uvel(1,3:16,j)
+!    enddo
+!    print*, 'sfc vvel:'
+!    do j = model%general%nsn-1, 1, -1
+!       write(6,'(14f12.7)') model%velocity%vvel(1,3:16,j)
+!    enddo
+
+  end subroutine glide_tstep_p2
+
+!=======================================================================
+
+  subroutine glide_tstep_p3(model)
+
+    ! Perform third part of time-step of an ice model instance:
+    ! calculate isostatic adjustment and upper and lower ice surface.
+    ! Also increments the model time counter and (new glide only) computes
+    ! the vertical velocity needed for exact restart.
+
+    use isostasy
+    use glide_setup
+    use glide_velo, only: glide_velo_vertical
+    use glide_thck, only: glide_calclsrf
+    implicit none
+
+    type(glide_global_type), intent(inout) :: model   ! model instance
+
+    ! ------------------------------------------------------------------------
+    ! Calculate isostasy
+    ! ------------------------------------------------------------------------
+
+    call glide_prof_start(model,model%glide_prof%isos)
+
+    if (model%options%isostasy == ISOSTASY_COMPUTE) then
+       call isos_compute(model)
+    end if
+
+    call glide_prof_stop(model,model%glide_prof%isos)
+
+    ! ------------------------------------------------------------------------
+    ! calculate upper and lower ice surface
+    ! ------------------------------------------------------------------------
+
+    call glide_calclsrf(model%geometry%thck, model%geometry%topg, &
+                        model%climate%eus, model%geometry%lsrf)
+
+    ! Upper surface = lower surface + thickness, floored at sea level (0).
+    model%geometry%usrf = max(0.d0,model%geometry%thck + model%geometry%lsrf)
+
+    !TODO - Move timecounter to a driver routine?
+    !CESM Glimmer code has this after the netCDF write.
+
+    ! increment time counter
+    model%numerics%timecounter = model%numerics%timecounter + 1
+
+    !TODO - Combine these timeders and vert velo calls into a subroutine?
+
+    ! For exact restart, compute wgrd here and write it to the restart file.
+    ! (This is easier than writing thckwk quantities to the restart file.)
+
+    if (.not. oldglide) then   ! compute vertical velocity in glide_tstep_p3
+
+       ! compute vertical velocity
+       ! (t_startf/t_stopf bracket the call with performance timers)
+
+       call t_startf('vertical_velo')
+
+       call glide_velo_vertical(model)
+
+       call t_stopf('vertical_velo')
+
+    endif   ! oldglide = F
+
+    !WHL - Moved netCDF output to simple_glide
+    !!    call glide_io_writeall(model,model)
+
+  end subroutine glide_tstep_p3
+
+!=======================================================================
+
+end module glide
diff --git a/components/cism/glimmer-cism/libglide/glide_bwater.F90 b/components/cism/glimmer-cism/libglide/glide_bwater.F90
new file mode 100644
index 0000000000..e515bd292a
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_bwater.F90
@@ -0,0 +1,716 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_bwater.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!TODO - Support Jesse's water-routing code (or something similar) in parallel? Currently serial only.
+
+module glide_bwater
+
+ use glimmer_global, only: dp
+ use glide_types
+
+ implicit none
+
+contains
+
+  subroutine bwater_init(model)
+    ! Driver for initializing basal hydrology.
+    !
+    ! Allocates work arrays and precomputes the hydrology sub-time-step and
+    ! the constants vector model%tempwk%c used later by calcbwat, for the
+    ! option selected by model%options%whichbwat.
+    !
+    ! NOTE(review): hydtim (and watvel) are converted in place from physical
+    ! to nondimensional units below, so this routine must be called exactly
+    ! once per model instance — TODO confirm the driver guarantees this.
+
+    use glimmer_paramets
+
+    implicit none
+
+    type(glide_global_type),intent(inout) :: model
+    real(dp) :: estimate   ! first guess at a stable hydrology sub-step
+
+    select case(model%options%whichbwat)
+    case(BWATER_LOCAL)
+
+       ! Local water balance: bwat relaxes toward a melt-fed equilibrium.
+       allocate(model%tempwk%smth(model%general%ewn,model%general%nsn))
+
+       ! Convert hydtim from years to a nondimensional decay rate.
+       model%paramets%hydtim = tim0 / (model%paramets%hydtim * scyr)
+       estimate = 0.2d0 / model%paramets%hydtim
+       !EIB! following not in lanl glide_temp
+       call find_dt_wat(model%numerics%dttem,estimate,model%tempwk%dt_wat,model%tempwk%nwat)
+
+       ! Coefficients of the semi-implicit (Crank-Nicolson-like) local update:
+       ! bwat_new = (c1*bmlt + c2*bwat_old) / c3; slots 4-8 unused here.
+       model%tempwk%c = (/ model%tempwk%dt_wat, 1.0d0 - 0.5d0 * model%tempwk%dt_wat * model%paramets%hydtim, &
+            1.0d0 + 0.5d0 * model%tempwk%dt_wat * model%paramets%hydtim, 0.0d0, 0.0d0, 0.0d0, 0.0d0, 0.0d0 /)
+
+       !TODO - Test option BWATER_FLUX. Note: It has not been parallelized.
+
+    case(BWATER_FLUX)   ! steady-state routing using flux calculation
+
+       allocate(model%tempwk%wphi(model%general%ewn,model%general%nsn))
+
+       ! Water velocity scale (nondimensional); bounds the routing sub-step.
+       model%tempwk%watvel = model%paramets%hydtim * tim0 / (scyr * len0)
+       estimate = (0.2d0 * model%tempwk%watvel) / min(model%numerics%dew,model%numerics%dns)
+       call find_dt_wat(model%numerics%dttem,estimate,model%tempwk%dt_wat,model%tempwk%nwat)
+
+       !print *, model%numerics%dttem*tim0/scyr, model%tempwk%dt_wat*tim0/scyr, model%tempwk%nwat
+
+       ! Pressure, grid-spacing and sub-step constants used by flux routing.
+       model%tempwk%c = (/ rhow * grav, rhoi * grav, 2.0d0 * model%numerics%dew, 2.0d0 * model%numerics%dns, &
+            0.25d0 * model%tempwk%dt_wat / model%numerics%dew, 0.25d0 * model%tempwk%dt_wat / model%numerics%dns, &
+            0.5d0 * model%tempwk%dt_wat / model%numerics%dew, 0.5d0 * model%tempwk%dt_wat / model%numerics%dns /)
+    end select
+
+  end subroutine bwater_init
+
+
+
+  subroutine calcbwat(model, which, bmlt, bwat, bwatflx, thck, topg, btem, floater, wphi)
+    ! Driver for updating basal hydrology.
+    !
+    ! Updates the basal water depth (and, for some options, the effective
+    ! pressure) according to 'which', then interpolates bwat to the
+    ! staggered (velocity) grid and, if a friction law needs it, staggers
+    ! a capped copy of the effective pressure.
+    !
+    ! Arguments:
+    !   model   - model instance (work arrays, options, parameters)
+    !   which   - basal water option (one of the BWATER_* values below)
+    !   bmlt    - basal melt rate (in)
+    !   bwat    - basal water depth (updated in place)
+    !   bwatflx - basal water flux (out; set by BWATER_FLUX)
+    !   thck    - ice thickness (in)
+    !   topg    - bedrock topography (in)
+    !   btem    - basal temperature (in; not referenced in this routine)
+    !   floater - mask, true where ice is floating (in)
+    !   wphi    - basal water potential (out; set by BWATER_FLUX)
+
+    use parallel
+    use glimmer_paramets, only : thk0
+    use glide_grid_operators, only: stagvarb
+    use glissade_grid_operators, only: glissade_stagger
+
+    implicit none
+
+    type(glide_global_type),intent(inout) :: model
+    integer, intent(in) :: which
+    real(dp), dimension(:,:), intent(inout) :: bwat, wphi, bwatflx
+    real(dp), dimension(:,:), intent(in) :: bmlt, thck, topg, btem
+    logical, dimension(:,:), intent(in) :: floater
+
+    ! Lower/upper thresholds on water depth (nondimensional): below blim(1)
+    ! the layer is zeroed; above blim(2) it participates in smoothing.
+    real(dp), dimension(2), parameter :: &
+         blim = (/ 0.00001 / thk0, 0.001 / thk0 /)
+
+    integer :: t_wat,ns,ew
+
+    real(dp), dimension(model%general%ewn,model%general%nsn) :: N, flux, lakes
+    real(dp) :: c_effective_pressure,c_flux_to_depth,p_flux_to_depth,q_flux_to_depth
+
+    real(dp), parameter :: const_bwat = 10.d0   ! constant value for basal water depth (m)
+
+    ! Variables used by BWATER_OCEAN_PENETRATION
+    real(dp), allocatable, dimension(:,:) :: Haf   !< Floatation thickness (m)
+    real(dp), allocatable, dimension(:,:) :: Fp    !< function that controls ocean pressure transition
+    real(dp) :: ocean_p
+
+    real(dp), dimension(:,:), allocatable :: N_capped   ! version of effective pressure capped at 0x and 1x overburden
+
+    ! NOTE(review): with c_effective_pressure = 0, effective_pressure()
+    ! returns N = 0 wherever water is present, so the BWATER_FLUX potential
+    ! reduces to the overburden + bed terms only — confirm this is intended.
+    c_effective_pressure = 0.0d0         ! For now estimated with c/w
+    c_flux_to_depth = 1./(1.8d-3*12.0d0) !
+    p_flux_to_depth = 2.0d0              ! exponent on the depth
+    q_flux_to_depth = 1.0d0              ! exponent on the potential gradient
+
+    select case (which)
+
+    ! which = BWATER_NONE    Nothing, basal water depth = 0.
+    ! which = BWATER_LOCAL   Completely local, bwat_new = c1 * melt_rate + c2 * bwat_old
+    ! which = BWATER_FLUX    Flux based calculation
+    ! which = BWATER_BASAL_PROC, till water content in the basal processes module
+    ! which = BWATER_OCEAN_PENETRATION, effective pressure from ocean penetration parameterization (Leguy et al 2014)
+
+    case(BWATER_LOCAL)
+
+       ! Semi-implicit local update; coefficients were set in bwater_init:
+       !   model%tempwk%c(1) = model%tempwk%dt_wat
+       !   c(2) = 1.0d0 - 0.5d0 * model%tempwk%dt_wat * model%paramets%hydtim
+       !   c(3) = 1.0d0 + 0.5d0 * model%tempwk%dt_wat * model%paramets%hydtim
+
+       do t_wat = 1, model%tempwk%nwat
+
+          !LOOP - For glissade, loop should be over locally owned cells (ilo:ihi,jo:jhi).
+
+          do ns = 1,model%general%nsn
+             do ew = 1,model%general%ewn
+
+                ! Water accumulates only under grounded ice thicker than
+                ! thklim; depths below blim(1) are rounded down to zero.
+                if (model%numerics%thklim < thck(ew,ns) .and. .not. floater(ew,ns)) then
+                   bwat(ew,ns) = (model%tempwk%c(1) * bmlt(ew,ns) + model%tempwk%c(2) * bwat(ew,ns)) / &
+                                  model%tempwk%c(3)
+                   if (bwat(ew,ns) < blim(1)) then
+                      bwat(ew,ns) = 0.0d0
+                   end if
+                else
+                   bwat(ew,ns) = 0.0d0
+                end if
+
+             end do
+          end do
+       end do
+
+       ! Smooth the interior of the field (see internal smooth_bwat).
+       model%tempwk%smth = 0.
+       do ns = 2,model%general%nsn-1
+          do ew = 2,model%general%ewn-1
+             call smooth_bwat(ew-1,ew,ew+1,ns-1,ns,ns+1)
+          end do
+       end do
+
+       ! apply periodic BC
+       if (model%options%periodic_ew) then
+          do ns = 2,model%general%nsn-1
+             call smooth_bwat(model%general%ewn-1,1,2,ns-1,ns,ns+1)
+             call smooth_bwat(model%general%ewn-1,model%general%ewn,2,ns-1,ns,ns+1)
+          end do
+       end if
+
+       bwat(1:model%general%ewn,1:model%general%nsn) = &
+            model%tempwk%smth(1:model%general%ewn,1:model%general%nsn)
+
+    ! Case added by Jesse Johnson 11/15/08
+    ! Steady state routing of basal water using flux calculation
+
+    case(BWATER_FLUX)
+
+       ! Potential-driven steady-state routing: effective pressure from the
+       ! current depth, potential wphi from geometry and N, route melt
+       ! down-potential to get bwatflx, then invert flux for depth.
+       call effective_pressure(bwat,c_effective_pressure,N)
+       call pressure_wphi(thck,topg,N,wphi,model%numerics%thklim,floater)
+       call route_basal_water(wphi,bmlt,model%numerics%dew,model%numerics%dns,bwatflx,lakes)
+       call flux_to_depth(bwatflx,wphi,c_flux_to_depth,p_flux_to_depth,q_flux_to_depth,model%numerics%dew,model%numerics%dns,bwat)
+
+    case(BWATER_CONST)
+
+       ! Use a constant thickness of water, to force Tpmp.
+       bwat(:,:) = const_bwat / thk0
+
+!!    case(BWATER_BASAL_PROC)   ! not currently supported
+
+       ! Normalized basal water
+
+!!       bwat = model%basalproc%Hwater / thk0
+
+    case(BWATER_OCEAN_PENETRATION)
+
+       ! Effective pressure from the ocean penetration parameterization of
+       ! Leguy et al. (2014); ocean_p controls the pressure transition.
+       allocate(Haf(model%general%ewn,model%general%nsn))
+       allocate(Fp(model%general%ewn,model%general%nsn))
+       ocean_p = model%paramets%p_ocean_penetration
+       ! NOTE(review): 'f' is not declared in this subroutine or visibly in
+       ! this module — presumably the flotation factor (rhow/rhoi applied to
+       ! bed depth relative to sea level). Confirm its definition/sign.
+       Haf = max(f * (topg*thk0 - model%climate%eus*thk0), 0.0d0)
+       Fp = max( (1.0d0 - Haf / (thck*thk0)), 0.0d0 )**ocean_p
+       model%basal_physics%effecpress = rhoi * grav * thck*thk0 * Fp
+       deallocate(Haf)
+       deallocate(Fp)
+
+    case default   ! includes BWATER_NONE
+
+       bwat(:,:) = 0.0d0
+
+    end select
+
+    !TODO - Switch to glissade version (glissade_stagger)
+    ! now also calculate basal water in velocity (staggered) coord system
+    ! (staggers model%temper%bwat, not the dummy bwat; the driver at the
+    !  call site passes model%temper%bwat for both)
+    call stagvarb(model%temper%bwat, &
+                  model%temper%stagbwat ,&
+                  model%general%ewn, &
+                  model%general%nsn)
+
+    ! Stagger effective pressure if a friction law will need it. cases
+    ! BWATER_OCEAN_PENETRATION, BWATER_SHEET calculate it, but it may also
+    ! be passed in as data or forcing.
+    ! cap the staggered effective pressure at 0x and 1x overburden pressure
+    ! to avoid strange values going to the friction laws
+    if ( (model%options%which_ho_babc == HO_BABC_POWERLAW) .or. &
+         (model%options%which_ho_babc == HO_BABC_COULOMB_FRICTION) ) then
+
+       allocate(N_capped(model%general%ewn,model%general%nsn))
+
+       where (model%basal_physics%effecpress < 0.0d0)
+          N_capped = 0.0d0
+       else where (model%basal_physics%effecpress > rhoi * grav * model%geometry%thck * thk0)
+          N_capped = rhoi * grav * model%geometry%thck * thk0
+       else where
+          N_capped = model%basal_physics%effecpress
+       end where
+
+       call glissade_stagger(model%general%ewn, model%general%nsn, &
+                             N_capped, model%basal_physics%effecpress_stag, &
+                             model%geometry%thkmask, stagger_margin_in=1)   ! only use values where there is ice
+
+       deallocate(N_capped)
+    endif
+
+  contains
+
+    ! Internal subroutine for smoothing
+    subroutine smooth_bwat(ewm,ew,ewp,nsm,ns,nsp)
+      ! smoothing basal water distrib: one 5-point Laplacian step weighted
+      ! by bwat_smooth, applied only where the layer exceeds blim(2);
+      ! thinner cells are copied through unchanged.
+      implicit none
+      integer, intent(in) :: ewm,ew,ewp,nsm,ns,nsp
+
+      if (bwat(ew,ns) > blim(2)) then
+         model%tempwk%smth(ew,ns) = bwat(ew,ns) + model%paramets%bwat_smooth * &
+              (bwat(ewm,ns) + bwat(ewp,ns) + bwat(ew,nsm) + bwat(ew,nsp) - 4.0d0 * bwat(ew,ns))
+      else
+         model%tempwk%smth(ew,ns) = bwat(ew,ns)
+      end if
+    end subroutine smooth_bwat
+
+  end subroutine calcbwat
+
+  subroutine find_dt_wat(dttem,estimate,dt_wat,nwat)
+
+    !> Split the temperature time step dttem into nwat equal sub-steps.
+    !> nwat = floor(dttem/estimate) + 1, so each sub-step dt_wat is no
+    !> longer than the estimated stable step (and strictly shorter unless
+    !> dttem is an exact multiple of estimate).
+
+    implicit none
+
+    real(dp), intent(out) :: dt_wat     ! length of one hydrology sub-step
+    integer,  intent(out) :: nwat       ! number of sub-steps per dttem
+    real(dp), intent(in)  :: dttem      ! temperature time step to subdivide
+    real(dp), intent(in)  :: estimate   ! estimated stable sub-step length
+
+    real(dp) :: ratio   ! how many 'estimate'-sized steps fit in dttem
+
+    ratio  = dttem / estimate
+    nwat   = 1 + int(ratio)
+    dt_wat = dttem / nwat
+
+  end subroutine find_dt_wat
+
+ ! Note: This routing is supported in serial code only.
+
+ subroutine route_basal_water(wphi,melt,dx,dy,flux,lakes)
+ !> Routes water from melt field to its destination, recording flux
+ !> of water along the route. Water flow direction is determined according
+ !> to the gradient of a wphi elevation field. For the algorithm to
+ !> function properly depressions in the wphi surface must be filled.
+ !> this results in the lakes field, which is the difference between the
+ !> filled surface and the original wphi.
+ !> The method used is by Quinn et. al. (1991).
+ !>
+ !> 12/9/05 Jesse Johnson based on code from the glimmer_routing file
+ !> by Ian Rutt.
+
+ implicit none
+
+ real(dp),dimension(:,:),intent(in) :: wphi !> Input potential surface
+ real(dp),dimension(:,:),intent(in) :: melt !> Input melting field
+ real(dp), intent(in) :: dx !> Input $x$ grid-spacing
+ real(dp), intent(in) :: dy !> Input $y$ grid-spacing
+ real(dp),dimension(:,:),intent(out) :: flux !> Output flux field
+ real(dp),dimension(:,:),intent(out) :: lakes !> Output lakes field
+
+ ! Internal variables --------------------------------------
+
+ integer :: nx,ny,k,nn,cx,cy,px,py,x,y
+ integer, dimension(:,:),allocatable :: mask !> Masked points
+ integer, dimension(:,:),allocatable :: sorted
+ real(dp),dimension(:,:),allocatable :: flats,potcopy
+ real(dp),dimension(-1:1,-1:1) :: slopes
+ real(dp),dimension(-1:1,-1:1) :: dists
+ logical :: flag
+
+ ! Set up grid dimensions ----------------------------------
+
+ nx=size(wphi,1) ; ny=size(wphi,2)
+ nn=nx*ny
+
+ ! Change these distances for slope determination
+
+ dists(-1,:)=(/sqrt(dx**2+dy**2),dy,sqrt(dx**2+dy**2)/)
+ dists(0,:)=(/dx,0d0,dx/)
+ dists(1,:)=dists(-1,:)
+
+ ! Allocate internal arrays and copy data ------------------
+
+ allocate(sorted(nn,2),flats(nx,ny),potcopy(nx,ny),mask(nx,ny))
+ potcopy=wphi
+ mask=1
+
+ ! Fill holes in data, and sort heights --------------------
+
+ call fillholes(potcopy,flats,mask)
+ call heights_sort(potcopy,sorted)
+
+ lakes=potcopy-wphi
+
+ ! Initialise flux with melt, which will then be --------
+ ! redistributed. Multiply by area, so volumes are found.---
+
+ flux=melt * dx * dy
+
+ ! Begin loop over points, highest first -------------------
+
+ do k=nn,1,-1
+
+ ! Get location of current point -------------------------
+
+ x=sorted(k,1)
+ y=sorted(k,2)
+
+ ! Only propagate down slope positive values
+ if (melt(x,y) > 0) then
+
+ ! Reset flags and slope arrays --------------------------
+
+ flag=.true.
+ slopes=0.0
+
+ ! Loop over adjacent points, and calculate slopes -------
+
+ do cx=-1,1,1
+ do cy=-1,1,1
+ ! If this is the centre point, ignore
+ if (cx==0.and.cy==0) continue
+ ! Otherwise do slope calculation
+ px=x+cx ; py=y+cy
+ if (px > 0 .and. px<=nx .and. py > 0 .and. py <= ny) then
+ ! Only allow flow to points that are melted or freezing.
+ ! Testing relax this condition (Hell, Frank does).
+                    !NOTE(review): the original text here was lost in transit
+                    ! (everything between an angle-bracket pair was stripped,
+                    ! taking the end of route_basal_water and the header of
+                    ! flux_to_depth with it).  The code below is a reconstruction
+                    ! following the Quinn et al. (1991) multiple-flow-direction
+                    ! scheme described in this routine's header: compute downhill
+                    ! slopes to the 8 neighbours, normalize them, and distribute
+                    ! this point's flux in proportion.  Verify against the
+                    ! original glide_bwater.F90 / glimmer_routing source.
+                    if (potcopy(px,py) < potcopy(x,y)) then
+                       slopes(cx,cy) = (potcopy(x,y) - potcopy(px,py)) / dists(cx,cy)
+                    end if
+                 end if
+              end do
+           end do
+
+           ! Distribute this point's flux among its downhill neighbours,
+           ! weighted by slope.
+           if (sum(slopes) /= 0.0d0) then
+              slopes = slopes / sum(slopes)
+              do cx=-1,1
+                 do cy=-1,1
+                    px=x+cx ; py=y+cy
+                    if (px > 0 .and. px <= nx .and. py > 0 .and. py <= ny) then
+                       flux(px,py) = flux(px,py) + flux(x,y)*slopes(cx,cy)
+                    end if
+                 end do
+              end do
+           end if
+
+        end if   ! melt(x,y) > 0
+
+     end do   ! k, height-sorted points
+
+     deallocate(sorted,flats,potcopy,mask)
+
+  end subroutine route_basal_water
+
+!==============================================================
+
+  subroutine flux_to_depth(flux,wphi,c,p,q,dew,dns,bwat)
+
+    !> Assuming that the flow is steady state, this function simply solves
+ !> flux = depth * velocity
+ !> for the depth, assuming that the velocity is a function of depth,
+ !> and pressure potential. This amounts to assuming a Weertman film,
+ !> or Manning flow, both of which take the form of a constant times water
+ !> depth to a power, times pressure wphi to a power.
+
+ use glam_grid_operators, only: df_field_2d ! Find grad_wphi
+ use glimmer_physcon, only : scyr ! Seconds per year
+
+ real(dp),dimension(:,:),intent(in) :: flux ! Basal water flux
+ real(dp),dimension(:,:),intent(in) :: wphi ! Pressure wphi
+ real(dp) ,intent(in) :: c ! Constant of proportionality
+ real(dp) ,intent(in) :: p ! Exponent of the water depth
+ real(dp) ,intent(in) :: q ! Exponent of the pressure pot.
+ real(dp) ,intent(in) :: dew ! Grid spacing, ew direction
+ real(dp) ,intent(in) :: dns ! Grid spacing, ns direction
+ real(dp),dimension(:,:),intent(out):: bwat ! Water Depth
+
+ ! Internal variables
+ real(dp),dimension(:,:),allocatable :: grad_wphi, dwphidx, dwphidy
+
+ integer nx,ny,nn
+
+ ! Set up grid dimensions ----------------------------------
+ nx=size(flux,1) ; ny=size(flux,2)
+ nn=nx*ny
+
+ ! Allocate internal arrays and copy data ------------------
+ allocate(dwphidx(nx,ny),dwphidy(nx,ny),grad_wphi(nx,ny))
+
+ ! Compute the gradient of the potential field.
+ call df_field_2d(wphi,dew,dns,dwphidx,dwphidy)
+
+ grad_wphi = sqrt(dwphidx**2 + dwphidy**2)
+
+ where (grad_wphi /= 0.d0)
+ bwat = ( flux / (c * scyr * dns * grad_wphi ** q) ) ** (1./(p+1.))
+ elsewhere
+ bwat = 0.d0
+ endwhere
+
+
+ end subroutine flux_to_depth
+
+!==============================================================
+
+  subroutine effective_pressure(bwat,c,N)
+    !> Crude effective-pressure estimate: N = c / bwat wherever basal
+    !> water is present; N = 0 on dry bed cells.
+    real(dp),dimension(:,:),intent(in)  :: bwat   ! Water depth
+    real(dp),               intent(in)  :: c      ! Constant of proportionality
+    real(dp),dimension(:,:),intent(out) :: N      ! Effective pressure
+
+    integer :: iew, ins
+
+    do ins = 1, size(bwat,2)
+       do iew = 1, size(bwat,1)
+          if (bwat(iew,ins) > 0.d0) then
+             N(iew,ins) = c / bwat(iew,ins)
+          else
+             N(iew,ins) = 0.d0
+          end if
+       end do
+    end do
+  end subroutine effective_pressure
+
+!==============================================================
+
+  subroutine pressure_wphi(thck,topg,N,wphi,thicklim,floater)
+    !> Compute the basal water pressure potential wphi from ice overburden
+    !> plus bed elevation minus effective pressure.
+    !>
+    !> Original header formula: wphi/(rhow*g) = topg + rhoi/rhow*thck - N/(rhow*g).
+    !> NOTE(review): the code computes thck + (rhow/rhoi)*topg - N/(rhow*grav),
+    !> which does not match that comment's scaling — confirm the intended
+    !> nondimensionalization before relying on absolute values.
+
+    use glimmer_physcon, only : rhoi,rhow,grav
+    implicit none
+    real(dp),dimension(:,:),intent(in)  :: thck      ! Thickness
+    real(dp),dimension(:,:),intent(in)  :: topg      ! Bed elevation
+    real(dp),dimension(:,:),intent(in)  :: N         ! Effective pressure
+    logical, dimension(:,:),intent(in)  :: floater   ! Mask of floating ice
+    real(dp),               intent(in)  :: thicklim  ! Minimal ice thickness
+    real(dp),dimension(:,:),intent(out) :: wphi      ! Pressure wphi
+
+    logical, dimension(size(thck,1),size(thck,2)) :: grounded
+
+    ! Grounded, sufficiently thick ice gets the full potential; everywhere
+    ! else the potential is the (non-negative) scaled bed elevation.
+    grounded = (thck > thicklim) .and. (.not. floater)
+
+    wphi = merge(thck + rhow/rhoi * topg - N / (rhow * grav), &
+                 max(topg *rhow/rhoi,0.0d0), &
+                 grounded)
+
+  end subroutine pressure_wphi
+
+!==============================================================
+! Internal subroutines
+!==============================================================
+
+  subroutine fillholes(phi,flats,mask)
+
+    ! Iteratively fill local depressions ("holes") in the potential field
+    ! phi so that downhill routing cannot terminate in a pit.  Each pass
+    ! raises every masked pit cell to the lowest strictly different value
+    ! in its 3x3 neighbourhood; passes repeat until nothing changes.
+    !
+    !   phi   - potential surface, modified in place (interior cells only)
+    !   flats - set to 1 where an unfillable flat remains, else 0
+    !   mask  - only cells with mask == 1 are processed
+
+    implicit none
+
+    real(dp),dimension(:,:),intent(inout) :: phi
+    real(dp),dimension(:,:),intent(inout) :: flats
+    integer, dimension(:,:),intent(in) :: mask
+
+    ! Internal variables --------------------------------------
+
+    real(dp),allocatable,dimension(:,:) :: old_phi   ! phi as of the previous pass
+    integer, allocatable,dimension(:,:) :: pool      ! 1 where the cell is a pit
+
+    real(dp) :: pvs(9), max_val
+    ! Sentinel marking neighbours to ignore.  NOTE(review): default-real
+    ! literal compared against real(dp) values — confirm precision is moot.
+    real(dp), parameter :: null = 1e+20
+    integer :: flag,nx,ny,i,j
+
+    ! ---------------------------------------------------------
+
+    nx=size(phi,1) ; ny=size(phi,2)
+
+    allocate(pool(nx,ny),old_phi(nx,ny))
+
+    flag = 1
+
+    ! ---------------------------------------------------------
+
+    ! Repeat sweeps until a full pass raises no cell (flag stays 0).
+    do while (flag == 1)
+
+       flag = 0
+
+       old_phi = phi
+
+       do i=2,nx-1
+          do j=2,ny-1
+
+             flats(i,j) = 0
+
+             if (mask(i,j) == 1) then
+
+                ! A cell is a pit if no neighbour is strictly lower.
+                if (any(old_phi(i-1:i+1,j-1:j+1) < old_phi(i,j))) then
+                   pool(i,j) = 0
+                else
+                   pool(i,j) = 1
+                end if
+
+                if (pool(i,j) == 1) then
+
+                   flag = 1
+
+                   ! Gather the 3x3 neighbourhood (includes the centre cell,
+                   ! which the mask below removes since it equals old_phi(i,j)).
+                   pvs = (/ old_phi(i-1:i+1,j-1), old_phi(i-1:i+1,j+1), old_phi(i-1:i+1,j) /)
+
+                   ! Ignore neighbours exactly at the centre value.
+                   where (pvs == old_phi(i,j))
+                      pvs = null
+                   end where
+
+                   ! Lowest remaining neighbour value.
+                   ! NOTE(review): despite its name, max_val holds a minimum.
+                   max_val = minval(pvs)
+
+                   if (max_val /= null) then
+                      phi(i,j) = max_val
+                   else
+                      ! All neighbours equal: an unfillable flat.
+                      ! NOTE(review): resetting flag to 0 here can end the
+                      ! iteration even if earlier cells in this pass were
+                      ! raised — confirm this early-out is intended.
+                      flag = 0
+                      flats(i,j) = 1
+                   end if
+
+                end if
+
+             end if
+          end do
+       end do
+
+    end do
+
+    deallocate(pool,old_phi)
+
+  end subroutine fillholes
+
+!==============================================================
+
+  subroutine heights_sort(wphi,sorted)
+
+    use glimmer_log
+
+    !> Index-sort the 2D field wphi: on return, sorted(k,1:2) holds the
+    !> (i,j) grid location of the k-th smallest element of wphi, so callers
+    !> can visit grid points in height order.  wphi is not modified.
+    !>
+    !> Changes from the original: the shape check now runs before any
+    !> allocation and reports through write_log (consistent with indexx)
+    !> instead of print/stop; the index unflattening uses exact integer
+    !> arithmetic instead of single-precision floor(real(...)); a trailing
+    !> loop that recomputed vect and discarded the result was removed.
+
+    real(dp),dimension(:,:),intent(in)  :: wphi     ! field to sort
+    integer, dimension(:,:),intent(out) :: sorted   ! (nx*ny,2) sorted (i,j) indices
+
+    integer :: nx,ny,nn,i,j,k
+    real(dp),dimension(:),allocatable :: vect   ! wphi flattened, j varying fastest
+    integer, dimension(:),allocatable :: ind    ! permutation returned by indexx
+
+    nx=size(wphi,1) ; ny=size(wphi,2)
+    nn=size(sorted,1)
+
+    ! Validate the output array shape before doing any work.
+    if (nn /= nx*ny .or. size(sorted,2) /= 2) then
+       call write_log('heights_sort: sorted array has wrong dimensions', &
+                      GM_FATAL,__FILE__,__LINE__)
+    endif
+
+    allocate(vect(nn),ind(nn))
+
+    ! Flatten wphi with j varying fastest; the inverse mapping below
+    ! must match this ordering.
+    k=1
+    do i=1,nx
+       do j=1,ny
+          vect(k)=wphi(i,j)
+          k=k+1
+       enddo
+    enddo
+
+    call indexx(vect,ind)
+
+    ! Convert the 1-D permutation back to 2-D (i,j) indices using exact
+    ! integer division (the original used floor(real(...)/real(ny)), which
+    ! can misround for large grids in default real precision).
+    do k=1,nn
+       sorted(k,1)=(ind(k)-1)/ny + 1
+       sorted(k,2)=mod(ind(k)-1,ny) + 1
+    enddo
+
+  end subroutine heights_sort
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ !
+ ! The following two subroutines perform an index-sort of an array.
+ ! They are a GPL-licenced replacement for the Numerical Recipes routine indexx.
+ ! They are not derived from any NR code, but are based on a quicksort routine by
+ ! Michael Lamont (http://linux.wku.edu/~lamonml/kb.html), originally written
+ ! in C, and issued under the GNU General Public License. The conversion to
+ ! Fortran 90, and modification to do an index sort was done by Ian Rutt.
+ !
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  subroutine indexx(array,index)
+
+    use glimmer_log
+
+    !> Index sort: fills index with a permutation such that
+    !> array(index(1)) <= array(index(2)) <= ...  The order of elements in
+    !> array itself is unchanged.
+    !>
+    !> This is a GPL-licenced replacement for the Numerical Recipes routine
+    !> indexx.  It is not derived from any NR code, but is based on a
+    !> quicksort routine by Michael Lamont
+    !> (http://linux.wku.edu/~lamonml/kb.html), originally written in C and
+    !> issued under the GNU General Public License.  The conversion to
+    !> Fortran 90, and modification to do an index sort, was done by Ian Rutt.
+
+    real(dp),dimension(:) :: array   !> Array to be indexed.
+    integer, dimension(:) :: index   !> Index of elements of array.
+
+    integer :: i
+
+    ! The two arrays must be the same length.
+    if (size(array) /= size(index)) then
+       call write_log('ERROR: INDEXX size mismatch.',GM_FATAL,__FILE__,__LINE__)
+    endif
+
+    ! Start from the identity permutation, then quicksort it in place.
+    index = (/ (i, i=1,size(index)) /)
+
+    call q_sort_index(array,index,1,size(array))
+
+  end subroutine indexx
+
+!==============================================================
+
+  recursive subroutine q_sort_index(numbers,index,left,right)
+
+    !> This is the recursive subroutine actually used by \texttt{indexx}.
+    !>
+    !> In-place quicksort of the permutation array index over the range
+    !> [left,right], ordered by the values numbers(index(:)).  Only index
+    !> entries move; numbers is never modified.
+    !>
+    !> This is a GPL-licenced replacement for the Numerical Recipes routine indexx.
+    !> It is not derived from any NR code, but are based on a quicksort routine by
+    !> Michael Lamont (http://linux.wku.edu/~lamonml/kb.html), originally written
+    !> in C, and issued under the GNU General Public License. The conversion to
+    !> Fortran 90, and modification to do an index sort was done by Ian Rutt.
+
+    implicit none
+
+    real(dp),dimension(:) :: numbers   !> Numbers being sorted (read-only in practice)
+    integer, dimension(:) :: index     !> Returned index
+    integer :: left, right             !> Limit of sort region
+
+    integer :: ll,rr
+    integer :: pv_int,l_hold, r_hold,pivpos
+    real(dp) :: pivot
+
+    ! The leftmost element of the range is the pivot.
+    ll=left
+    rr=right
+
+    l_hold = ll
+    r_hold = rr
+    pivot = numbers(index(ll))
+    pivpos=index(ll)
+
+    ! Two-pointer partition: scan rr down past elements >= pivot and ll up
+    ! past elements <= pivot, moving index entries across the pivot hole.
+    do
+       if (.not.(ll < rr)) exit
+
+       do
+          if (.not.((numbers(index(rr)) >= pivot) .and. (ll < rr))) exit
+          rr=rr-1
+       enddo
+
+       if (ll /= rr) then
+          index(ll) = index(rr)
+          ll=ll+1
+       endif
+
+       do
+          if (.not.((numbers(index(ll)) <= pivot) .and. (ll < rr))) exit
+          ll=ll+1
+       enddo
+
+       if (ll /= rr) then
+          index(rr) = index(ll)
+          rr=rr-1
+       endif
+    enddo
+
+    ! Drop the pivot's original index into its final slot, then recurse on
+    ! the sub-ranges on either side of it.
+    index(ll) = pivpos
+    pv_int = ll
+    ll = l_hold
+    rr = r_hold
+    if (ll < pv_int) call q_sort_index(numbers, index,ll, pv_int-1)
+    if (rr > pv_int) call q_sort_index(numbers, index,pv_int+1, rr)
+
+  end subroutine q_sort_index
+
+end module glide_bwater
diff --git a/components/cism/glimmer-cism/libglide/glide_diagnostics.F90 b/components/cism/glimmer-cism/libglide/glide_diagnostics.F90
new file mode 100644
index 0000000000..eab4235864
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_diagnostics.F90
@@ -0,0 +1,729 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_diagnostics.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!TODO - Calculations of iarea, iareaf and areag in calc_iareaf_iareag() and glide_set_mask() could be replaced by values computed here.
+! These could be saved to the model derived type (model%geometry%iarea, etc.) for output.
+
+module glide_diagnostics
+
+ ! subroutines for computing various useful diagnostics
+ ! Author: William Lipscomb, LANL
+
+ use glimmer_global, only: dp
+ use glimmer_log
+ use glide_types
+
+ implicit none
+
+contains
+
+  subroutine glide_write_diagnostics (model, time, &
+                                      tstep_count, &
+                                      minthick_in)
+
+    ! Short driver subroutine to decide whether it's time to write diagnostics.
+    ! If so, it calls glide_write_diag.
+    !
+    ! The decision is made one of two ways:
+    ! (1) if dt_diag > 0, write whenever the current time is (to within
+    !     roundoff) an integer multiple of dt_diag;
+    ! (2) otherwise, if tstep_count is supplied and ndiag > 0, write every
+    !     ndiag timesteps.
+
+    ! input/output arguments
+
+    type(glide_global_type), intent(in) :: model ! model instance
+    real(dp), intent(in) :: time ! current time in years
+
+    integer, intent(in), optional :: tstep_count ! current timestep
+
+    real(dp), intent(in), optional :: &
+       minthick_in ! ice thickness threshold (m) for including in diagnostics
+
+    ! local arguments
+
+    real(dp) :: minthick ! ice thickness threshold (m) for including in diagnostics
+                         ! defaults to eps (a small number) if not passed in
+
+    real(dp), parameter :: &
+       eps = 1.0d-11
+
+    real(dp) :: quotient       ! time / dt_diag
+    integer :: nint_quotient   ! quotient rounded to the nearest whole number
+                               ! (integer, matching the result kind of nint)
+
+    if (present(minthick_in)) then
+       minthick = minthick_in
+    else
+       minthick = eps
+    endif
+
+    !TODO - Make the write_diag criterion more robust; e.g., derive ndiag from dt_diag at initialization.
+    !       Then we would work with integers (tstep_count and ndiag) and avoid roundoff errors.
+
+    if (model%numerics%dt_diag > 0.d0) then ! usual case
+
+       ! Testing mod(time, model%numerics%dt_diag) < eps directly is not robust
+       ! because of roundoff error, so instead compare the quotient to its
+       ! nearest integer.
+       quotient = time/model%numerics%dt_diag
+       nint_quotient = nint(quotient)
+       if (abs(quotient - real(nint_quotient,dp)) < eps) then ! time to write
+
+          call glide_write_diag(model, &
+                                time, &
+                                minthick)
+       endif
+
+    elseif (present(tstep_count) .and. model%numerics%ndiag > 0) then ! decide based on ndiag
+
+       if (mod(tstep_count, model%numerics%ndiag) == 0) then ! time to write
+          call glide_write_diag(model, &
+                                time, &
+                                minthick)
+       endif
+
+    endif ! dt_diag > 0
+
+  end subroutine glide_write_diagnostics
+
+!--------------------------------------------------------------------------
+
+  subroutine glide_init_diag (model)
+
+    ! Initialize diagnostics: locate the user-selected global diagnostic point
+    ! (model%numerics%idiag, jdiag) within the parallel decomposition, storing
+    ! the local indices and owning rank, and log the result.
+
+    use parallel
+
+    implicit none
+
+    ! input/output arguments
+
+    type(glide_global_type), intent(inout) :: model ! model instance
+
+    ! local variables
+
+    character(len=100) :: message
+
+    !-----------------------------------------------------------------
+    ! Find the local rank and indices of the global diagnostic point
+    !-----------------------------------------------------------------
+
+    call parallel_localindex(model%numerics%idiag, model%numerics%jdiag, &
+                             model%numerics%idiag_local, model%numerics%jdiag_local, &
+                             model%numerics%rdiag_local)
+
+    !WHL - debug
+    ! NOTE(review): this block writes to stdout (unit 6) the same information
+    ! that is written to the log just below; it looks like leftover debug
+    ! output -- confirm whether it can be removed.
+    if (main_task) then
+       write(6,'(a25,2i6)') 'Global idiag, jdiag: ', &
+                             model%numerics%idiag, model%numerics%jdiag
+       write(6,'(a25,3i6)') 'Local idiag, jdiag, task:', &
+                             model%numerics%idiag_local, &
+                             model%numerics%jdiag_local, &
+                             model%numerics%rdiag_local
+    endif
+
+    ! Record the diagnostic point location in the log file.
+    if (main_task) then
+
+       write(message,'(a25,2i6)') 'Global idiag, jdiag: ', &
+                                   model%numerics%idiag, model%numerics%jdiag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       write(message,'(a25,3i6)') 'Local idiag, jdiag, task:', &
+                                   model%numerics%idiag_local, &
+                                   model%numerics%jdiag_local, &
+                                   model%numerics%rdiag_local
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    endif ! main_task
+
+  end subroutine glide_init_diag
+
+!--------------------------------------------------------------------------
+
+  subroutine glide_write_diag (model, time, &
+                               minthick)
+
+    ! Write global diagnostics (totals, means, maxes and mins over the ice sheet)
+    ! Also write local diagnostics for a selected grid cell
+
+    use parallel
+
+    use glimmer_paramets, only: thk0, len0, vel0, tim0, unphys_val
+    use glimmer_physcon, only: scyr, rhoi, shci
+
+    implicit none
+
+    ! input/output arguments
+
+    type(glide_global_type), intent(in) :: model ! model instance
+    real(dp), intent(in) :: time ! current time in years
+    real(dp), intent(in) :: &
+       minthick ! ice thickness threshold (m) for including in diagnostics
+
+    ! local variables
+
+    real(dp) :: &
+       tot_area, & ! total ice area (km^2)
+       tot_volume, & ! total ice volume (km^3)
+       tot_energy, & ! total ice energy (J)
+       mean_thck, & ! mean ice thickness (m)
+       mean_temp, & ! mean ice temperature (deg C)
+       mean_acab, & ! mean surface accumulation/ablation rate (m/yr)
+       mean_bmlt, & ! mean basal melt (m/yr)
+       max_thck, max_thck_global, & ! max ice thickness (m)
+       max_temp, max_temp_global, & ! max ice temperature (deg C)
+       min_temp, min_temp_global, & ! min ice temperature (deg C)
+       max_spd_sfc, max_spd_sfc_global, & ! max surface ice speed (m/yr)
+       max_spd_bas, max_spd_bas_global, & ! max basal ice speed (m/yr)
+       spd, & ! speed
+       thck_diag, usrf_diag, & ! local column diagnostics
+       topg_diag, relx_diag, &
+       artm_diag, acab_diag, &
+       bmlt_diag, bwat_diag, &
+       bheatflx_diag, level
+
+    real(dp), dimension(model%general%upn) :: &
+       temp_diag, & ! Note: sfc temp not included if temps are staggered
+                    ! (use artm instead)
+       spd_diag
+
+    real(dp), dimension(model%lithot%nlayer) :: &
+       lithtemp_diag ! lithosphere column diagnostics
+
+    integer :: i, j, k, ktop, kbed, &
+               imax, imin, &
+               jmax, jmin, &
+               kmax, kmin, &
+               imax_global, imin_global, &
+               jmax_global, jmin_global, &
+               kmax_global, kmin_global, &
+               procnum, &
+               ewn, nsn, upn, & ! model%numerics%ewn, etc.
+               nlith, & ! model%lithot%nlayer
+               velo_ew_ubound, velo_ns_ubound ! upper bounds for velocity variables
+
+    character(len=100) :: message
+
+    real(dp), parameter :: &
+       eps = 1.0d-11 ! small number
+
+    ewn = model%general%ewn
+    nsn = model%general%nsn
+    upn = model%general%upn
+
+    nlith = model%lithot%nlayer
+
+    if (uhalo > 0) then
+       velo_ns_ubound = nsn-uhalo
+       velo_ew_ubound = ewn-uhalo
+    else
+       ! for uhalo==0 (as is the case for the glide dycore), the velocity grid has one less
+       ! point than the main grid, so we need to subtract one to avoid out-of-bounds problems
+       velo_ns_ubound = nsn-uhalo-1
+       velo_ew_ubound = ewn-uhalo-1
+    end if
+
+    !-----------------------------------------------------------------
+    ! Compute and write global diagnostics
+    !-----------------------------------------------------------------
+
+    call write_log('----------------------------------------------------------')
+    call write_log(' ')
+    write(message,'(a25,f24.16)') 'Diagnostic output, time =', time
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+    call write_log(' ')
+
+    ! total ice area (m^2)
+
+    tot_area = 0.d0
+    do j = lhalo+1, nsn-uhalo
+       do i = lhalo+1, ewn-uhalo
+          if (model%geometry%thck(i,j) * thk0 > minthick) then
+             tot_area = tot_area + model%numerics%dew * model%numerics%dns
+          endif
+       enddo
+    enddo
+    tot_area = tot_area * len0**2
+    tot_area = parallel_reduce_sum(tot_area)
+
+    ! total ice volume (m^3)
+
+    tot_volume = 0.d0
+    do j = lhalo+1, nsn-uhalo
+       do i = lhalo+1, ewn-uhalo
+          if (model%geometry%thck(i,j) * thk0 > minthick) then
+             tot_volume = tot_volume + model%geometry%thck(i,j) &
+                                     * model%numerics%dew * model%numerics%dns
+          endif
+       enddo
+    enddo
+    tot_volume = tot_volume * thk0 * len0**2
+    tot_volume = parallel_reduce_sum(tot_volume)
+
+    ! total ice energy relative to T = 0 deg C (J)
+
+    tot_energy = 0.d0
+    if (size(model%temper%temp,1) == upn+1) then ! temps are staggered in vertical
+       do j = lhalo+1, nsn-uhalo
+          do i = lhalo+1, ewn-uhalo
+             if (model%geometry%thck(i,j) * thk0 > minthick) then
+                do k = 1, upn-1
+                   tot_energy = tot_energy + &
+                                model%geometry%thck(i,j) * model%temper%temp(k,i,j) &
+                              * model%numerics%dew * model%numerics%dns &
+                              *(model%numerics%sigma(k+1) - model%numerics%sigma(k))
+                enddo
+             endif
+          enddo
+       enddo
+
+    else ! temps are unstaggered in vertical
+       do j = lhalo+1, nsn-uhalo
+          do i = lhalo+1, ewn-uhalo
+             if (model%geometry%thck(i,j) * thk0 > minthick) then
+                ! upper half-layer, T = upper sfc temp
+                tot_energy = tot_energy + &
+                             model%geometry%thck(i,j) * model%temper%temp(1,i,j) &
+                           * model%numerics%dew * model%numerics%dns &
+                           * 0.5d0 * model%numerics%sigma(2)
+                do k = 2, upn-1
+                   tot_energy = tot_energy + &
+                                model%geometry%thck(i,j) * model%temper%temp(k,i,j) &
+                              * model%numerics%dew * model%numerics%dns &
+                              * 0.5d0*(model%numerics%sigma(k+1) - model%numerics%sigma(k-1))
+                enddo
+                ! lower half-layer, T = lower sfc temp
+                tot_energy = tot_energy + &
+                             model%geometry%thck(i,j) * model%temper%temp(upn,i,j) &
+                           * model%numerics%dew * model%numerics%dns &
+                           * 0.5d0 * (1.0d0 - model%numerics%sigma(upn-1))
+             endif
+          enddo
+       enddo
+    endif
+
+    tot_energy = tot_energy * thk0 * len0**2 * rhoi * shci
+    tot_energy = parallel_reduce_sum(tot_energy)
+
+    ! mean thickness
+
+    if (tot_area > eps) then
+       mean_thck = tot_volume/tot_area
+    else
+       mean_thck = 0.d0
+    endif
+
+    ! mean temperature
+
+    if (tot_volume > eps) then
+       mean_temp = tot_energy/ (rhoi*shci*tot_volume)
+    else
+       mean_temp = 0.d0
+    endif
+
+    ! mean surface accumulation/ablation rate (m/yr)
+
+    mean_acab = 0.d0
+    do j = lhalo+1, nsn-uhalo
+       do i = lhalo+1, ewn-uhalo
+          if (model%geometry%thck(i,j) * thk0 > minthick) then
+             mean_acab = mean_acab + model%climate%acab(i,j) &
+                                   * model%numerics%dew * model%numerics%dns
+          endif
+       enddo
+    enddo
+    mean_acab = mean_acab * scyr * thk0 / tim0 * len0**2 ! convert to m^3/yr
+    mean_acab = parallel_reduce_sum(mean_acab)
+
+    if (tot_area > eps) then
+       mean_acab = mean_acab/tot_area ! divide by total area to get m/yr
+    else
+       mean_acab = 0.d0
+    endif
+
+    ! mean basal melting rate (positive for ice loss)
+
+    mean_bmlt = 0.d0
+    do j = lhalo+1, nsn-uhalo
+       do i = lhalo+1, ewn-uhalo
+          if (model%geometry%thck(i,j) * thk0 > minthick) then
+             mean_bmlt = mean_bmlt + model%temper%bmlt(i,j) &
+                                   * model%numerics%dew * model%numerics%dns
+          endif
+       enddo
+    enddo
+
+    mean_bmlt = mean_bmlt * scyr * thk0 / tim0 * len0**2 ! convert to m^3/yr
+    mean_bmlt = parallel_reduce_sum(mean_bmlt)
+
+    if (tot_area > eps) then
+       mean_bmlt = mean_bmlt/tot_area ! divide by total area to get m/yr
+    else
+       mean_bmlt = 0.d0
+    endif
+
+    ! write global sums and means
+
+    write(message,'(a25,e24.16)') 'Total ice area (km^2) ', &
+                                   tot_area*1.0d-6 ! convert to km^2
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    write(message,'(a25,e24.16)') 'Total ice volume (km^3) ', &
+                                   tot_volume*1.0d-9 ! convert to km^3
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    write(message,'(a25,e24.16)') 'Total ice energy (J) ', tot_energy
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    write(message,'(a25,f24.16)') 'Mean thickness (m) ', mean_thck
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    write(message,'(a25,f24.16)') 'Mean temperature (C) ', mean_temp
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    write(message,'(a25,e24.16)') 'Mean accum/ablat (m/yr) ', mean_acab
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    write(message,'(a25,e24.16)') 'Mean basal melt (m/yr) ', mean_bmlt
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    ! Find various global maxes and mins
+
+    ! max thickness
+
+    imax = 0
+    jmax = 0
+    max_thck = unphys_val ! = -999.d0 (an arbitrary large negative number)
+    do j = lhalo+1, nsn-uhalo
+       do i = lhalo+1, ewn-uhalo
+          if (model%geometry%thck(i,j) > max_thck) then
+             max_thck = model%geometry%thck(i,j)
+             imax = i
+             jmax = j
+          endif
+       enddo
+    enddo
+
+    imax_global = 0
+    jmax_global = 0
+    max_thck_global = parallel_reduce_max(max_thck)
+    if (max_thck == max_thck_global) then ! max_thck lives on this processor
+       imax_global = (imax - lhalo) + global_col_offset
+       jmax_global = (jmax - lhalo) + global_row_offset
+    endif
+    imax_global = parallel_reduce_max(imax_global)
+    jmax_global = parallel_reduce_max(jmax_global)
+
+    write(message,'(a25,f24.16,2i4)') 'Max thickness (m), i, j ', &
+                                       max_thck_global*thk0, imax_global, jmax_global
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    ! max temperature
+
+    ktop = lbound(model%temper%temp,1)
+    kbed = ubound(model%temper%temp,1)
+
+    imax = 0
+    jmax = 0
+    kmax = 0
+    max_temp = unphys_val
+    do j = lhalo+1, nsn-uhalo
+       do i = lhalo+1, ewn-uhalo
+          if (model%geometry%thck(i,j) * thk0 > minthick) then
+             do k = ktop, kbed
+                if (model%temper%temp(k,i,j) > max_temp) then
+                   max_temp = model%temper%temp(k,i,j)
+                   imax = i
+                   jmax = j
+                   kmax = k
+                endif
+             enddo
+          endif
+       enddo
+    enddo
+
+    call parallel_reduce_maxloc(xin=max_temp, xout=max_temp_global, xprocout=procnum)
+    call parallel_globalindex(imax, jmax, imax_global, jmax_global)
+    kmax_global = kmax
+    call broadcast(imax_global, procnum)
+    call broadcast(jmax_global, procnum)
+    call broadcast(kmax_global, procnum)
+
+    write(message,'(a25,f24.16,3i4)') 'Max temperature, i, j, k ', &
+                                       max_temp_global, imax_global, jmax_global, kmax_global
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    ! min temperature
+
+    imin = 0
+    jmin = 0
+    kmin = 0
+    min_temp = 999.d0 ! arbitrary large positive number
+    do j = lhalo+1, nsn-uhalo
+       do i = lhalo+1, ewn-uhalo
+          if (model%geometry%thck(i,j) * thk0 > minthick) then
+             do k = ktop, kbed
+                if (model%temper%temp(k,i,j) < min_temp) then
+                   min_temp = model%temper%temp(k,i,j)
+                   imin = i
+                   jmin = j
+                   kmin = k
+                endif
+             enddo
+          endif
+       enddo
+    enddo
+
+    call parallel_reduce_minloc(xin=min_temp, xout=min_temp_global, xprocout=procnum)
+    call parallel_globalindex(imin, jmin, imin_global, jmin_global)
+    kmin_global = kmin
+    call broadcast(imin_global, procnum)
+    call broadcast(jmin_global, procnum)
+    call broadcast(kmin_global, procnum)
+
+    write(message,'(a25,f24.16,3i4)') 'Min temperature, i, j, k ', &
+                                       min_temp_global, imin_global, jmin_global, kmin_global
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    ! max surface speed
+
+    imax = 0
+    jmax = 0
+    max_spd_sfc = unphys_val
+
+    do j = lhalo+1, velo_ns_ubound
+       do i = lhalo+1, velo_ew_ubound
+          spd = sqrt(model%velocity%uvel(1,i,j)**2 &
+                   + model%velocity%vvel(1,i,j)**2)
+          if (model%geometry%thck(i,j) * thk0 > minthick .and. spd > max_spd_sfc) then
+             max_spd_sfc = spd
+             imax = i
+             jmax = j
+          endif
+       enddo
+    enddo
+
+    call parallel_reduce_maxloc(xin=max_spd_sfc, xout=max_spd_sfc_global, xprocout=procnum)
+    call parallel_globalindex(imax, jmax, imax_global, jmax_global)
+    call broadcast(imax_global, procnum)
+    call broadcast(jmax_global, procnum)
+
+    write(message,'(a25,f24.16,2i4)') 'Max sfc spd (m/yr), i, j ', &
+                                       max_spd_sfc_global*vel0*scyr, imax_global, jmax_global
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    ! max basal speed
+
+    imax = 0
+    jmax = 0
+    max_spd_bas = unphys_val
+    do j = lhalo+1, velo_ns_ubound
+       do i = lhalo+1, velo_ew_ubound
+          spd = sqrt(model%velocity%uvel(upn,i,j)**2 &
+                   + model%velocity%vvel(upn,i,j)**2)
+          if (model%geometry%thck(i,j) * thk0 > minthick .and. spd > max_spd_bas) then
+             max_spd_bas = spd
+             imax = i
+             jmax = j
+          endif
+       enddo
+    enddo
+
+    call parallel_reduce_maxloc(xin=max_spd_bas, xout=max_spd_bas_global, xprocout=procnum)
+    call parallel_globalindex(imax, jmax, imax_global, jmax_global)
+    call broadcast(imax_global, procnum)
+    call broadcast(jmax_global, procnum)
+
+    write(message,'(a25,f24.16,2i4)') 'Max base spd (m/yr), i, j', &
+                                       max_spd_bas_global*vel0*scyr, imax_global, jmax_global
+    call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+    ! local diagnostics
+
+    ! initialize to unphysical negative values
+    usrf_diag = unphys_val
+    thck_diag = unphys_val
+    topg_diag = unphys_val
+    relx_diag = unphys_val
+    artm_diag = unphys_val
+    acab_diag = unphys_val
+    bmlt_diag = unphys_val
+    bwat_diag = unphys_val
+    bheatflx_diag = unphys_val
+    temp_diag(:) = unphys_val
+    spd_diag (:) = unphys_val
+    lithtemp_diag(:) = unphys_val
+
+    ! Set local diagnostic values, and communicate them to main_task
+
+    if (model%numerics%idiag_local >= 1 .and. model%numerics%idiag_local <= ewn &
+                                        .and.  &
+        model%numerics%jdiag_local >= 1 .and. model%numerics%jdiag_local <= nsn) then
+
+       if (this_rank == model%numerics%rdiag_local) then
+
+          i = model%numerics%idiag_local
+          j = model%numerics%jdiag_local
+          usrf_diag = model%geometry%usrf(i,j)*thk0
+          thck_diag = model%geometry%thck(i,j)*thk0
+          topg_diag = model%geometry%topg(i,j)*thk0
+          relx_diag = model%isostasy%relx(i,j)*thk0
+          artm_diag = model%climate%artm(i,j)
+          acab_diag = model%climate%acab(i,j) * thk0*scyr/tim0
+          bmlt_diag = model%temper%bmlt(i,j) * thk0*scyr/tim0
+          bwat_diag = model%temper%bwat(i,j) * thk0
+          bheatflx_diag = model%temper%bheatflx(i,j)
+
+          temp_diag(:) = model%temper%temp(1:upn,i,j)
+          spd_diag(:) = sqrt(model%velocity%uvel(1:upn,i,j)**2  &
+                           + model%velocity%vvel(1:upn,i,j)**2) * vel0*scyr
+          if (model%options%gthf == GTHF_COMPUTE) &
+             lithtemp_diag(:) = model%lithot%temp(i,j,:)
+       endif
+
+       ! The diagnostic cell lives on one processor; every other processor holds
+       ! unphys_val, so a max reduction delivers the real values everywhere.
+       usrf_diag = parallel_reduce_max(usrf_diag)
+       thck_diag = parallel_reduce_max(thck_diag)
+       topg_diag = parallel_reduce_max(topg_diag)
+       relx_diag = parallel_reduce_max(relx_diag)
+       artm_diag = parallel_reduce_max(artm_diag)
+       acab_diag = parallel_reduce_max(acab_diag)
+       bmlt_diag = parallel_reduce_max(bmlt_diag)
+       bwat_diag = parallel_reduce_max(bwat_diag)
+       bheatflx_diag = parallel_reduce_max(bheatflx_diag)
+
+       do k = 1, upn
+          temp_diag(k) = parallel_reduce_max(temp_diag(k))
+          spd_diag(k) = parallel_reduce_max(spd_diag(k))
+       enddo
+
+       do k = 1, nlith
+          lithtemp_diag(k) = parallel_reduce_max(lithtemp_diag(k))
+       enddo
+
+       call write_log(' ')
+       write(message,'(a39,2i6)')  &
+            'Grid point diagnostics: (i,j) =', model%numerics%idiag, &
+                                               model%numerics%jdiag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+       write(message,'(a39,3i6)')  &
+            'Local (i,j,rank) =    ', model%numerics%idiag_local, &
+                                      model%numerics%jdiag_local, &
+                                      model%numerics%rdiag_local
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+       call write_log(' ')
+
+       write(message,'(a25,f24.16)') 'Upper surface (m) ', usrf_diag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       write(message,'(a25,f24.16)') 'Thickness (m) ', thck_diag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       write(message,'(a25,f24.16)') 'Bedrock topo (m) ', topg_diag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       if (model%options%isostasy == ISOSTASY_COMPUTE) then
+          write(message,'(a25,f24.16)') 'Relaxed bedrock (m) ', relx_diag
+          call write_log(trim(message), type = GM_DIAGNOSTIC)
+       endif
+
+       write(message,'(a25,f24.16)') 'Sfc mass balance (m/yr) ', acab_diag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       write(message,'(a25,f24.16)') 'Basal melt rate (m/yr) ', bmlt_diag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       write(message,'(a25,f24.16)') 'Basal water depth (m) ', bwat_diag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       write(message,'(a25,f24.16)') 'Basal heat flux (W/m^2) ', bheatflx_diag
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       ! Vertical profile of ice speed and temperature
+
+       call write_log(' ')
+       write(message,'(a55)') ' Sigma Ice speed (m/yr) Ice temperature (C)'
+       call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       if (size(model%temper%temp,1) == upn+1) then ! temperatures staggered in vertical
+                                                    ! (at layer midpoints)
+
+          ! upper surface
+          write (message,'(f6.3,2f24.16)') model%numerics%sigma(1), spd_diag(1), artm_diag
+          call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+          ! internal
+          do k = 1, upn-1
+
+             ! speed at top of layer
+             if (k > 1) then
+                write (message,'(f6.3,f24.16)') model%numerics%sigma(k), spd_diag(k)
+                call write_log(trim(message), type = GM_DIAGNOSTIC)
+             endif
+
+             ! temp at layer midpoint
+             write (message,'(f6.3,24x,f24.16)') model%numerics%stagsigma(k), temp_diag(k)
+             call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+          enddo
+
+          ! lower surface
+          write (message,'(f6.3,2f24.16)') model%numerics%sigma(upn), spd_diag(upn), temp_diag(upn)
+          call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+       else ! temperatures unstaggered in vertical (at layer interfaces)
+
+          do k = 1, upn
+             write (message,'(f6.3,2f24.16)') model%numerics%sigma(k), spd_diag(k), temp_diag(k)
+             call write_log(trim(message), type = GM_DIAGNOSTIC)
+          enddo
+
+       endif ! temps staggered
+
+       ! Vertical profile of upper lithosphere temperature
+
+       if (model%options%gthf == GTHF_COMPUTE) then
+
+          call write_log(' ')
+          write(message,'(a41)') ' Level (m) Lithosphere temp (C)'
+          call write_log(trim(message), type = GM_DIAGNOSTIC)
+
+          level = 0.d0
+          do k = 1, nlith
+             ! Bug fix: accumulate the thickness of layer k (deltaz(k)) rather
+             ! than repeatedly adding the bottom layer's thickness (deltaz(nlith)),
+             ! so the printed depth is correct for unevenly spaced layers.
+             level = level + model%lithot%deltaz(k)
+             write (message,'(f10.0,6x,f24.16)') level, lithtemp_diag(k)
+             call write_log(trim(message), type = GM_DIAGNOSTIC)
+          enddo
+
+       endif ! gthf_compute
+
+    endif ! idiag_local and jdiag_local in bounds
+
+    call write_log(' ')
+
+  end subroutine glide_write_diag
+
+!==============================================================
+
+end module glide_diagnostics
diff --git a/components/cism/glimmer-cism/libglide/glide_grid_operators.F90 b/components/cism/glimmer-cism/libglide/glide_grid_operators.F90
new file mode 100644
index 0000000000..dbd81e1950
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_grid_operators.F90
@@ -0,0 +1,241 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_grid_operators.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+! Various grid operators for the Glide dycore, including routines for computing gradients
+! and switching between staggered and unstaggered grids
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_nan.inc"
+#include "glide_mask.inc"
+
+module glide_grid_operators
+
+ use glimmer_global, only : dp
+ implicit none
+
+contains
+
+!----------------------------------------------------------------------------
+
+  subroutine glide_geometry_derivs(model)
+
+    ! Compute geometric quantities needed by the glide dycore:
+    ! stagthck (given thck), along with the gradients
+    ! dusrfdew/dns (given usrf) and dthckdew/dns (given thck).
+    !
+    ! Note the ordering: stagthck must be computed first, because geomders
+    ! uses it to zero out gradients where the staggered thickness is zero.
+
+    use glide_types, only: glide_global_type
+
+    implicit none
+
+    type(glide_global_type), intent(inout) :: model
+
+    ! Interpolate ice thickness to velocity points
+
+    call stagvarb(model%geometry% thck, &
+                  model%geomderv% stagthck, &
+                  model%general% ewn, &
+                  model%general% nsn)
+
+    ! Compute EW and NS gradients in usrf and thck
+
+    call geomders(model%numerics, &
+                  model%geometry% usrf, &
+                  model%geomderv% stagthck,&
+                  model%geomderv% dusrfdew, &
+                  model%geomderv% dusrfdns)
+
+    call geomders(model%numerics, &
+                  model%geometry% thck, &
+                  model%geomderv% stagthck,&
+                  model%geomderv% dthckdew, &
+                  model%geomderv% dthckdns)
+
+    !NOTE: The following commented-out code is included in stagthickness.
+    ! (geomders already zeroes gradients where stagthck == 0, so this
+    !  masking is redundant here.)
+!    where (model%geomderv%stagthck == 0.d0)
+!       model%geomderv%dusrfdew = 0.d0
+!       model%geomderv%dusrfdns = 0.d0
+!       model%geomderv%dthckdew = 0.d0
+!       model%geomderv%dthckdns = 0.d0
+!    endwhere
+
+  end subroutine glide_geometry_derivs
+
+!---------------------------------------------------------------
+
+  subroutine stagvarb(ipvr,opvr,ewn,nsn)
+
+    ! Average a cell-centered scalar (such as ice thickness) onto the
+    ! staggered (velocity-point) grid: each corner value is the mean of the
+    ! four surrounding cell-centered values.
+
+    !NOTE: This subroutine, used by the glide SIA dycore, differs from
+    !      stagthickness (used by the glam HO dycore): stagthickness omits
+    !      zero-thickness cells from the four-cell average, whereas here
+    !      zero-thickness cells are included.  The glam approach works better
+    !      for calving.
+    !TODO: Add a flag that allows zero-thickness values to be omitted from the
+    !      average (e.g., for flwa and temp).
+
+    implicit none
+
+    real(dp), intent(out), dimension(:,:) :: opvr   ! staggered (corner) values
+    real(dp), intent(in),  dimension(:,:) :: ipvr   ! cell-centered values
+
+    integer, intent(in) :: ewn,nsn                  ! horizontal grid dimensions
+
+    integer :: ew, ns   ! loop indices over staggered points
+
+    ! Explicit loops; inner loop runs over the first (ew) index so that
+    ! memory access is contiguous (column-major storage).
+    do ns = 1, nsn-1
+       do ew = 1, ewn-1
+          opvr(ew,ns) = (ipvr(ew+1,ns)   + ipvr(ew,ns+1) +  &
+                         ipvr(ew+1,ns+1) + ipvr(ew,ns)) / 4.0d0
+       end do
+    end do
+
+  end subroutine stagvarb
+
+!----------------------------------------------------------------------------
+
+  subroutine stagvarb_3d(ipvr, opvr, ewn, nsn, upn)
+
+    ! Apply the 2D center-to-corner averaging (stagvarb) independently to
+    ! each vertical level of a 3D field stored as (level, ew, ns).
+
+    real(dp), intent(in),  dimension(:,:,:) :: ipvr   ! cell-centered field
+    real(dp), intent(out), dimension(:,:,:) :: opvr   ! staggered field
+    integer,  intent(in) :: ewn, nsn, upn             ! grid dimensions
+
+    integer :: level
+
+    do level = 1, upn
+       call stagvarb(ipvr(level,:,:), opvr(level,:,:), ewn, nsn)
+    end do
+
+  end subroutine stagvarb_3d
+
+!----------------------------------------------------------------------------
+
+  subroutine stagvarb_mask(ipvr,opvr,ewn,nsn,geometry_mask)
+
+    ! Interpolate a cell-centered scalar to cell corners, as in stagvarb,
+    ! except that near the ice margin (where any of the four neighboring
+    ! cells is ice-free) the average is taken over ice-covered cells only,
+    ! so zero values in ice-free cells do not drag down the staggered value.
+
+    implicit none
+
+    real(dp), intent(out), dimension(:,:) :: opvr   ! staggered (corner) values
+    real(dp), intent(in), dimension(:,:) :: ipvr    ! cell-centered values
+
+    integer, intent(in) :: ewn,nsn                  ! horizontal grid dimensions
+    integer, intent(in), dimension(:,:) :: geometry_mask  ! cell-centered ice mask
+    integer :: ew,ns,n
+    real(dp) :: tot
+
+    ! Note: every point of opvr(1:ewn-1,1:nsn-1) is assigned in the loop
+    ! below (both branches), so the whole-array four-point average that was
+    ! previously computed up front was redundant and has been removed.
+
+    do ns = 1,nsn-1
+       do ew = 1,ewn-1
+
+          !If any of our staggering points are shelf front, ignore zeros when staggering
+          if (any(GLIDE_NO_ICE(geometry_mask(ew:ew+1, ns:ns+1)))) then
+             n = 0
+             tot = 0.d0
+
+             if (GLIDE_HAS_ICE(geometry_mask(ew,ns))) then
+                tot = tot + ipvr(ew,ns)
+                n = n + 1
+             end if
+             if (GLIDE_HAS_ICE(geometry_mask(ew+1,ns))) then
+                tot = tot + ipvr(ew+1,ns)
+                n = n + 1
+             end if
+             if (GLIDE_HAS_ICE(geometry_mask(ew,ns+1))) then
+                tot = tot + ipvr(ew,ns+1)
+                n = n + 1
+             end if
+             if (GLIDE_HAS_ICE(geometry_mask(ew+1,ns+1))) then
+                tot = tot + ipvr(ew+1,ns+1)
+                n = n + 1
+             end if
+             if (n > 0) then
+                opvr(ew,ns) = tot/n
+             else
+                ! all four neighbors ice-free
+                opvr(ew,ns) = 0.d0
+             end if
+
+          !Standard Staggering
+          else
+             opvr(ew,ns) = (ipvr(ew+1,ns) + ipvr(ew,ns+1) + &
+                            ipvr(ew+1,ns+1) + ipvr(ew,ns)) / 4.0d0
+          end if
+
+       end do
+    end do
+
+  end subroutine stagvarb_mask
+
+!----------------------------------------------------------------------------
+
+  subroutine stagvarb_3d_mask(ipvr, opvr, ewn, nsn, upn, geometry_mask)
+
+    ! Apply the masked 2D center-to-corner averaging (stagvarb_mask)
+    ! independently to each vertical level of a 3D field stored as
+    ! (level, ew, ns), using the same ice mask at every level.
+
+    real(dp), intent(in),  dimension(:,:,:) :: ipvr   ! cell-centered field
+    real(dp), intent(out), dimension(:,:,:) :: opvr   ! staggered field
+    integer,  intent(in) :: ewn, nsn, upn             ! grid dimensions
+    integer,  intent(in), dimension(:,:) :: geometry_mask  ! cell-centered ice mask
+
+    integer :: level
+
+    do level = 1, upn
+       call stagvarb_mask(ipvr(level,:,:), opvr(level,:,:), ewn, nsn, geometry_mask)
+    end do
+
+  end subroutine stagvarb_3d_mask
+
+!----------------------------------------------------------------------------
+
+  subroutine geomders(numerics,ipvr,stagthck,opvrew,opvrns)
+
+    ! Compute centered EW and NS gradients of a cell-centered field (ipvr)
+    ! at velocity (corner) points.  Gradients are set to zero wherever the
+    ! staggered ice thickness is zero, to avoid spurious gradients where
+    ! there is no ice.
+
+    use glide_types, only: glide_numerics
+
+    implicit none
+
+    type(glide_numerics), intent(in) :: numerics             ! grid spacings (dew, dns)
+    real(dp), intent(out), dimension(:,:) :: opvrew, opvrns  ! EW and NS gradients
+    real(dp), intent(in), dimension(:,:) :: ipvr, stagthck   ! field and staggered thickness
+
+    real(dp) :: dew2, dns2   ! 1/(2*dew), 1/(2*dns)
+    integer :: ew,ns,ewn,nsn
+
+    ! These factors are loop-invariant; recomputing them once per call is cheap.
+    dew2 = 1.d0/(2.0d0 * numerics%dew)
+    dns2 = 1.d0/(2.0d0 * numerics%dns)
+    ewn=size(ipvr,1)
+    nsn=size(ipvr,2)
+
+    do ns=1,nsn-1
+       do ew = 1,ewn-1
+          if (stagthck(ew,ns) /= 0.0d0) then
+             opvrew(ew,ns) = (ipvr(ew+1,ns+1)+ipvr(ew+1,ns)-ipvr(ew,ns)-ipvr(ew,ns+1)) * dew2
+             opvrns(ew,ns) = (ipvr(ew+1,ns+1)+ipvr(ew,ns+1)-ipvr(ew,ns)-ipvr(ew+1,ns)) * dns2
+          else
+             ! no ice at this velocity point (use double-precision literals)
+             opvrew(ew,ns) = 0.d0
+             opvrns(ew,ns) = 0.d0
+          end if
+       end do
+    end do
+
+  end subroutine geomders
+
+!----------------------------------------------------------------------------
+
+end module glide_grid_operators
+
+!----------------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/glide_ground.F90 b/components/cism/glimmer-cism/libglide/glide_ground.F90
new file mode 100644
index 0000000000..f1172fd5db
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_ground.F90
@@ -0,0 +1,412 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_ground.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!TODO - Change module name to something more appropriate (glide_marine?)
+!TODO - Make glide_marinlim fully parallel?
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+module glide_ground
+
+ use glide_types
+ use glimmer_global, only: dp
+ use parallel
+
+ implicit none
+
+contains
+!-------------------------------------------------------------------------------
+
+ subroutine glide_marinlim(which, &
+ thck, relx, &
+ topg, mask, &
+ mlimit, calving_fraction, &
+ eus, calving_field, &
+ ground, &
+ dew, dns, &
+ nsn, ewn)
+
+ ! Remove non-grounded ice according to one of several alternative methods
+
+ implicit none
+
+ !---------------------------------------------------------------------
+ ! Subroutine arguments
+ !---------------------------------------------------------------------
+
+ !TODO: Change mask to thkmask? The argument passed in is model%geometry%thkmask.
+
+ integer, intent(in) :: which !> Calving method option
+ real(dp),dimension(:,:),intent(inout) :: thck !> Ice thickness
+ real(dp),dimension(:,:),intent(in) :: relx !> Relaxed topography
+ real(dp),dimension(:,:),intent(in) :: topg !> Present bedrock topography
+ integer, dimension(:,:), intent(in) :: mask !> grid type mask
+ real(dp), intent(in) :: mlimit !> Lower limit on topography elevation for
+ !> ice to be present.
+ real(dp), intent(in) :: calving_fraction !> fraction of ice lost when calving; used with
+ !> $\mathtt{which}=3$.
+ real(dp), intent(in) :: eus !> eustatic sea level
+ real(dp),dimension(:,:),intent(out) :: calving_field ! thickness lost due to calving
+ real(dp), intent(in) :: dew,dns
+ integer, intent(in) :: nsn,ewn
+
+ type(glide_grnd), intent(inout) :: ground !> ground instance
+
+ integer :: ew,ns
+ !---------------------------------------------------------------------
+
+ calving_field(:,:) = 0.d0 ! using dp for constants in case calving_field changed to dp
+
+ select case (which)
+
+ case(MARINE_NONE) ! do nothing
+
+
+ case(MARINE_FLOAT_ZERO) ! Set thickness to zero if ice is floating
+
+ where (GLIDE_IS_FLOAT(mask))
+ calving_field = thck
+ thck = 0.0d0
+ end where
+
+ case(MARINE_FLOAT_FRACTION) ! remove fraction of ice when floating
+
+ do ns = 2,size(thck,2)-1
+ do ew = 2,size(thck,1)-1
+ if (GLIDE_IS_CALVING(mask(ew,ns))) then
+ calving_field(ew,ns) = (1.d0-calving_fraction)*thck(ew,ns)
+ thck(ew,ns) = calving_fraction*thck(ew,ns)
+ !mask(ew,ns) = ior(mask(ew,ns), GLIDE_MASK_OCEAN)
+ end if
+ end do
+ end do
+
+ ! if uncomment above mask update, then call parallel_halo(mask)
+
+ case(MARINE_RELX_THRESHOLD) ! Set thickness to zero if relaxed bedrock is below a given level
+
+ where (relx <= mlimit+eus)
+ calving_field = thck
+ thck = 0.0d0
+ end where
+
+ case(MARINE_TOPG_THRESHOLD) ! Set thickness to zero at marine edge if present bedrock is below a given level
+
+ where (GLIDE_IS_MARINE_ICE_EDGE(mask) .and. topg < mlimit+eus)
+ calving_field = thck
+ thck = 0.0d0
+ end where
+
+!WHL - Removed old case (5) based on recommendation from Jesse Johnson
+! Then changed old case(7) to new case(5) to avoid a gap in the case numbering.
+
+ ! Huybrechts grounding line scheme for Greenland initialization
+
+ case(MARINE_HUYBRECHTS) ! used to be case(7)
+
+ !TODO - MARINE_HUYBRECHTS case assumes eus has units of meters. Change to eus*thk0?
+ ! Also check units of relx.
+ if(eus > -80.d0) then
+ where (relx <= 2.d0*eus)
+ calving_field = thck
+ thck = 0.0d0
+ end where
+ elseif (eus <= -80.d0) then
+ where (relx <= (2.d0*eus - 0.25d0*(eus + 80.d0)**2.d0))
+ calving_field = thck
+ thck = 0.0d0
+ end where
+ end if
+
+ ! Commenting out this case for now
+!! case(6)
+
+ ! not serial as far as I can tell as well; for parallelization, issues
+ ! arise from components of ground being updated, and corresponding halos
+ ! also need to be updated? Waiting until serial fixes are implemented
+
+!! call not_parallel(__FILE__, __LINE__) ! not serial as far as I can tell as well
+!! call update_ground_line(ground, topg, thck, eus, dew, dns, ewn, nsn, mask)
+
+!! where (GLIDE_IS_FLOAT(mask))
+!! calving_field = thck
+!! thck = 0.0d0
+!! end where
+
+ end select
+
+ end subroutine glide_marinlim
+
+!-------------------------------------------------------------------------
+
+ subroutine calc_gline_flux(stagthk, velnorm, mask, gline_flux, ubas, vbas, dew)
+
+ ! simple subroutine to calculate the flux at the grounding line
+
+ implicit none
+
+ !JEFF removing pointer attribute integer, dimension(:,:),pointer :: mask !> grid type mask
+ integer, dimension(:,:) :: mask ! grid type mask
+ real(dp),dimension(:,:),intent(in) :: stagthk ! Ice thickness (scaled)
+ real(dp),dimension(:,:,:), intent(in) :: velnorm ! horizontal ice speed
+ real(dp),dimension(:,:), intent(inout) :: gline_flux ! Grounding Line flux
+ real(dp),dimension(:,:), intent(in) :: ubas ! basal velocity in u-dir
+ real(dp),dimension(:,:), intent(in) :: vbas ! basal velocity in v-dir
+ real(dp),intent(in) :: dew ! grid spacing
+ integer :: ewn, nsn
+
+ !TODO: get the grounding line flux on the velo grid; currently using both the ice grid and the velo grid.
+
+ ewn = size(gline_flux, 1)
+ nsn = size(gline_flux, 2)
+
+ where (GLIDE_IS_GROUNDING_LINE(mask))
+ gline_flux = stagthk * ((4.d0/5.d0)* velnorm(1,:,:) + &
+ (ubas**2.d0 + vbas**2.d0)**(1.d0/2.d0)) * dew
+ end where
+
+ !Note: - This update may not be needed. gline_flux is just a diagnostic.
+ call parallel_halo(gline_flux)
+
+ end subroutine calc_gline_flux
+
+!-------------------------------------------------------------------------
+ !TODO - The next few subroutines are associated with case 6, which is not supported. Remove them?
+
+ !Loops through the mask and does the interpolation for all the grounding lines
+
+ subroutine update_ground_line(ground, topg, thck, eus, dew, dns, ewn, nsn, mask)
+
+ implicit none
+ type(glide_grnd) :: ground !> ground instance
+ real(dp),dimension(:,:),intent(in) :: topg !> Present bedrock topography (scaled)
+ real(dp),dimension(:,:),intent(in) :: thck !> Present thickness (scaled)
+ real(dp),intent(in) :: eus !> eustatic sea level
+ real(dp),intent(in) :: dew, dns
+ integer, intent(in) :: ewn, nsn
+ !JEFF remove pointer attribute integer, dimension(:,:),pointer :: mask !> grid type mask
+ integer, dimension(:,:) :: mask !> grid type mask
+ integer :: ew,ns,jns,jew,j1ns,j1ew
+ real(dp) :: xg !grounding line
+ !this is assuming the grounding line is the last grounded pt on the mask
+ !reset grounding line data to zero
+ ground%gl_ew = 0.d0
+ ground%gl_ns = 0.d0
+ do ns = 1,nsn
+ do ew = 1,ewn
+ if (GLIDE_IS_GROUNDING_LINE(mask(ew,ns))) then
+ !the grounding line always rounds down so it is grounded.
+ !southern grounding line
+ if (GLIDE_IS_OCEAN(mask(ew,ns - 1)) &
+ .or. (GLIDE_IS_FLOAT(mask(ew,ns - 1)))) then
+ xg = lin_reg_xg(topg,thck,eus,dew,dns,ew,ns,ew,ns-1)
+ call set_ground_line(ground,ew,ns,ew,ns-1,xg)
+ !northern grounding line
+ else if (GLIDE_IS_OCEAN(mask(ew,ns + 1)) &
+ .or. (GLIDE_IS_FLOAT(mask(ew,ns + 1)))) then
+ xg = lin_reg_xg(topg,thck,eus,dew,dns,ew,ns,ew,ns+1)
+ call set_ground_line(ground,ew,ns,ew,ns+1,xg)
+ end if
+
+ !western grounding line
+ if (GLIDE_IS_OCEAN(mask(ew - 1,ns)) &
+ .or. GLIDE_IS_FLOAT(mask(ew - 1,ns))) then
+ xg = lin_reg_xg(topg,thck,eus,dew,dns,ew,ns,ew - 1,ns)
+ call set_ground_line(ground,ew,ns,ew-1,ns,xg)
+ !eastern grounding line
+ else if (GLIDE_IS_OCEAN(mask(ew + 1,ns)) &
+ .or. GLIDE_IS_FLOAT(mask(ew + 1,ns))) then
+ xg = lin_reg_xg(topg,thck,eus,dew,dns,ew,ns,ew + 1,ns)
+ call set_ground_line(ground,ew,ns,ew + 1,ns,xg)
+ end if
+ end if
+ end do
+ end do
+
+ end subroutine update_ground_line
+
+!-------------------------------------------------------------------------
+
+ subroutine set_ground_line(ground,ew1,ns1,ew2,ns2,value)
+
+ use glide_types
+ implicit none
+
+ type(glide_grnd) :: ground !> model instance
+ integer, intent(in) :: ns1 !grounding line in ns direction
+ integer, intent(in) :: ew1 !grounding line in ew direction
+ integer, intent(in) :: ns2 !grounding line in ns direction
+ integer, intent(in) :: ew2 !grounding line in ew direction
+ real(dp), intent(in) :: value !grounding line in ew direction
+ integer :: slot_ew, slot_ns !integers to compute the min
+
+ if (ns1 == ns2) then
+ slot_ew = min(ew1,ew2)
+ ground%gl_ew(slot_ew,ns1) = value
+ else if (ew1 == ew2) then
+ slot_ns = min(ns1,ns2)
+ ground%gl_ns(ew1,slot_ns) = value
+ end if
+ end subroutine set_ground_line
+
+!-------------------------------------------------------------------------
+
+ !does the pattyn interpolation for the grounding line
+
+!! real function lin_reg_xg(topg, thck, eus, dew, dns, ew, ns, j1ew, j1ns)
+ function lin_reg_xg(topg, thck, eus, dew, dns, ew, ns, j1ew, j1ns)
+
+ use glide_types
+ use glimmer_physcon, only : rhoi, rhoo
+ real(dp) :: lin_reg_xg
+ real(dp),dimension(:,:),intent(in) :: topg !> Present bedrock topography (scaled)
+ real(dp),dimension(:,:),intent(in) :: thck !> Present thickness (scaled)
+ real(dp), intent(in) :: eus !> eustatic sea level
+ real(dp), intent(in) :: dew, dns
+ integer, intent(in) :: ns !grounding line in ns direction
+ integer, intent(in) :: ew !grounding line in ew direction
+ integer, intent(in) :: j1ns !ice shelf in ns direction
+ integer, intent(in) :: j1ew !ice shelf line in ew direction
+ real(dp) :: xg !grounding line
+ real(dp) :: dx !distance between gridpts
+ real(dp) :: xj !grounding line
+ real(dp) :: fj !f at grid pnt j
+ real(dp) :: fj_1 !f evaluated at j (+/-) 1
+ real(dp) :: df !delta f of fj,jf_1
+
+ if (ew == j1ew) then
+ dx = dns
+ xj = ns*dx
+ else
+ dx = dew
+ xj = ew*dx
+ end if
+ !set the pattyn f function - assuming ocean water
+ fj = (eus - topg(ew,ns))*rhoo/(rhoi*thck(ew,ns))
+ if (thck(j1ew,j1ns) > 0.d0) then
+ fj_1 = (eus - topg(j1ew,j1ns))*rhoo/(rhoi*thck(j1ew,j1ns))
+ df = (fj_1 - fj)/dx
+ xg = (1 - fj + df*xj)/df
+ else
+ xg = xj
+ end if
+
+ lin_reg_xg = xg
+ return
+ end function lin_reg_xg
+
+!-------------------------------------------------------------------------
+
+ !TODO - Remove function get_ground_thck? Currently not called.
+
+!! real function get_ground_thck(ground,topg,usrf,dew,dns,ew1,ns1,ew2,ns2)
+ function get_ground_thck(ground,topg,usrf,dew,dns,ew1,ns1,ew2,ns2)
+
+ use glide_types
+ implicit none
+ real(dp) :: get_ground_thck
+ type(glide_grnd) :: ground !> ground instance
+ real(dp),dimension(:,:),intent(in) :: topg !> Present bedrock topography (scaled)
+ real(dp),dimension(:,:),intent(in) :: usrf !> surface height
+ real(dp), intent(in) :: dew, dns
+ integer :: ns1,ew1,ns2,ew2,min_ns,min_ew,max_ns,max_ew !grounding line in ns/ew direction
+ real(dp) :: xg !grounding line
+ real(dp) :: tg !topographic height at grounding line
+ real(dp) :: ig !ice height at grounding line
+ real(dp) :: hg !thickness at the grounding line
+ real(dp) :: x1 !pts for linear interpolation
+ real(dp) :: x0
+ real(dp) :: y1
+ real(dp) :: y0
+ !using lin. interpolation to find top at grounding line
+ if (ns1 == ns2) then
+ min_ew = min(ew1,ew2)
+ max_ew = max(ew1,ew2)
+ min_ns = ns1
+ max_ns = ns1
+ x0 = min_ew*dew !model%numerics%dew
+ x1 = max_ew*dew
+ else if (ew1 == ew2) then
+ min_ns = min(ns1,ns2)
+ max_ns = max(ns1,ns2)
+ min_ew = ew1
+ max_ew = ew1
+ x0 = min_ns*dns !model%numerics%dns
+ x1 = max_ns*dns
+ end if
+ !get grounding line
+ xg = ground%gl_ns(min_ew,min_ns)
+ !find top height at xg
+ y0 = topg(min_ew,min_ns) !model%geometry%topg
+ y1 = topg(max_ew,max_ns)
+ tg = y0 + (xg - x0)*((y1 - y0)/(x1 - x0))
+ !find ice at xg
+ y0 = usrf(min_ew,min_ns) !model%geometry%usrf
+ y1 = usrf(max_ew,max_ns)
+ ig = y0 + (xg - x0)*((y1 - y0)/(x1 - x0))
+ !thickness
+ hg = ig - tg
+ get_ground_thck = hg
+ return
+ end function get_ground_thck
+
+!-------------------------------------------------------------------------
+ !TODO - Remove function get_ground_line? Currently not called.
+
+ !This function returns the correct grounding line using the data given
+ ! the mask reference point. dir is specifying 'ew' or 'ns', but can be
+ ! left null if there's only one option.
+
+!! real function get_ground_line(ground,ew1,ns1,ew2,ns2)
+ function get_ground_line(ground,ew1,ns1,ew2,ns2)
+
+ use glide_types
+ implicit none
+ real(dp) :: get_ground_line
+ type(glide_grnd) :: ground !> glide ground instance
+ integer :: ns1,ew1,ns2,ew2,slot_ns,slot_ew !grounding line in ns/ew direction
+ real(dp) :: appr_ground !grounding line
+
+ if (ns1 == ns2) then
+ slot_ew = min(ew1,ew2)
+ appr_ground = ground%gl_ns(slot_ew,ns1)
+ else if (ew1 == ew2) then
+ slot_ns = min(ns1,ns2)
+ appr_ground = ground%gl_ew(ew1,slot_ns)
+ end if
+ get_ground_line = appr_ground
+ return
+
+ end function get_ground_line
+
+!---------------------------------------------------------------------------
+
+end module glide_ground
+
+!---------------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/glide_io.F90.default b/components/cism/glimmer-cism/libglide/glide_io.F90.default
new file mode 100644
index 0000000000..42445d1ede
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_io.F90.default
@@ -0,0 +1,5035 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+! WARNING: this file was automatically generated on
+! Fri, 03 Apr 2015 18:33:13 +0000
+! from ncdf_template.F90.in
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+! WJS (1-30-12): The following (turning optimization off) is needed as a workaround for an
+! xlf compiler bug, at least in IBM XL Fortran for AIX, V12.1 on bluefire
+#ifdef CPRIBM
+@PROCESS OPT(0)
+#endif
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! ncdf_template.F90.in - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#define NCO outfile%nc
+#define NCI infile%nc
+
+
+module glide_io
+ ! template for creating subsystem specific I/O routines
+ ! written by Magnus Hagdorn, 2004
+
+ use glide_types
+
+ implicit none
+
+ private :: get_xtype, is_enabled, is_enabled_0dint, is_enabled_1dint, is_enabled_2dint, is_enabled_0dreal, is_enabled_1dreal, is_enabled_2dreal, is_enabled_3dreal
+
+ character(310), save :: restart_variable_list='' ! list of variables needed for a restart
+!TODO change 310 to a variable - see glimmer_ncdf.F90 in the definition for type glimmer_nc_stat for other instances of this value.
+
+ interface is_enabled ! MJH 10/21/13: Interface needed for determining if arrays have been enabled. See notes below in glide_io_create.
+ module procedure is_enabled_0dint
+ module procedure is_enabled_1dint
+ module procedure is_enabled_2dint
+ module procedure is_enabled_0dreal
+ module procedure is_enabled_1dreal
+ module procedure is_enabled_2dreal
+ module procedure is_enabled_3dreal
+ end interface is_enabled
+
+contains
+
+ !*****************************************************************************
+ ! netCDF output
+ !*****************************************************************************
+ subroutine glide_io_createall(model,data,outfiles)
+ ! open all netCDF files for output
+ use glide_types
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(glide_global_type) :: model
+ type(glide_global_type) :: data ! MJH 10/21/13: Making 'data' mandatory. See notes below in glide_io_create
+ type(glimmer_nc_output),optional,pointer :: outfiles
+
+ ! local variables
+ type(glimmer_nc_output), pointer :: oc
+
+ if (present(outfiles)) then
+ oc => outfiles
+ else
+ oc=>model%funits%out_first
+ end if
+
+ do while(associated(oc))
+ call glide_io_create(oc,model,data)
+ oc=>oc%next
+ end do
+ end subroutine glide_io_createall
+
+ subroutine glide_io_writeall(data,model,atend,outfiles,time)
+ ! if necessary write to netCDF files
+ use glide_types
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(glide_global_type) :: data
+ type(glide_global_type) :: model
+ logical, optional :: atend
+ type(glimmer_nc_output),optional,pointer :: outfiles
+ real(dp),optional :: time
+
+ ! local variables
+ type(glimmer_nc_output), pointer :: oc
+ logical :: forcewrite=.false.
+
+ if (present(outfiles)) then
+ oc => outfiles
+ else
+ oc=>model%funits%out_first
+ end if
+
+ if (present(atend)) then
+ forcewrite = atend
+ end if
+
+ do while(associated(oc))
+#ifdef HAVE_AVG
+ if (oc%do_averages) then
+ call glide_avg_accumulate(oc,data,model)
+ end if
+#endif
+ call glimmer_nc_checkwrite(oc,model,forcewrite,time)
+ if (oc%nc%just_processed) then
+ ! write standard variables
+ call glide_io_write(oc,data)
+#ifdef HAVE_AVG
+ if (oc%do_averages) then
+ call glide_avg_reset(oc,data)
+ end if
+#endif
+ end if
+ oc=>oc%next
+ end do
+ end subroutine glide_io_writeall
+
+ subroutine glide_io_create(outfile,model,data)
+ use parallel
+ use glide_types
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ use glimmer_map_types
+ use glimmer_log
+ use glimmer_paramets
+ use glimmer_scales
+ use glimmer_log
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ type(glide_global_type) :: model
+ type(glide_global_type) :: data ! MJH 10/21/13: Making 'data' mandatory. See note below
+
+ integer status,varid,pos
+
+ ! MJH 10/21/13: Local variables needed for checking if a variable is enabled.
+ real(dp) :: tavgf
+ integer :: up
+
+ integer :: level_dimid
+ integer :: lithoz_dimid
+ integer :: staglevel_dimid
+ integer :: stagwbndlevel_dimid
+ integer :: time_dimid
+ integer :: x0_dimid
+ integer :: x1_dimid
+ integer :: y0_dimid
+ integer :: y1_dimid
+
+ ! defining dimensions
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'level',model%general%upn,level_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'level',level_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'lithoz',model%lithot%nlayer,lithoz_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'lithoz',lithoz_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'staglevel',model%general%upn-1,staglevel_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'staglevel',staglevel_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'stagwbndlevel',model%general%upn+1,stagwbndlevel_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'stagwbndlevel',stagwbndlevel_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_inq_dimid(NCO%id,'time',time_dimid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'x0',global_ewn-1,x0_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'x0',x0_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'x1',global_ewn,x1_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'x1',x1_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'y0',global_nsn-1,y0_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'y0',y0_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (.not.outfile%append) then
+ status = parallel_def_dim(NCO%id,'y1',global_nsn,y1_dimid)
+ else
+ status = parallel_inq_dimid(NCO%id,'y1',y1_dimid)
+ endif
+ call nc_errorhandle(__FILE__,__LINE__,status)
+
+ ! Expanding restart variables: if 'restart' or 'hot' is present, we remove that
+ ! word from the variable list, and flip the restartfile flag.
+ ! In CISM 2.0, 'restart' is the preferred name to represent restart variables,
+ ! but 'hot' is supported for backward compatibility. Thus, we check for both.
+ NCO%vars = ' '//trim(adjustl(NCO%vars))//' ' ! Need to maintain a space at beginning and end of list
+ ! expanding restart variables
+ pos = index(NCO%vars,' restart ')
+ if (pos.ne.0) then
+ NCO%vars = NCO%vars(:pos)//NCO%vars(pos+8:)
+ NCO%restartfile = .true.
+ end if
+ pos = index(NCO%vars,' hot ')
+ if (pos.ne.0) then
+ NCO%vars = NCO%vars(:pos)//NCO%vars(pos+4:)
+ NCO%restartfile = .true.
+ end if
+ ! Now apply necessary changes if the file is a restart file.
+ if (NCO%restartfile) then
+ if ((len_trim(NCO%vars) + len_trim(restart_variable_list) + 2) >= len(NCO%vars) ) then
+ call write_log('Adding restart variables has made the list of output variables too long for file ' // NCO%filename, GM_FATAL)
+ else
+ ! Expand the restart variable list
+ ! Need to maintain a space at beginning and end of list
+ NCO%vars = trim(NCO%vars) // ' ' // trim(restart_variable_list) // ' ' ! (a module variable)
+ ! Set the xtype to be double (required for an exact restart)
+ outfile%default_xtype = NF90_DOUBLE
+ endif
+ end if
+
+ ! Convert temp and flwa to versions on stag grid, if needed
+ ! Note: this check must occur after restart variables are expanded which happens in glimmer_nc_readparams
+ call check_for_tempstag(model%options%whichdycore,NCO)
+
+ ! checking if we need to handle time averages
+ pos = index(NCO%vars,"_tavg")
+ if (pos.ne.0) then
+ outfile%do_averages = .True.
+ end if
+
+ ! Now that the output variable list is finalized, make sure we aren't truncating what the user intends to be output.
+ ! Note: this only checks that the text in the variable list does not extend to within one character of the end of the variable.
+ ! It does not handle the case where the user exactly fills the allowable length with variables or has a too-long list with more than one space between variable names.
+ if ((len_trim(NCO%vars) + 1 ) >= len(NCO%vars)) then
+ call write_log('The list of output variables is too long for file ' // NCO%filename, GM_FATAL)
+ endif
+
+
+ ! MJH, 10/21/13: In the auto-generated code below, the creation of each output variable is wrapped by a check if the data for that
+ ! variable has a size greater than 0. This is because of recently added checks in glide_types.F90 that don't fully allocate
+ ! some variables if certain model options are disabled. This is to lower memory requirements while running the model.
+ ! The reason they have to be allocated with size zero rather than left unallocated is because the data for
+ ! some netCDF output variables is defined with math, which causes an error if the operands are unallocated.
+ ! Note that if a variable is not created, then it will not be subsequently written to.
+ ! Also note that this change requires that data be a mandatory argument to this subroutine.
+
+ ! Some output variables will need tavgf. The value does not matter, but it must exist.
+ ! Nonetheless, for completeness give it the proper value that it has in glide_io_write.
+ tavgf = outfile%total_time
+ if (tavgf.ne.0.d0) then
+ tavgf = 1.d0/tavgf
+ end if
+ ! Similarly, some output variables use the variable up. Give it value of 0 here.
+ up = 0
+
+ ! level -- sigma layers
+ if (.not.outfile%append) then
+ call write_log('Creating variable level')
+ status = parallel_def_var(NCO%id,'level',get_xtype(outfile,NF90_FLOAT),(/level_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'positive', 'down')
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'sigma layers')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_sigma_coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ end if
+
+ ! lithoz -- vertical coordinate of lithosphere layer
+ if (.not.outfile%append) then
+ call write_log('Creating variable lithoz')
+ status = parallel_def_var(NCO%id,'lithoz',get_xtype(outfile,NF90_FLOAT),(/lithoz_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'vertical coordinate of lithosphere layer')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ end if
+
+ ! staglevel -- stag sigma layers
+ if (.not.outfile%append) then
+ call write_log('Creating variable staglevel')
+ status = parallel_def_var(NCO%id,'staglevel',get_xtype(outfile,NF90_FLOAT),(/staglevel_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'positive', 'down')
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'stag sigma layers')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_stag_sigma_coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ end if
+
+ ! stagwbndlevel -- stag sigma layers with boundaries
+ if (.not.outfile%append) then
+ call write_log('Creating variable stagwbndlevel')
+ status = parallel_def_var(NCO%id,'stagwbndlevel',get_xtype(outfile,NF90_FLOAT),(/stagwbndlevel_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'positive', 'down')
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'stag sigma layers with boundaries')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_stag_sigma_coordinate_with_bnd')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ end if
+
+ ! x0 -- Cartesian x-coordinate, velocity grid
+ if (.not.outfile%append) then
+ call write_log('Creating variable x0')
+ status = parallel_def_var(NCO%id,'x0',get_xtype(outfile,NF90_FLOAT),(/x0_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Cartesian x-coordinate, velocity grid')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ status = parallel_put_att(NCO%id, varid, 'axis', 'X')
+ end if
+
+ ! x1 -- Cartesian x-coordinate
+ if (.not.outfile%append) then
+ call write_log('Creating variable x1')
+ status = parallel_def_var(NCO%id,'x1',get_xtype(outfile,NF90_FLOAT),(/x1_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Cartesian x-coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ status = parallel_put_att(NCO%id, varid, 'axis', 'X')
+ end if
+
+ ! y0 -- Cartesian y-coordinate, velocity grid
+ if (.not.outfile%append) then
+ call write_log('Creating variable y0')
+ status = parallel_def_var(NCO%id,'y0',get_xtype(outfile,NF90_FLOAT),(/y0_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Cartesian y-coordinate, velocity grid')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ status = parallel_put_att(NCO%id, varid, 'axis', 'Y')
+ end if
+
+ ! y1 -- Cartesian y-coordinate
+ if (.not.outfile%append) then
+ call write_log('Creating variable y1')
+ status = parallel_def_var(NCO%id,'y1',get_xtype(outfile,NF90_FLOAT),(/y1_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Cartesian y-coordinate')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ status = parallel_put_att(NCO%id, varid, 'axis', 'Y')
+ end if
+
+ ! acab -- accumulation, ablation rate
+ pos = index(NCO%vars,' acab ')
+ status = parallel_inq_varid(NCO%id,'acab',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%climate%acab)) then
+ call write_log('Creating variable acab')
+ status = parallel_def_var(NCO%id,'acab',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_acab))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'accumulation, ablation rate')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_surface_specific_mass_balance')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable acab was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! adv_cfl_dt -- advective CFL maximum time step
+ pos = index(NCO%vars,' adv_cfl_dt ')
+ status = parallel_inq_varid(NCO%id,'adv_cfl_dt',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+10) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%numerics%adv_cfl_dt)) then
+ call write_log('Creating variable adv_cfl_dt')
+ status = parallel_def_var(NCO%id,'adv_cfl_dt',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'advective CFL maximum time step')
+ status = parallel_put_att(NCO%id, varid, 'units', 'years')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable adv_cfl_dt was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! artm -- annual mean air temperature
+ pos = index(NCO%vars,' artm ')
+ status = parallel_inq_varid(NCO%id,'artm',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%climate%artm)) then
+ call write_log('Creating variable artm')
+ status = parallel_def_var(NCO%id,'artm',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'annual mean air temperature')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'surface_temperature')
+ status = parallel_put_att(NCO%id, varid, 'cell_methods', 'time: mean')
+ status = parallel_put_att(NCO%id, varid, 'units', 'degree_Celsius')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable artm was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! beta -- higher-order bed stress coefficient
+ pos = index(NCO%vars,' beta ')
+ status = parallel_inq_varid(NCO%id,'beta',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%beta)) then
+ call write_log('Creating variable beta')
+ status = parallel_def_var(NCO%id,'beta',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_beta))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'higher-order bed stress coefficient')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa yr/m')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable beta was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! bfricflx -- basal friction heat flux
+ pos = index(NCO%vars,' bfricflx ')
+ status = parallel_inq_varid(NCO%id,'bfricflx',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+8) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%bfricflx)) then
+ call write_log('Creating variable bfricflx')
+ status = parallel_def_var(NCO%id,'bfricflx',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(1.0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal friction heat flux')
+ status = parallel_put_att(NCO%id, varid, 'units', 'watt/meter2')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable bfricflx was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! bheatflx -- upward basal heat flux
+ pos = index(NCO%vars,' bheatflx ')
+ status = parallel_inq_varid(NCO%id,'bheatflx',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+8) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%bheatflx)) then
+ call write_log('Creating variable bheatflx')
+ status = parallel_def_var(NCO%id,'bheatflx',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_bflx))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'upward basal heat flux')
+ status = parallel_put_att(NCO%id, varid, 'units', 'watt/meter2')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable bheatflx was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! bmlt -- basal melt rate
+ pos = index(NCO%vars,' bmlt ')
+ status = parallel_inq_varid(NCO%id,'bmlt',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%bmlt)) then
+ call write_log('Creating variable bmlt')
+ status = parallel_def_var(NCO%id,'bmlt',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_acab))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal melt rate')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_basal_melt_rate')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable bmlt was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! btemp -- basal ice temperature
+ pos = index(NCO%vars,' btemp ')
+ status = parallel_inq_varid(NCO%id,'btemp',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+5) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%temp)) then
+ call write_log('Creating variable btemp')
+ status = parallel_def_var(NCO%id,'btemp',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal ice temperature')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_temperature')
+ status = parallel_put_att(NCO%id, varid, 'units', 'degree_Celsius')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable btemp was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! btractx -- basal traction (x-direction comp)
+ pos = index(NCO%vars,' btractx ')
+ status = parallel_inq_varid(NCO%id,'btractx',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%btractx)) then
+ call write_log('Creating variable btractx')
+ status = parallel_def_var(NCO%id,'btractx',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal traction (x-direction comp)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable btractx was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! btracty -- basal traction (y-direction comp)
+ pos = index(NCO%vars,' btracty ')
+ status = parallel_inq_varid(NCO%id,'btracty',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%btracty)) then
+ call write_log('Creating variable btracty')
+ status = parallel_def_var(NCO%id,'btracty',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal traction (y-direction comp)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable btracty was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! btrc -- basal slip coefficient
+ pos = index(NCO%vars,' btrc ')
+ status = parallel_inq_varid(NCO%id,'btrc',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%btrc)) then
+ call write_log('Creating variable btrc')
+ status = parallel_def_var(NCO%id,'btrc',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_btrc))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal slip coefficient')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/pascal/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable btrc was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! bwat -- basal water depth
+ pos = index(NCO%vars,' bwat ')
+ status = parallel_inq_varid(NCO%id,'bwat',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%bwat)) then
+ call write_log('Creating variable bwat')
+ status = parallel_def_var(NCO%id,'bwat',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal water depth')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable bwat was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! bwatflx -- basal water flux
+ pos = index(NCO%vars,' bwatflx ')
+ status = parallel_inq_varid(NCO%id,'bwatflx',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%bwatflx)) then
+ call write_log('Creating variable bwatflx')
+ status = parallel_def_var(NCO%id,'bwatflx',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal water flux')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter3/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable bwatflx was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! calving -- ice margin calving
+ pos = index(NCO%vars,' calving ')
+ status = parallel_inq_varid(NCO%id,'calving',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%climate%calving)) then
+ call write_log('Creating variable calving')
+ status = parallel_def_var(NCO%id,'calving',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice margin calving')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable calving was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! diff_cfl_dt -- diffusive CFL maximum time step
+ pos = index(NCO%vars,' diff_cfl_dt ')
+ status = parallel_inq_varid(NCO%id,'diff_cfl_dt',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+11) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%numerics%diff_cfl_dt)) then
+ call write_log('Creating variable diff_cfl_dt')
+ status = parallel_def_var(NCO%id,'diff_cfl_dt',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'diffusive CFL maximum time step')
+ status = parallel_put_att(NCO%id, varid, 'units', 'years')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable diff_cfl_dt was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! diffu -- apparent diffusivity
+ pos = index(NCO%vars,' diffu ')
+ status = parallel_inq_varid(NCO%id,'diffu',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+5) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%diffu)) then
+ call write_log('Creating variable diffu')
+ status = parallel_def_var(NCO%id,'diffu',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_diffu))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'apparent diffusivity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter2/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable diffu was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! dissip -- dissipation rate (W m-3) divided by rhoi Ci
+ pos = index(NCO%vars,' dissip ')
+ status = parallel_inq_varid(NCO%id,'dissip',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+6) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%dissip)) then
+ call write_log('Creating variable dissip')
+ status = parallel_def_var(NCO%id,'dissip',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scyr))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'dissipation rate (W m-3) divided by rhoi Ci')
+ status = parallel_put_att(NCO%id, varid, 'units', 'deg C/yr')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable dissip was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! dissipstag -- dissipation rate (W m-3) divided by rhoi Ci
+ pos = index(NCO%vars,' dissipstag ')
+ status = parallel_inq_varid(NCO%id,'dissipstag',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+10) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%dissip)) then
+ call write_log('Creating variable dissipstag')
+ status = parallel_def_var(NCO%id,'dissipstag',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scyr))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'dissipation rate (W m-3) divided by rhoi Ci')
+ status = parallel_put_att(NCO%id, varid, 'units', 'deg C/yr')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable dissipstag was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! dthckdtm -- tendency of ice thickness (NOTE: Glide only)
+ pos = index(NCO%vars,' dthckdtm ')
+ status = parallel_inq_varid(NCO%id,'dthckdtm',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+8) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geomderv%dthckdtm)) then
+ call write_log('Creating variable dthckdtm')
+ status = parallel_def_var(NCO%id,'dthckdtm',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_acab))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'tendency of ice thickness (NOTE: Glide only)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable dthckdtm was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! dusrfdtm -- rate of upper ice surface elevation change (NOTE: Glide only)
+ pos = index(NCO%vars,' dusrfdtm ')
+ status = parallel_inq_varid(NCO%id,'dusrfdtm',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+8) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geomderv%dusrfdtm)) then
+ call write_log('Creating variable dusrfdtm')
+ status = parallel_def_var(NCO%id,'dusrfdtm',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_acab))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'rate of upper ice surface elevation change (NOTE: Glide only)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable dusrfdtm was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! dynbcmask -- 2d array of higher-order model boundary condition mask values (NOTE: Glam ONLY)
+ pos = index(NCO%vars,' dynbcmask ')
+ status = parallel_inq_varid(NCO%id,'dynbcmask',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+9) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%dynbcmask)) then
+ call write_log('Creating variable dynbcmask')
+ status = parallel_def_var(NCO%id,'dynbcmask',get_xtype(outfile,NF90_INT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', '2d array of higher-order model boundary condition mask values (NOTE: Glam ONLY)')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable dynbcmask was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! effecpress -- effective pressure
+ pos = index(NCO%vars,' effecpress ')
+ status = parallel_inq_varid(NCO%id,'effecpress',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+10) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%basal_physics%effecpress)) then
+ call write_log('Creating variable effecpress')
+ status = parallel_def_var(NCO%id,'effecpress',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'effective pressure')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable effecpress was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! efvs -- effective viscosity
+ pos = index(NCO%vars,' efvs ')
+ status = parallel_inq_varid(NCO%id,'efvs',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%efvs)) then
+ call write_log('Creating variable efvs')
+ status = parallel_def_var(NCO%id,'efvs',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_efvs))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'effective viscosity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pascal * years')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable efvs was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! enthalpy -- specific enthalpy
+ pos = index(NCO%vars,' enthalpy ')
+ status = parallel_inq_varid(NCO%id,'enthalpy',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+8) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%enthalpy)) then
+ call write_log('Creating variable enthalpy')
+ status = parallel_def_var(NCO%id,'enthalpy',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, stagwbndlevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'specific enthalpy')
+ status = parallel_put_att(NCO%id, varid, 'units', 'J/m^3')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable enthalpy was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! eus -- global average sea level
+ pos = index(NCO%vars,' eus ')
+ status = parallel_inq_varid(NCO%id,'eus',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+3) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%climate%eus)) then
+ call write_log('Creating variable eus')
+ status = parallel_def_var(NCO%id,'eus',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'global average sea level')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'global_average_sea_level_change')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable eus was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! flwa -- Pre-exponential flow law parameter
+ pos = index(NCO%vars,' flwa ')
+ status = parallel_inq_varid(NCO%id,'flwa',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%flwa)) then
+ call write_log('Creating variable flwa')
+ status = parallel_def_var(NCO%id,'flwa',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_flwa))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Pre-exponential flow law parameter')
+ status = parallel_put_att(NCO%id, varid, 'units', 'pascal**(-n) year**(-1)')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable flwa was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! flwastag -- Pre-exponential flow law parameter
+ pos = index(NCO%vars,' flwastag ')
+ status = parallel_inq_varid(NCO%id,'flwastag',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+8) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%flwa)) then
+ call write_log('Creating variable flwastag')
+ status = parallel_def_var(NCO%id,'flwastag',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_flwa))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Pre-exponential flow law parameter')
+ status = parallel_put_att(NCO%id, varid, 'units', 'pascal**(-n) year**(-1)')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable flwastag was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! gravity -- gravitational acceleration
+ pos = index(NCO%vars,' gravity ')
+ status = parallel_inq_varid(NCO%id,'gravity',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(grav)) then
+ call write_log('Creating variable gravity')
+ status = parallel_def_var(NCO%id,'gravity',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',1.0)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'gravitational acceleration')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'gravity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/s/s')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable gravity was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! iarea -- area covered by ice
+ pos = index(NCO%vars,' iarea ')
+ status = parallel_inq_varid(NCO%id,'iarea',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+5) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%iarea)) then
+ call write_log('Creating variable iarea')
+ status = parallel_def_var(NCO%id,'iarea',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(len0*len0*1.e-6))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'area covered by ice')
+ status = parallel_put_att(NCO%id, varid, 'units', 'km2')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable iarea was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! iareaf -- area covered by floating ice
+ pos = index(NCO%vars,' iareaf ')
+ status = parallel_inq_varid(NCO%id,'iareaf',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+6) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%iareaf)) then
+ call write_log('Creating variable iareaf')
+ status = parallel_def_var(NCO%id,'iareaf',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(len0*len0*1.e-6))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'area covered by floating ice')
+ status = parallel_put_att(NCO%id, varid, 'units', 'km2')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable iareaf was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! iareag -- area covered by grounded ice
+ pos = index(NCO%vars,' iareag ')
+ status = parallel_inq_varid(NCO%id,'iareag',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+6) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%iareag)) then
+ call write_log('Creating variable iareag')
+ status = parallel_def_var(NCO%id,'iareag',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(len0*len0*1.e-6))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'area covered by grounded ice')
+ status = parallel_put_att(NCO%id, varid, 'units', 'km2')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable iareag was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! ice_mask -- real-valued mask denoting ice (1) or no ice (0)
+ pos = index(NCO%vars,' ice_mask ')
+ status = parallel_inq_varid(NCO%id,'ice_mask',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+8) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%ice_mask)) then
+ call write_log('Creating variable ice_mask')
+ status = parallel_def_var(NCO%id,'ice_mask',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(1.0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'real-valued mask denoting ice (1) or no ice (0)')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable ice_mask was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! ice_specific_heat -- ice specific heat
+ pos = index(NCO%vars,' ice_specific_heat ')
+ status = parallel_inq_varid(NCO%id,'ice_specific_heat',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+17) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(shci)) then
+ call write_log('Creating variable ice_specific_heat')
+ status = parallel_def_var(NCO%id,'ice_specific_heat',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',1.0)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice specific heat')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'ice_specific_heat')
+ status = parallel_put_att(NCO%id, varid, 'units', 'J/kg/K')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable ice_specific_heat was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+    ! ice_thermal_conductivity -- ice thermal conductivity (units fixed: W/(m K); 'J/(K kg)' is a specific-heat unit, not conductivity -- also fix upstream in the variable .def file this code is generated from)
+    pos = index(NCO%vars,' ice_thermal_conductivity ')
+    status = parallel_inq_varid(NCO%id,'ice_thermal_conductivity',varid)
+    if (pos.ne.0) then
+      NCO%vars(pos+1:pos+24) = ' '
+    end if
+    if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+     if (is_enabled(coni)) then
+       call write_log('Creating variable ice_thermal_conductivity')
+       status = parallel_def_var(NCO%id,'ice_thermal_conductivity',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+       status = parallel_put_att(NCO%id, varid, 'scale_factor',1.0)
+       status = parallel_put_att(NCO%id, varid, 'long_name', 'ice thermal conductivity')
+       status = parallel_put_att(NCO%id, varid, 'standard_name', 'ice_thermal_conductivity')
+       status = parallel_put_att(NCO%id, varid, 'units', 'W/(m K)')
+       if (glimmap_allocated(model%projection)) then
+        status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+       end if
+     else
+       call write_log('Variable ice_thermal_conductivity was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+     end if
+    end if
+
+ ! ivol -- ice volume
+ pos = index(NCO%vars,' ivol ')
+ status = parallel_inq_varid(NCO%id,'ivol',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%ivol)) then
+ call write_log('Creating variable ivol')
+ status = parallel_def_var(NCO%id,'ivol',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0*len0*len0*1.e-9))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice volume')
+ status = parallel_put_att(NCO%id, varid, 'units', 'km3')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable ivol was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! kinbcmask -- Mask of locations where uvel, vvel value should be held
+ pos = index(NCO%vars,' kinbcmask ')
+ status = parallel_inq_varid(NCO%id,'kinbcmask',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+9) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%kinbcmask)) then
+ call write_log('Creating variable kinbcmask')
+ status = parallel_def_var(NCO%id,'kinbcmask',get_xtype(outfile,NF90_INT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Mask of locations where uvel, vvel value should be held')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable kinbcmask was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! litho_temp -- lithosphere temperature
+ pos = index(NCO%vars,' litho_temp ')
+ status = parallel_inq_varid(NCO%id,'litho_temp',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+10) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%lithot%temp)) then
+ call write_log('Creating variable litho_temp')
+ status = parallel_def_var(NCO%id,'litho_temp',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, lithoz_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'lithosphere temperature')
+ status = parallel_put_att(NCO%id, varid, 'units', 'degree_Celsius')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable litho_temp was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! lsurf -- ice lower surface elevation
+ pos = index(NCO%vars,' lsurf ')
+ status = parallel_inq_varid(NCO%id,'lsurf',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+5) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%lsrf)) then
+ call write_log('Creating variable lsurf')
+ status = parallel_def_var(NCO%id,'lsurf',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice lower surface elevation')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable lsurf was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! relx -- relaxed bedrock topography
+ pos = index(NCO%vars,' relx ')
+ status = parallel_inq_varid(NCO%id,'relx',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%isostasy%relx)) then
+ call write_log('Creating variable relx')
+ status = parallel_def_var(NCO%id,'relx',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'relaxed bedrock topography')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable relx was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+    ! resid_u -- u component of residual Ax - b (NOTE: Glam only); 3D field on the staggered (x0,y0) velocity grid, written with scale_factor scale_resid
+    pos = index(NCO%vars,' resid_u ')
+    status = parallel_inq_varid(NCO%id,'resid_u',varid)
+    if (pos.ne.0) then
+      NCO%vars(pos+1:pos+7) = ' '
+    end if
+    if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+     if (is_enabled(data%velocity%resid_u)) then
+       call write_log('Creating variable resid_u')
+       status = parallel_def_var(NCO%id,'resid_u',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, level_dimid, time_dimid/),varid)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+       status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_resid))
+       status = parallel_put_att(NCO%id, varid, 'long_name', 'u component of residual Ax - b (NOTE: Glam only)')
+       status = parallel_put_att(NCO%id, varid, 'units', 'Pa/m')
+       if (glimmap_allocated(model%projection)) then
+        status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+       end if
+     else
+       call write_log('Variable resid_u was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+     end if
+    end if
+
+ ! resid_v -- v component of residual Ax - b (NOTE: Glam only)
+ pos = index(NCO%vars,' resid_v ')
+ status = parallel_inq_varid(NCO%id,'resid_v',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%resid_v)) then
+ call write_log('Creating variable resid_v')
+ status = parallel_def_var(NCO%id,'resid_v',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_resid))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'v component of residual Ax - b (NOTE: Glam only)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa/m')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable resid_v was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! rho_ice -- ice density
+ pos = index(NCO%vars,' rho_ice ')
+ status = parallel_inq_varid(NCO%id,'rho_ice',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(rhoi)) then
+ call write_log('Creating variable rho_ice')
+ status = parallel_def_var(NCO%id,'rho_ice',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',1.0)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice density')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'rho_ice')
+ status = parallel_put_att(NCO%id, varid, 'units', 'kg/meter3')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable rho_ice was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! rho_seawater -- seawater density
+ pos = index(NCO%vars,' rho_seawater ')
+ status = parallel_inq_varid(NCO%id,'rho_seawater',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+12) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(rhoo)) then
+ call write_log('Creating variable rho_seawater')
+ status = parallel_def_var(NCO%id,'rho_seawater',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',1.0)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'seawater density')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'rho_seawater')
+ status = parallel_put_att(NCO%id, varid, 'units', 'kg/meter3')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable rho_seawater was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! rhs_u -- u component of b in Ax = b
+ pos = index(NCO%vars,' rhs_u ')
+ status = parallel_inq_varid(NCO%id,'rhs_u',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+5) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%rhs_u)) then
+ call write_log('Creating variable rhs_u')
+ status = parallel_def_var(NCO%id,'rhs_u',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_resid))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'u component of b in Ax = b')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa/m')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable rhs_u was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! rhs_v -- v component of b in Ax = b
+ pos = index(NCO%vars,' rhs_v ')
+ status = parallel_inq_varid(NCO%id,'rhs_v',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+5) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%rhs_v)) then
+ call write_log('Creating variable rhs_v')
+ status = parallel_def_var(NCO%id,'rhs_v',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_resid))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'v component of b in Ax = b')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa/m')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable rhs_v was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! seconds_per_year -- seconds per year
+ pos = index(NCO%vars,' seconds_per_year ')
+ status = parallel_inq_varid(NCO%id,'seconds_per_year',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+16) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(scyr)) then
+ call write_log('Creating variable seconds_per_year')
+ status = parallel_def_var(NCO%id,'seconds_per_year',get_xtype(outfile,NF90_FLOAT),(/time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',1.0)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'seconds per year')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'seconds_per_year')
+ status = parallel_put_att(NCO%id, varid, 'units', 's/yr')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable seconds_per_year was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! soft -- bed softness parameter
+ pos = index(NCO%vars,' soft ')
+ status = parallel_inq_varid(NCO%id,'soft',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%bed_softness)) then
+ call write_log('Creating variable soft')
+ status = parallel_def_var(NCO%id,'soft',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_btrc))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'bed softness parameter')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/pascal/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable soft was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! stagthk -- staggered ice thickness
+ pos = index(NCO%vars,' stagthk ')
+ status = parallel_inq_varid(NCO%id,'stagthk',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geomderv%stagthck)) then
+ call write_log('Creating variable stagthk')
+ status = parallel_def_var(NCO%id,'stagthk',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'staggered ice thickness')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'stag_land_ice_thickness')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable stagthk was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! surftemp -- annual mean surface temperature
+ pos = index(NCO%vars,' surftemp ')
+ status = parallel_inq_varid(NCO%id,'surftemp',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+8) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%climate%artm)) then
+ call write_log('Creating variable surftemp')
+ status = parallel_def_var(NCO%id,'surftemp',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'annual mean surface temperature')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'surface_temperature')
+ status = parallel_put_att(NCO%id, varid, 'cell_methods', 'time: mean')
+ status = parallel_put_att(NCO%id, varid, 'units', 'degree_Celsius')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable surftemp was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! tau_eff -- effective stress
+ pos = index(NCO%vars,' tau_eff ')
+ status = parallel_inq_varid(NCO%id,'tau_eff',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%tau%scalar)) then
+ call write_log('Creating variable tau_eff')
+ status = parallel_def_var(NCO%id,'tau_eff',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'effective stress')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable tau_eff was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! tau_xx -- x component of horiz. normal stress
+ pos = index(NCO%vars,' tau_xx ')
+ status = parallel_inq_varid(NCO%id,'tau_xx',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+6) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%tau%xx)) then
+ call write_log('Creating variable tau_xx')
+ status = parallel_def_var(NCO%id,'tau_xx',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'x component of horiz. normal stress')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable tau_xx was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! tau_xy -- horiz. shear stress
+ pos = index(NCO%vars,' tau_xy ')
+ status = parallel_inq_varid(NCO%id,'tau_xy',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+6) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%tau%xy)) then
+ call write_log('Creating variable tau_xy')
+ status = parallel_def_var(NCO%id,'tau_xy',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'horiz. shear stress')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable tau_xy was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! tau_xz -- X component vertical shear stress
+ pos = index(NCO%vars,' tau_xz ')
+ status = parallel_inq_varid(NCO%id,'tau_xz',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+6) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%tau%xz)) then
+ call write_log('Creating variable tau_xz')
+ status = parallel_def_var(NCO%id,'tau_xz',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'X component vertical shear stress')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable tau_xz was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! tau_yy -- y component of horiz. normal stress
+ pos = index(NCO%vars,' tau_yy ')
+ status = parallel_inq_varid(NCO%id,'tau_yy',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+6) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%tau%yy)) then
+ call write_log('Creating variable tau_yy')
+ status = parallel_def_var(NCO%id,'tau_yy',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'y component of horiz. normal stress')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable tau_yy was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! tau_yz -- Y component vertical shear stress
+ pos = index(NCO%vars,' tau_yz ')
+ status = parallel_inq_varid(NCO%id,'tau_yz',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+6) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%stress%tau%yz)) then
+ call write_log('Creating variable tau_yz')
+ status = parallel_def_var(NCO%id,'tau_yz',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Y component vertical shear stress')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable tau_yz was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! tauf -- higher-order basal yield stress
+ pos = index(NCO%vars,' tauf ')
+ status = parallel_inq_varid(NCO%id,'tauf',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%basalproc%mintauf)) then
+ call write_log('Creating variable tauf')
+ status = parallel_def_var(NCO%id,'tauf',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_tau))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'higher-order basal yield stress')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable tauf was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! taux -- basal shear stress in x direction (NOTE: Glide only)
+ pos = index(NCO%vars,' taux ')
+ status = parallel_inq_varid(NCO%id,'taux',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%tau_x)) then
+ call write_log('Creating variable taux')
+ status = parallel_def_var(NCO%id,'taux',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(1e-3*thk0*thk0/len0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal shear stress in x direction (NOTE: Glide only)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'kilopascal')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable taux was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! tauy -- basal shear stress in y direction
+ pos = index(NCO%vars,' tauy ')
+ status = parallel_inq_varid(NCO%id,'tauy',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%tau_y)) then
+ call write_log('Creating variable tauy')
+ status = parallel_def_var(NCO%id,'tauy',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(1e-3*thk0*thk0/len0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal shear stress in y direction')
+ status = parallel_put_att(NCO%id, varid, 'units', 'kilopascal')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable tauy was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! temp -- ice temperature
+ pos = index(NCO%vars,' temp ')
+ status = parallel_inq_varid(NCO%id,'temp',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%temp)) then
+ call write_log('Creating variable temp')
+ status = parallel_def_var(NCO%id,'temp',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice temperature')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_temperature')
+ status = parallel_put_att(NCO%id, varid, 'units', 'degree_Celsius')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable temp was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+    ! tempstag -- ice temperature on staggered vertical levels with boundaries; enabled by the same flag as 'temp' (data%temper%temp), but defined on the stagwbndlevel vertical dimension
+    pos = index(NCO%vars,' tempstag ')
+    status = parallel_inq_varid(NCO%id,'tempstag',varid)
+    if (pos.ne.0) then
+      NCO%vars(pos+1:pos+8) = ' '
+    end if
+    if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+     if (is_enabled(data%temper%temp)) then
+       call write_log('Creating variable tempstag')
+       status = parallel_def_var(NCO%id,'tempstag',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, stagwbndlevel_dimid, time_dimid/),varid)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+       status = parallel_put_att(NCO%id, varid, 'long_name', 'ice temperature on staggered vertical levels with boundaries')
+       status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_temperature_stag')
+       status = parallel_put_att(NCO%id, varid, 'units', 'degree_Celsius')
+       if (glimmap_allocated(model%projection)) then
+        status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+        status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+       end if
+     else
+       call write_log('Variable tempstag was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+     end if
+    end if
+
+ ! thk -- ice thickness
+ pos = index(NCO%vars,' thk ')
+ status = parallel_inq_varid(NCO%id,'thk',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+3) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%thck)) then
+ call write_log('Creating variable thk')
+ status = parallel_def_var(NCO%id,'thk',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice thickness')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_thickness')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable thk was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! thkmask -- mask
+ pos = index(NCO%vars,' thkmask ')
+ status = parallel_inq_varid(NCO%id,'thkmask',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%thkmask)) then
+ call write_log('Creating variable thkmask')
+ status = parallel_def_var(NCO%id,'thkmask',get_xtype(outfile,NF90_INT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'mask')
+ status = parallel_put_att(NCO%id, varid, 'units', '1')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable thkmask was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! topg -- bedrock topography
+ pos = index(NCO%vars,' topg ')
+ status = parallel_inq_varid(NCO%id,'topg',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%topg)) then
+ call write_log('Creating variable topg')
+ status = parallel_def_var(NCO%id,'topg',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'bedrock topography')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'bedrock_altitude')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable topg was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! ubas -- basal slip velocity in x direction
+ pos = index(NCO%vars,' ubas ')
+ status = parallel_inq_varid(NCO%id,'ubas',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%ubas)) then
+ call write_log('Creating variable ubas')
+ status = parallel_def_var(NCO%id,'ubas',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal slip velocity in x direction')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_basal_x_velocity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable ubas was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! uflx -- flux in x direction (NOTE: Glide and Glam only)
+ pos = index(NCO%vars,' uflx ')
+ status = parallel_inq_varid(NCO%id,'uflx',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%uflx)) then
+ call write_log('Creating variable uflx')
+ status = parallel_def_var(NCO%id,'uflx',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uflx))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'flux in x direction (NOTE: Glide and Glam only)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter2/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable uflx was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! unstagbeta -- higher-order bed stress coefficient on the unstaggered grid (NOTE: this will overwrite beta if both are input)
+ pos = index(NCO%vars,' unstagbeta ')
+ status = parallel_inq_varid(NCO%id,'unstagbeta',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+10) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%unstagbeta)) then
+ call write_log('Creating variable unstagbeta')
+ status = parallel_def_var(NCO%id,'unstagbeta',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_beta))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'higher-order bed stress coefficient on the unstaggered grid (NOTE: this will overwrite beta if both are input)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'Pa yr/m')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable unstagbeta was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! usurf -- ice upper surface elevation
+ pos = index(NCO%vars,' usurf ')
+ status = parallel_inq_varid(NCO%id,'usurf',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+5) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%geometry%usrf)) then
+ call write_log('Creating variable usurf')
+ status = parallel_def_var(NCO%id,'usurf',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(thk0))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice upper surface elevation')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'surface_altitude')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable usurf was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! uvel -- ice velocity in x direction
+ pos = index(NCO%vars,' uvel ')
+ status = parallel_inq_varid(NCO%id,'uvel',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%uvel)) then
+ call write_log('Creating variable uvel')
+ status = parallel_def_var(NCO%id,'uvel',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice velocity in x direction')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_x_velocity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable uvel was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! uvel_extend -- ice velocity in x direction (extended grid)
+ pos = index(NCO%vars,' uvel_extend ')
+ status = parallel_inq_varid(NCO%id,'uvel_extend',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+11) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%uvel_extend)) then
+ call write_log('Creating variable uvel_extend')
+ status = parallel_def_var(NCO%id,'uvel_extend',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice velocity in x direction (extended grid)')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_x_velocity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable uvel_extend was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! vbas -- basal slip velocity in y direction
+ pos = index(NCO%vars,' vbas ')
+ status = parallel_inq_varid(NCO%id,'vbas',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%vbas)) then
+ call write_log('Creating variable vbas')
+ status = parallel_def_var(NCO%id,'vbas',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'basal slip velocity in y direction')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_basal_y_velocity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable vbas was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! velnorm -- Horizontal ice velocity magnitude
+ pos = index(NCO%vars,' velnorm ')
+ status = parallel_inq_varid(NCO%id,'velnorm',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%velnorm)) then
+ call write_log('Creating variable velnorm')
+ status = parallel_def_var(NCO%id,'velnorm',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Horizontal ice velocity magnitude')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable velnorm was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! vflx -- flux in x direction (NOTE: Glide and Glam only)
+ pos = index(NCO%vars,' vflx ')
+ status = parallel_inq_varid(NCO%id,'vflx',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%vflx)) then
+ call write_log('Creating variable vflx')
+ status = parallel_def_var(NCO%id,'vflx',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uflx))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'flux in x direction (NOTE: Glide and Glam only)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter2/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable vflx was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! vvel -- ice velocity in y direction
+ pos = index(NCO%vars,' vvel ')
+ status = parallel_inq_varid(NCO%id,'vvel',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%vvel)) then
+ call write_log('Creating variable vvel')
+ status = parallel_def_var(NCO%id,'vvel',get_xtype(outfile,NF90_FLOAT),(/x0_dimid, y0_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice velocity in y direction')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_y_velocity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable vvel was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! vvel_extend -- ice velocity in y direction (extended grid)
+ pos = index(NCO%vars,' vvel_extend ')
+ status = parallel_inq_varid(NCO%id,'vvel_extend',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+11) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%vvel_extend)) then
+ call write_log('Creating variable vvel_extend')
+ status = parallel_def_var(NCO%id,'vvel_extend',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_uvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'ice velocity in y direction (extended grid)')
+ status = parallel_put_att(NCO%id, varid, 'standard_name', 'land_ice_y_velocity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ end if
+ else
+ call write_log('Variable vvel_extend was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! waterfrac -- internal water fraction
+ pos = index(NCO%vars,' waterfrac ')
+ status = parallel_inq_varid(NCO%id,'waterfrac',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+9) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%temper%waterfrac)) then
+ call write_log('Creating variable waterfrac')
+ status = parallel_def_var(NCO%id,'waterfrac',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, staglevel_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'internal water fraction')
+ status = parallel_put_att(NCO%id, varid, 'units', 'unitless [0,1]')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable waterfrac was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! wgrd -- Vertical grid velocity
+ pos = index(NCO%vars,' wgrd ')
+ status = parallel_inq_varid(NCO%id,'wgrd',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%wgrd)) then
+ call write_log('Creating variable wgrd')
+ status = parallel_def_var(NCO%id,'wgrd',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_wvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'Vertical grid velocity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable wgrd was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! wvel -- vertical ice velocity
+ pos = index(NCO%vars,' wvel ')
+ status = parallel_inq_varid(NCO%id,'wvel',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+4) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%wvel)) then
+ call write_log('Creating variable wvel')
+ status = parallel_def_var(NCO%id,'wvel',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_wvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'vertical ice velocity')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable wvel was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ ! wvel_ho -- vertical ice velocity relative to ice sheet base from higher-order model (NOTE: Glam only)
+ pos = index(NCO%vars,' wvel_ho ')
+ status = parallel_inq_varid(NCO%id,'wvel_ho',varid)
+ if (pos.ne.0) then
+ NCO%vars(pos+1:pos+7) = ' '
+ end if
+ if (pos.ne.0 .and. status.eq.nf90_enotvar) then
+ if (is_enabled(data%velocity%wvel_ho)) then
+ call write_log('Creating variable wvel_ho')
+ status = parallel_def_var(NCO%id,'wvel_ho',get_xtype(outfile,NF90_FLOAT),(/x1_dimid, y1_dimid, level_dimid, time_dimid/),varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, varid, 'scale_factor',(scale_wvel))
+ status = parallel_put_att(NCO%id, varid, 'long_name', 'vertical ice velocity relative to ice sheet base from higher-order model (NOTE: Glam only)')
+ status = parallel_put_att(NCO%id, varid, 'units', 'meter/year')
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_put_att(NCO%id, varid, 'grid_mapping',glimmer_nc_mapvarname)
+ status = parallel_put_att(NCO%id, varid, 'coordinates', 'lon lat')
+ end if
+ else
+ call write_log('Variable wvel_ho was specified for output but it is inappropriate for your config settings. It will be excluded from the output.', GM_WARNING)
+ end if
+ end if
+
+ end subroutine glide_io_create
+
+ subroutine glide_io_write(outfile,data)
+ use parallel
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_paramets
+ use glimmer_scales
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ ! structure containg output netCDF descriptor
+ type(glide_global_type) :: data
+ ! the model instance
+
+ ! local variables
+ real(dp) :: tavgf
+ integer status, varid
+ integer up
+
+ tavgf = outfile%total_time
+ if (tavgf.ne.0.d0) then
+ tavgf = 1.d0/tavgf
+ end if
+
+ ! write variables
+ status = parallel_inq_varid(NCO%id,'acab',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%climate%acab, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'adv_cfl_dt',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%numerics%adv_cfl_dt, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'artm',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%climate%artm, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'beta',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%beta, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'bfricflx',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%bfricflx, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'bheatflx',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%bheatflx, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'bmlt',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%bmlt, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'btemp',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%temp(data%general%upn,1:data%general%ewn,1:data%general%nsn), (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'btractx',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%btractx(:,:), (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'btracty',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%btracty(:,:), (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'btrc',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%btrc, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'bwat',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%bwat, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'bwatflx',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%bwatflx, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'calving',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%climate%calving, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'diff_cfl_dt',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%numerics%diff_cfl_dt, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'diffu',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%diffu, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'dissip',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%dissip(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'dissipstag',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%dissip(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'dthckdtm',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geomderv%dthckdtm, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'dusrfdtm',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geomderv%dusrfdtm, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'dynbcmask',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%dynbcmask, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'effecpress',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%basal_physics%effecpress, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'efvs',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%efvs(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'enthalpy',varid)
+ if (status .eq. nf90_noerr) then
+ do up=0,NCO%nstagwbndlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%enthalpy(up,:,:), (/1,1,up+1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'eus',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%climate%eus, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'flwa',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%flwa(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'flwastag',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%flwa(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'gravity',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ grav, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'iarea',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%iarea, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'iareaf',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%iareaf, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'iareag',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%iareag, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'ice_mask',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%ice_mask, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'ice_specific_heat',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ shci, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'ice_thermal_conductivity',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ coni, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'ivol',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%ivol, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'kinbcmask',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%kinbcmask(:,:), (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'litho_temp',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%lithot%temp, (/1,1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'lsurf',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%lsrf, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'relx',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%isostasy%relx, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'resid_u',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%resid_u(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'resid_v',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%resid_v(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'rho_ice',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ rhoi, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'rho_seawater',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ rhoo, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'rhs_u',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%rhs_u(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'rhs_v',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%rhs_v(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'seconds_per_year',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ scyr, (/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'soft',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%bed_softness, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'stagthk',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geomderv%stagthck, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'surftemp',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%climate%artm, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tau_eff',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%tau%scalar(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tau_xx',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%tau%xx(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tau_xy',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%tau%xy(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tau_xz',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%tau%xz(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tau_yy',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%tau%yy(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tau_yz',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%stress%tau%yz(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tauf',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%basalproc%mintauf, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'taux',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%tau_x, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tauy',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%tau_y, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'temp',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%temp(up,1:data%general%ewn,1:data%general%nsn), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'tempstag',varid)
+ if (status .eq. nf90_noerr) then
+ do up=0,NCO%nstagwbndlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%temp(up,1:data%general%ewn,1:data%general%nsn), (/1,1,up+1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'thk',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%thck, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'thkmask',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%thkmask, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'topg',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%topg, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'ubas',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%ubas, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'uflx',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%uflx, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'unstagbeta',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%unstagbeta, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'usurf',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%geometry%usrf, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'uvel',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%uvel(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'uvel_extend',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%uvel_extend(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'vbas',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%vbas, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'velnorm',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%velnorm(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'vflx',varid)
+ if (status .eq. nf90_noerr) then
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%vflx, (/1,1,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end if
+
+ status = parallel_inq_varid(NCO%id,'vvel',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%vvel(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'vvel_extend',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%vvel_extend(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'waterfrac',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nstaglevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%temper%waterfrac(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'wgrd',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%wgrd(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'wvel',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%wvel(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ status = parallel_inq_varid(NCO%id,'wvel_ho',varid)
+ if (status .eq. nf90_noerr) then
+ do up=1,NCO%nlevel
+ status = distributed_put_var(NCO%id, varid, &
+ data%velocity%wvel_ho(up,:,:), (/1,1,up,outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ end do
+ end if
+
+ end subroutine glide_io_write
+
+
+ subroutine glide_add_to_restart_variable_list(vars_to_add)
+ ! Append variable name(s) to the module-level list of variables that are
+ ! needed to perform an exact restart.  Public so that other parts of the
+ ! model can extend the list.  MJH 1/17/2013
+
+ use glimmer_log
+ implicit none
+
+ !------------------------------------------------------------------------------------
+ ! Subroutine arguments
+ !------------------------------------------------------------------------------------
+ character(len=*), intent (in) :: vars_to_add ! list of variable(s) to be added to the list of restart variables
+
+ !------------------------------------------------------------------------------------
+
+ ! Append only when the combined list (existing list + one separator blank +
+ ! the new names) still fits in the fixed-length buffer; otherwise abort
+ ! with a fatal log message.
+ if ( (len_trim(restart_variable_list) + 1 + len_trim(vars_to_add)) <= len(restart_variable_list) ) then
+ restart_variable_list = trim(adjustl(restart_variable_list)) // ' ' // trim(vars_to_add)
+ !call write_log('Adding to glide restart variable list: ' // trim(vars_to_add) )
+ else
+ call write_log('Adding restart variables has made the restart variable list too long.',GM_FATAL)
+ endif
+
+ end subroutine glide_add_to_restart_variable_list
+
+
+ ! Functions for the interface 'is_enabled'. These are needed by the auto-generated code in glide_io_create
+ ! to determine if a variable is 'turned on', and should be written.
+
+ function is_enabled_0dint(var) result(enabled)
+ ! Scalar integer fields are always enabled for output.
+ integer, intent(in) :: var
+ logical :: enabled
+ enabled = .true.
+ end function is_enabled_0dint
+
+ function is_enabled_1dint(var) result(enabled)
+ ! A 1D integer field is enabled exactly when its pointer is associated.
+ integer, dimension(:), pointer, intent(in) :: var
+ logical :: enabled
+ enabled = associated(var)
+ end function is_enabled_1dint
+
+ function is_enabled_2dint(var) result(enabled)
+ ! A 2D integer field is enabled exactly when its pointer is associated.
+ integer, dimension(:,:), pointer, intent(in) :: var
+ logical :: enabled
+ enabled = associated(var)
+ end function is_enabled_2dint
+
+ function is_enabled_0dreal(var) result(enabled)
+ ! Scalar real fields are always enabled for output.
+ real(dp), intent(in) :: var
+ logical :: enabled
+ enabled = .true.
+ end function is_enabled_0dreal
+
+ function is_enabled_1dreal(var) result(enabled)
+ ! A 1D real field is enabled exactly when its pointer is associated.
+ real(dp), dimension(:), pointer, intent(in) :: var
+ logical :: enabled
+ enabled = associated(var)
+ end function is_enabled_1dreal
+
+ function is_enabled_2dreal(var) result(enabled)
+ ! A 2D real field is enabled exactly when its pointer is associated.
+ real(dp), dimension(:,:), pointer, intent(in) :: var
+ logical :: enabled
+ enabled = associated(var)
+ end function is_enabled_2dreal
+
+ function is_enabled_3dreal(var) result(enabled)
+ ! A 3D real field is enabled exactly when its pointer is associated.
+ real(dp), dimension(:,:,:), pointer, intent(in) :: var
+ logical :: enabled
+ enabled = associated(var)
+ end function is_enabled_3dreal
+
+
+ !*****************************************************************************
+ ! netCDF input
+ !*****************************************************************************
+ subroutine glide_io_readall(data, model, filetype)
+ ! Read all fields from the netCDF files attached to the model.
+ ! Walks the linked list of input (or forcing) files, lets
+ ! glimmer_nc_checkread decide whether each file needs (re)processing,
+ ! and reads those that do via glide_io_read.
+ !
+ ! data     - model instance whose fields receive the data read
+ ! model    - model instance holding the file-unit linked lists
+ ! filetype - optional: 0 for input files, 1 for forcing files;
+ !            defaults to input
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(glide_global_type) :: data
+ type(glide_global_type) :: model
+ integer, intent(in), optional :: filetype ! 0 for input, 1 for forcing; defaults to input
+
+ ! local variables
+ type(glimmer_nc_input), pointer :: ic ! iterator over the file linked list
+ integer :: filetype_local
+
+ if (present(filetype)) then
+ filetype_local = filetype
+ else
+ filetype_local = 0 ! default to input type
+ end if
+
+ ! Select which linked list of files to traverse.
+ if (filetype_local == 0) then
+ ic=>model%funits%in_first
+ else
+ ic=>model%funits%frc_first
+ endif
+ do while(associated(ic))
+ call glimmer_nc_checkread(ic,model)
+ if (ic%nc%just_processed) then
+ call glide_io_read(ic,data)
+ end if
+ ic=>ic%next
+ end do
+ end subroutine glide_io_readall
+
+
+ subroutine glide_read_forcing(data, model)
+ ! Read data from forcing files
+ ! For each forcing file attached to the model, find the largest forcing
+ ! time that does not exceed the current model time (stepwise-constant
+ ! forcing) and read all fields of that file at that time level.
+ use glimmer_log
+ use glide_types
+ use glimmer_ncdf
+
+ implicit none
+ type(glide_global_type) :: data
+ type(glide_global_type), intent(inout) :: model
+
+ ! Locals
+ type(glimmer_nc_input), pointer :: ic ! iterator over the linked list of forcing files
+ integer :: t ! index into the file's time array
+ real(dp) :: eps ! a tolerance to use for stepwise constant forcing
+
+ ! Make eps a fraction of the time step.
+ eps = model%numerics%tinc * 1.0d-4
+
+ ! read forcing files
+ ic=>model%funits%frc_first
+ do while(associated(ic))
+
+ !print *, 'possible forcing times', ic%times
+
+ ! Find the current time in the file
+ do t = ic%nt, 1, -1 ! look through the time array backwards
+ if ( ic%times(t) <= model%numerics%time + eps) then
+ ! use the largest time that is smaller or equal to the current time (stepwise forcing)
+
+ ! Set the desired time to be read
+ ic%current_time = t
+ !print *, 'time, forcing index, forcing time', model%numerics%time, ic%current_time, ic%times(ic%current_time)
+ exit ! once we find the time, exit the loop
+ endif
+ end do
+ ! NOTE(review): if every time in this file is later than the current model
+ ! time, ic%current_time keeps its previous value - confirm this is the
+ ! intended behavior for model times before the first forcing record.
+
+ ! read all forcing fields present in this file for the time specified above
+ ic%nc%just_processed = .false. ! set this to false so it will be re-processed every time through - this ensures info gets written to the log, and that time levels don't get skipped.
+ call glide_io_readall(data, model, filetype=1)
+
+ ! move on to the next forcing file
+ ic=>ic%next
+ end do
+
+ end subroutine glide_read_forcing
+
+
+!------------------------------------------------------------------------------
+
+
+ subroutine glide_io_read(infile,data)
+ ! read variables from a netCDF file
+ use parallel
+ use glimmer_log
+ use glimmer_ncdf
+ use glide_types
+ use glimmer_paramets
+ use glimmer_scales
+ implicit none
+ type(glimmer_nc_input), pointer :: infile
+ ! structure containg output netCDF descriptor
+ type(glide_global_type) :: data
+ ! the model instance
+
+ ! local variables
+ integer status,varid
+ integer up
+ real(dp) :: scaling_factor
+
+ ! read variables
+ status = parallel_inq_varid(NCI%id,'x1',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%general%x1)) then
+ call write_log(' Loading x1')
+ status = distributed_get_var(NCI%id, varid, &
+ data%general%x1, (/1/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling x1",GM_DIAGNOSTIC)
+ data%general%x1 = data%general%x1*scaling_factor
+ end if
+ else
+ call write_log('Variable x1 was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'y1',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%general%y1)) then
+ call write_log(' Loading y1')
+ status = distributed_get_var(NCI%id, varid, &
+ data%general%y1, (/1/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling y1",GM_DIAGNOSTIC)
+ data%general%y1 = data%general%y1*scaling_factor
+ end if
+ else
+ call write_log('Variable y1 was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'acab',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%climate%acab)) then
+ call write_log(' Loading acab')
+ status = distributed_get_var(NCI%id, varid, &
+ data%climate%acab, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_acab)
+ else
+ scaling_factor = scaling_factor/(scale_acab)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling acab",GM_DIAGNOSTIC)
+ data%climate%acab = data%climate%acab*scaling_factor
+ end if
+ else
+ call write_log('Variable acab was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'artm',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%climate%artm)) then
+ call write_log(' Loading artm')
+ status = distributed_get_var(NCI%id, varid, &
+ data%climate%artm, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling artm",GM_DIAGNOSTIC)
+ data%climate%artm = data%climate%artm*scaling_factor
+ end if
+ else
+ call write_log('Variable artm was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'beta',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%beta)) then
+ call write_log(' Loading beta')
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%beta, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_beta)
+ else
+ scaling_factor = scaling_factor/(scale_beta)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling beta",GM_DIAGNOSTIC)
+ data%velocity%beta = data%velocity%beta*scaling_factor
+ end if
+ else
+ call write_log('Variable beta was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'bfricflx',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%bfricflx)) then
+ call write_log(' Loading bfricflx')
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%bfricflx, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(1.0)
+ else
+ scaling_factor = scaling_factor/(1.0)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling bfricflx",GM_DIAGNOSTIC)
+ data%temper%bfricflx = data%temper%bfricflx*scaling_factor
+ end if
+ else
+ call write_log('Variable bfricflx was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'bheatflx',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%bheatflx)) then
+ call write_log(' Loading bheatflx')
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%bheatflx, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_bflx)
+ else
+ scaling_factor = scaling_factor/(scale_bflx)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling bheatflx",GM_DIAGNOSTIC)
+ data%temper%bheatflx = data%temper%bheatflx*scaling_factor
+ end if
+ else
+ call write_log('Variable bheatflx was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'bmlt',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%bmlt)) then
+ call write_log(' Loading bmlt')
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%bmlt, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_acab)
+ else
+ scaling_factor = scaling_factor/(scale_acab)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling bmlt",GM_DIAGNOSTIC)
+ data%temper%bmlt = data%temper%bmlt*scaling_factor
+ end if
+ else
+ call write_log('Variable bmlt was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'bwat',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%bwat)) then
+ call write_log(' Loading bwat')
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%bwat, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(thk0)
+ else
+ scaling_factor = scaling_factor/(thk0)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling bwat",GM_DIAGNOSTIC)
+ data%temper%bwat = data%temper%bwat*scaling_factor
+ end if
+ else
+ call write_log('Variable bwat was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'dissip',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%dissip)) then
+ call write_log(' Loading dissip')
+ do up=1,NCI%nlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%dissip(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scyr)
+ else
+ scaling_factor = scaling_factor/(scyr)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling dissip",GM_DIAGNOSTIC)
+ data%temper%dissip(up,:,:) = data%temper%dissip(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable dissip was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'dissipstag',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%dissip)) then
+ call write_log(' Loading dissipstag')
+ do up=1,NCI%nstaglevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%dissip(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scyr)
+ else
+ scaling_factor = scaling_factor/(scyr)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling dissipstag",GM_DIAGNOSTIC)
+ data%temper%dissip(up,:,:) = data%temper%dissip(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable dissipstag was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'effecpress',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%basal_physics%effecpress)) then
+ call write_log(' Loading effecpress')
+ status = distributed_get_var(NCI%id, varid, &
+ data%basal_physics%effecpress, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling effecpress",GM_DIAGNOSTIC)
+ data%basal_physics%effecpress = data%basal_physics%effecpress*scaling_factor
+ end if
+ else
+ call write_log('Variable effecpress was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'flwa',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%flwa)) then
+ call write_log(' Loading flwa')
+ do up=1,NCI%nlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%flwa(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_flwa)
+ else
+ scaling_factor = scaling_factor/(scale_flwa)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling flwa",GM_DIAGNOSTIC)
+ data%temper%flwa(up,:,:) = data%temper%flwa(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable flwa was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'flwastag',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%flwa)) then
+ call write_log(' Loading flwastag')
+ do up=1,NCI%nstaglevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%flwa(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_flwa)
+ else
+ scaling_factor = scaling_factor/(scale_flwa)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling flwastag",GM_DIAGNOSTIC)
+ data%temper%flwa(up,:,:) = data%temper%flwa(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable flwastag was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'kinbcmask',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%kinbcmask)) then
+ call write_log(' Loading kinbcmask')
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%kinbcmask(:,:), (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling kinbcmask",GM_DIAGNOSTIC)
+ data%velocity%kinbcmask(:,:) = data%velocity%kinbcmask(:,:)*scaling_factor
+ end if
+ else
+ call write_log('Variable kinbcmask was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'litho_temp',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%lithot%temp)) then
+ call write_log(' Loading litho_temp')
+ status = distributed_get_var(NCI%id, varid, &
+ data%lithot%temp, (/1,1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling litho_temp",GM_DIAGNOSTIC)
+ data%lithot%temp = data%lithot%temp*scaling_factor
+ end if
+ else
+ call write_log('Variable litho_temp was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'relx',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%isostasy%relx)) then
+ call write_log(' Loading relx')
+ status = distributed_get_var(NCI%id, varid, &
+ data%isostasy%relx, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(thk0)
+ else
+ scaling_factor = scaling_factor/(thk0)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling relx",GM_DIAGNOSTIC)
+ data%isostasy%relx = data%isostasy%relx*scaling_factor
+ end if
+ else
+ call write_log('Variable relx was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'soft',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%bed_softness)) then
+ call write_log(' Loading soft')
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%bed_softness, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_btrc)
+ else
+ scaling_factor = scaling_factor/(scale_btrc)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling soft",GM_DIAGNOSTIC)
+ data%velocity%bed_softness = data%velocity%bed_softness*scaling_factor
+ end if
+ else
+ call write_log('Variable soft was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'surftemp',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%climate%artm)) then
+ call write_log(' Loading surftemp')
+ status = distributed_get_var(NCI%id, varid, &
+ data%climate%artm, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling surftemp",GM_DIAGNOSTIC)
+ data%climate%artm = data%climate%artm*scaling_factor
+ end if
+ else
+ call write_log('Variable surftemp was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'tauf',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%basalproc%mintauf)) then
+ call write_log(' Loading tauf')
+ status = distributed_get_var(NCI%id, varid, &
+ data%basalproc%mintauf, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_tau)
+ else
+ scaling_factor = scaling_factor/(scale_tau)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling tauf",GM_DIAGNOSTIC)
+ data%basalproc%mintauf = data%basalproc%mintauf*scaling_factor
+ end if
+ else
+ call write_log('Variable tauf was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'temp',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%temp)) then
+ call write_log(' Loading temp')
+ do up=1,NCI%nlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%temp(up,1:data%general%ewn,1:data%general%nsn), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling temp",GM_DIAGNOSTIC)
+ data%temper%temp(up,1:data%general%ewn,1:data%general%nsn) = data%temper%temp(up,1:data%general%ewn,1:data%general%nsn)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable temp was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'tempstag',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%temp)) then
+ call write_log(' Loading tempstag')
+ do up=0,NCI%nstagwbndlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%temp(up,1:data%general%ewn,1:data%general%nsn), (/1,1,up+1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling tempstag",GM_DIAGNOSTIC)
+ data%temper%temp(up,1:data%general%ewn,1:data%general%nsn) = data%temper%temp(up,1:data%general%ewn,1:data%general%nsn)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable tempstag was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'thk',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%geometry%thck)) then
+ call write_log(' Loading thk')
+ status = distributed_get_var(NCI%id, varid, &
+ data%geometry%thck, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(thk0)
+ else
+ scaling_factor = scaling_factor/(thk0)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling thk",GM_DIAGNOSTIC)
+ data%geometry%thck = data%geometry%thck*scaling_factor
+ end if
+ else
+ call write_log('Variable thk was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'thkmask',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%geometry%thkmask)) then
+ call write_log(' Loading thkmask')
+ status = distributed_get_var(NCI%id, varid, &
+ data%geometry%thkmask, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling thkmask",GM_DIAGNOSTIC)
+ data%geometry%thkmask = data%geometry%thkmask*scaling_factor
+ end if
+ else
+ call write_log('Variable thkmask was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'topg',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%geometry%topg)) then
+ call write_log(' Loading topg')
+ status = distributed_get_var(NCI%id, varid, &
+ data%geometry%topg, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(thk0)
+ else
+ scaling_factor = scaling_factor/(thk0)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling topg",GM_DIAGNOSTIC)
+ data%geometry%topg = data%geometry%topg*scaling_factor
+ end if
+ else
+ call write_log('Variable topg was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'ubas',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%ubas)) then
+ call write_log(' Loading ubas')
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%ubas, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_uvel)
+ else
+ scaling_factor = scaling_factor/(scale_uvel)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling ubas",GM_DIAGNOSTIC)
+ data%velocity%ubas = data%velocity%ubas*scaling_factor
+ end if
+ else
+ call write_log('Variable ubas was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'unstagbeta',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%unstagbeta)) then
+ call write_log(' Loading unstagbeta')
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%unstagbeta, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_beta)
+ else
+ scaling_factor = scaling_factor/(scale_beta)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling unstagbeta",GM_DIAGNOSTIC)
+ data%velocity%unstagbeta = data%velocity%unstagbeta*scaling_factor
+ end if
+ else
+ call write_log('Variable unstagbeta was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'usurf',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%geometry%usrf)) then
+ call write_log(' Loading usurf')
+ status = distributed_get_var(NCI%id, varid, &
+ data%geometry%usrf, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(thk0)
+ else
+ scaling_factor = scaling_factor/(thk0)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling usurf",GM_DIAGNOSTIC)
+ data%geometry%usrf = data%geometry%usrf*scaling_factor
+ end if
+ else
+ call write_log('Variable usurf was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'uvel',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%uvel)) then
+ call write_log(' Loading uvel')
+ do up=1,NCI%nlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%uvel(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_uvel)
+ else
+ scaling_factor = scaling_factor/(scale_uvel)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling uvel",GM_DIAGNOSTIC)
+ data%velocity%uvel(up,:,:) = data%velocity%uvel(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable uvel was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'vbas',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%vbas)) then
+ call write_log(' Loading vbas')
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%vbas, (/1,1,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_uvel)
+ else
+ scaling_factor = scaling_factor/(scale_uvel)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling vbas",GM_DIAGNOSTIC)
+ data%velocity%vbas = data%velocity%vbas*scaling_factor
+ end if
+ else
+ call write_log('Variable vbas was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'vvel',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%vvel)) then
+ call write_log(' Loading vvel')
+ do up=1,NCI%nlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%vvel(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_uvel)
+ else
+ scaling_factor = scaling_factor/(scale_uvel)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling vvel",GM_DIAGNOSTIC)
+ data%velocity%vvel(up,:,:) = data%velocity%vvel(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable vvel was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'waterfrac',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%temper%waterfrac)) then
+ call write_log(' Loading waterfrac')
+ do up=1,NCI%nstaglevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%temper%waterfrac(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling waterfrac",GM_DIAGNOSTIC)
+ data%temper%waterfrac(up,:,:) = data%temper%waterfrac(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable waterfrac was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'wgrd',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%wgrd)) then
+ call write_log(' Loading wgrd')
+ do up=1,NCI%nlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%wgrd(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_wvel)
+ else
+ scaling_factor = scaling_factor/(scale_wvel)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling wgrd",GM_DIAGNOSTIC)
+ data%velocity%wgrd(up,:,:) = data%velocity%wgrd(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable wgrd was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'wvel',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%wvel)) then
+ call write_log(' Loading wvel')
+ do up=1,NCI%nlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%wvel(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_wvel)
+ else
+ scaling_factor = scaling_factor/(scale_wvel)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling wvel",GM_DIAGNOSTIC)
+ data%velocity%wvel(up,:,:) = data%velocity%wvel(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable wvel was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ status = parallel_inq_varid(NCI%id,'wvel_ho',varid)
+ if (status .eq. nf90_noerr) then
+ if (is_enabled(data%velocity%wvel_ho)) then
+ call write_log(' Loading wvel_ho')
+ do up=1,NCI%nlevel
+ status = distributed_get_var(NCI%id, varid, &
+ data%velocity%wvel_ho(up,:,:), (/1,1,up,infile%current_time/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_att(NCI%id, varid,'scale_factor',scaling_factor)
+ if (status.ne.NF90_NOERR) then
+ scaling_factor = 1.0d0/(scale_wvel)
+ else
+ scaling_factor = scaling_factor/(scale_wvel)
+ end if
+ if (abs(scaling_factor-1.0d0).gt.1.d-17) then
+ call write_log("scaling wvel_ho",GM_DIAGNOSTIC)
+ data%velocity%wvel_ho(up,:,:) = data%velocity%wvel_ho(up,:,:)*scaling_factor
+ end if
+ end do
+ else
+ call write_log('Variable wvel_ho was specified for input but it is inappropriate for your config settings. It will be excluded from the input.', GM_WARNING)
+ end if
+
+ end if
+
+ end subroutine glide_io_read
+
+ !> Check that the dimension sizes recorded in the input netCDF file
+ !> match the corresponding dimensions of the model instance.  A size
+ !> mismatch is fatal; dimensions absent from the file are skipped.
+ subroutine glide_io_checkdim(infile,model,data)
+   use parallel
+   use glimmer_log
+   use glimmer_ncdf
+   use glide_types
+   implicit none
+   type(glimmer_nc_input), pointer :: infile  ! structure containing input netCDF descriptor
+   type(glide_global_type) :: model           ! model instance holding the authoritative sizes
+   type(glide_global_type), optional :: data  ! unused; retained for interface compatibility
+
+   integer status,dimid,dimsize
+   character(len=150) message
+
+   ! For each known dimension, look it up in the file and, when present,
+   ! compare its size with the model.  The inquiry status is tested
+   ! (rather than dimid, which is undefined when the inquiry fails).
+   status = parallel_inq_dimid(NCI%id,'level',dimid)
+   if (status.eq.nf90_noerr) then
+      status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+      if (dimsize.ne.model%general%upn) then
+         write(message,*) 'Error, reading file ',trim(NCI%filename),' size level does not match: ', &
+              model%general%upn
+         call write_log(message,GM_FATAL)
+      end if
+   end if
+   status = parallel_inq_dimid(NCI%id,'lithoz',dimid)
+   if (status.eq.nf90_noerr) then
+      status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+      if (dimsize.ne.model%lithot%nlayer) then
+         write(message,*) 'Error, reading file ',trim(NCI%filename),' size lithoz does not match: ', &
+              model%lithot%nlayer
+         call write_log(message,GM_FATAL)
+      end if
+   end if
+   ! staggered vertical grid has one level fewer than the unstaggered grid
+   status = parallel_inq_dimid(NCI%id,'staglevel',dimid)
+   if (status.eq.nf90_noerr) then
+      status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+      if (dimsize.ne.model%general%upn-1) then
+         write(message,*) 'Error, reading file ',trim(NCI%filename),' size staglevel does not match: ', &
+              model%general%upn-1
+         call write_log(message,GM_FATAL)
+      end if
+   end if
+   ! staggered grid with boundary levels has one level more
+   status = parallel_inq_dimid(NCI%id,'stagwbndlevel',dimid)
+   if (status.eq.nf90_noerr) then
+      status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+      if (dimsize.ne.model%general%upn+1) then
+         write(message,*) 'Error, reading file ',trim(NCI%filename),' size stagwbndlevel does not match: ', &
+              model%general%upn+1
+         call write_log(message,GM_FATAL)
+      end if
+   end if
+   ! x0/y0 are the staggered (velocity) grid: one point fewer per direction
+   status = parallel_inq_dimid(NCI%id,'x0',dimid)
+   if (status.eq.nf90_noerr) then
+      status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+      if (dimsize.ne.global_ewn-1) then
+         write(message,*) 'Error, reading file ',trim(NCI%filename),' size x0 does not match: ', &
+              global_ewn-1
+         call write_log(message,GM_FATAL)
+      end if
+   end if
+   status = parallel_inq_dimid(NCI%id,'x1',dimid)
+   if (status.eq.nf90_noerr) then
+      status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+      if (dimsize.ne.global_ewn) then
+         write(message,*) 'Error, reading file ',trim(NCI%filename),' size x1 does not match: ', &
+              global_ewn
+         call write_log(message,GM_FATAL)
+      end if
+   end if
+   status = parallel_inq_dimid(NCI%id,'y0',dimid)
+   if (status.eq.nf90_noerr) then
+      status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+      if (dimsize.ne.global_nsn-1) then
+         write(message,*) 'Error, reading file ',trim(NCI%filename),' size y0 does not match: ', &
+              global_nsn-1
+         call write_log(message,GM_FATAL)
+      end if
+   end if
+   status = parallel_inq_dimid(NCI%id,'y1',dimid)
+   if (status.eq.nf90_noerr) then
+      status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+      if (dimsize.ne.global_nsn) then
+         write(message,*) 'Error, reading file ',trim(NCI%filename),' size y1 does not match: ', &
+              global_nsn
+         call write_log(message,GM_FATAL)
+      end if
+   end if
+ end subroutine glide_io_checkdim
+
+ !*****************************************************************************
+ ! calculating time averages
+ !*****************************************************************************
+#ifdef HAVE_AVG
+ !> Advance the time-averaging bookkeeping for an output file by one
+ !> model time step.  Only the accumulated interval length is updated
+ !> here; no per-field accumulation is present in this build.
+ !> (Fixes: removed duplicated `use glide_types`; removed unused locals
+ !> factor/status/varid and the dead assignment to factor.)
+ subroutine glide_avg_accumulate(outfile,data,model)
+   use parallel
+   use glide_types
+   use glimmer_ncdf
+   implicit none
+   type(glimmer_nc_output), pointer :: outfile  ! output netCDF descriptor
+   type(glide_global_type) :: model             ! provides the time step numerics%tinc
+   type(glide_global_type) :: data              ! unused; retained for interface compatibility
+
+   ! increase total time covered by the current averaging interval
+   outfile%total_time = outfile%total_time + model%numerics%tinc
+
+ end subroutine glide_avg_accumulate
+
+ !> Reset the accumulated averaging interval of an output file to zero,
+ !> ready to begin a new averaging period.
+ subroutine glide_avg_reset(outfile,data)
+   use parallel
+   use glide_types
+   use glimmer_ncdf
+   implicit none
+   type(glimmer_nc_output), pointer :: outfile  ! output netCDF descriptor
+   type(glide_global_type) :: data              ! unused here
+
+   ! start a fresh averaging interval
+   outfile%total_time = 0.d0
+
+ end subroutine glide_avg_reset
+#endif
+
+ !*********************************************************************
+ ! some private procedures
+ !*********************************************************************
+
+ !> apply default type to be used in netCDF file
+ integer function get_xtype(outfile,xtype)
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile !< derived type holding information about output file
+ integer, intent(in) :: xtype !< the external netCDF type
+
+ get_xtype = xtype
+
+ if (xtype.eq.NF90_REAL .and. outfile%default_xtype.eq.NF90_DOUBLE) then
+ get_xtype = NF90_DOUBLE
+ end if
+ if (xtype.eq.NF90_DOUBLE .and. outfile%default_xtype.eq.NF90_REAL) then
+ get_xtype = NF90_REAL
+ end if
+ end function get_xtype
+
+ !*********************************************************************
+ ! lots of accessor subroutines follow
+ !*********************************************************************
+ !> Copy climate%acab out of the model, applying the scale_acab factor.
+ subroutine glide_get_acab(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%climate%acab * scale_acab
+ end subroutine glide_get_acab
+
+ !> Store climate%acab, dividing out the scale_acab factor.
+ subroutine glide_set_acab(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%climate%acab = inarray / scale_acab
+ end subroutine glide_set_acab
+
+ !> Copy numerics%adv_cfl_dt out of the model (no scaling applied).
+ subroutine glide_get_adv_cfl_dt(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(out) :: outarray
+
+   outarray = data%numerics%adv_cfl_dt
+ end subroutine glide_get_adv_cfl_dt
+
+ !> Store numerics%adv_cfl_dt (no scaling applied).
+ subroutine glide_set_adv_cfl_dt(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(in) :: inarray
+
+   data%numerics%adv_cfl_dt = inarray
+ end subroutine glide_set_adv_cfl_dt
+
+ !> Copy climate%artm out of the model (no scaling applied).
+ subroutine glide_get_artm(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%climate%artm
+ end subroutine glide_get_artm
+
+ !> Store climate%artm (no scaling applied).
+ subroutine glide_set_artm(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%climate%artm = inarray
+ end subroutine glide_set_artm
+
+ !> Copy velocity%beta out of the model, applying the scale_beta factor.
+ subroutine glide_get_beta(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%velocity%beta * scale_beta
+ end subroutine glide_get_beta
+
+ !> Store velocity%beta, dividing out the scale_beta factor.
+ subroutine glide_set_beta(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%velocity%beta = inarray / scale_beta
+ end subroutine glide_set_beta
+
+ !> Copy temper%bfricflx out of the model (unit scale factor, so no-op scaling).
+ subroutine glide_get_bfricflx(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%temper%bfricflx
+ end subroutine glide_get_bfricflx
+
+ !> Store temper%bfricflx (unit scale factor, so no-op scaling).
+ subroutine glide_set_bfricflx(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%temper%bfricflx = inarray
+ end subroutine glide_set_bfricflx
+
+ !> Copy temper%bheatflx out of the model, applying the scale_bflx factor.
+ subroutine glide_get_bheatflx(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%temper%bheatflx * scale_bflx
+ end subroutine glide_get_bheatflx
+
+ !> Store temper%bheatflx, dividing out the scale_bflx factor.
+ subroutine glide_set_bheatflx(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%temper%bheatflx = inarray / scale_bflx
+ end subroutine glide_set_bheatflx
+
+ !> Copy temper%bmlt out of the model, applying the scale_acab factor.
+ subroutine glide_get_bmlt(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%temper%bmlt * scale_acab
+ end subroutine glide_get_bmlt
+
+ !> Store temper%bmlt, dividing out the scale_acab factor.
+ subroutine glide_set_bmlt(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%temper%bmlt = inarray / scale_acab
+ end subroutine glide_set_bmlt
+
+ !> Copy the lowest level of temper%temp (index upn) into a 2D field.
+ subroutine glide_get_btemp(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   integer :: k_bed, ew_max, ns_max
+
+   k_bed  = data%general%upn
+   ew_max = data%general%ewn
+   ns_max = data%general%nsn
+   outarray = data%temper%temp(k_bed,1:ew_max,1:ns_max)
+ end subroutine glide_get_btemp
+
+ !> Copy stress%btractx out of the model, applying the scale_tau factor.
+ subroutine glide_get_btractx(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%stress%btractx(:,:) * scale_tau
+ end subroutine glide_get_btractx
+
+ !> Store stress%btractx, dividing out the scale_tau factor.
+ subroutine glide_set_btractx(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%stress%btractx(:,:) = inarray / scale_tau
+ end subroutine glide_set_btractx
+
+ !> Copy stress%btracty out of the model, applying the scale_tau factor.
+ subroutine glide_get_btracty(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%stress%btracty(:,:) * scale_tau
+ end subroutine glide_get_btracty
+
+ !> Store stress%btracty, dividing out the scale_tau factor.
+ subroutine glide_set_btracty(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%stress%btracty(:,:) = inarray / scale_tau
+ end subroutine glide_set_btracty
+
+ !> Copy velocity%btrc out of the model, applying the scale_btrc factor.
+ subroutine glide_get_btrc(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%velocity%btrc * scale_btrc
+ end subroutine glide_get_btrc
+
+ !> Store velocity%btrc, dividing out the scale_btrc factor.
+ subroutine glide_set_btrc(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%velocity%btrc = inarray / scale_btrc
+ end subroutine glide_set_btrc
+
+ !> Copy temper%bwat out of the model, applying the thk0 factor.
+ subroutine glide_get_bwat(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%temper%bwat * thk0
+ end subroutine glide_get_bwat
+
+ !> Store temper%bwat, dividing out the thk0 factor.
+ subroutine glide_set_bwat(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%temper%bwat = inarray / thk0
+ end subroutine glide_set_bwat
+
+ !> Copy temper%bwatflx out of the model, applying the thk0 factor.
+ subroutine glide_get_bwatflx(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%temper%bwatflx * thk0
+ end subroutine glide_get_bwatflx
+
+ !> Store temper%bwatflx, dividing out the thk0 factor.
+ subroutine glide_set_bwatflx(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%temper%bwatflx = inarray / thk0
+ end subroutine glide_set_bwatflx
+
+ !> Copy climate%calving out of the model, applying the thk0 factor.
+ subroutine glide_get_calving(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%climate%calving * thk0
+ end subroutine glide_get_calving
+
+ !> Store climate%calving, dividing out the thk0 factor.
+ subroutine glide_set_calving(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%climate%calving = inarray / thk0
+ end subroutine glide_set_calving
+
+ !> Copy numerics%diff_cfl_dt out of the model (no scaling applied).
+ subroutine glide_get_diff_cfl_dt(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(out) :: outarray
+
+   outarray = data%numerics%diff_cfl_dt
+ end subroutine glide_get_diff_cfl_dt
+
+ !> Store numerics%diff_cfl_dt (no scaling applied).
+ subroutine glide_set_diff_cfl_dt(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(in) :: inarray
+
+   data%numerics%diff_cfl_dt = inarray
+ end subroutine glide_set_diff_cfl_dt
+
+ !> Copy velocity%diffu out of the model, applying the scale_diffu factor.
+ subroutine glide_get_diffu(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%velocity%diffu * scale_diffu
+ end subroutine glide_get_diffu
+
+ !> Store velocity%diffu, dividing out the scale_diffu factor.
+ subroutine glide_set_diffu(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%velocity%diffu = inarray / scale_diffu
+ end subroutine glide_set_diffu
+
+ !> Copy geomderv%dthckdtm out of the model, applying the scale_acab factor.
+ subroutine glide_get_dthckdtm(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%geomderv%dthckdtm * scale_acab
+ end subroutine glide_get_dthckdtm
+
+ !> Store geomderv%dthckdtm, dividing out the scale_acab factor.
+ subroutine glide_set_dthckdtm(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%geomderv%dthckdtm = inarray / scale_acab
+ end subroutine glide_set_dthckdtm
+
+ !> Copy geomderv%dusrfdtm out of the model, applying the scale_acab factor.
+ subroutine glide_get_dusrfdtm(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%geomderv%dusrfdtm * scale_acab
+ end subroutine glide_get_dusrfdtm
+
+ !> Store geomderv%dusrfdtm, dividing out the scale_acab factor.
+ subroutine glide_set_dusrfdtm(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%geomderv%dusrfdtm = inarray / scale_acab
+ end subroutine glide_set_dusrfdtm
+
+ !> Copy the integer field velocity%dynbcmask out of the model.
+ subroutine glide_get_dynbcmask(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   integer, dimension(:,:), intent(out) :: outarray
+
+   outarray = data%velocity%dynbcmask
+ end subroutine glide_get_dynbcmask
+
+ !> Store the integer field velocity%dynbcmask.
+ subroutine glide_set_dynbcmask(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   integer, dimension(:,:), intent(in) :: inarray
+
+   data%velocity%dynbcmask = inarray
+ end subroutine glide_set_dynbcmask
+
+ !> Copy basal_physics%effecpress out of the model (no scaling applied).
+ subroutine glide_get_effecpress(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(out) :: outarray
+
+   outarray = data%basal_physics%effecpress
+ end subroutine glide_get_effecpress
+
+ !> Store basal_physics%effecpress (no scaling applied).
+ subroutine glide_set_effecpress(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), dimension(:,:), intent(in) :: inarray
+
+   data%basal_physics%effecpress = inarray
+ end subroutine glide_set_effecpress
+
+ !> Copy the scalar climate%eus out of the model, applying the thk0 factor.
+ subroutine glide_get_eus(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(out) :: outarray
+
+   outarray = data%climate%eus * thk0
+ end subroutine glide_get_eus
+
+ !> Store the scalar climate%eus, dividing out the thk0 factor.
+ subroutine glide_set_eus(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(in) :: inarray
+
+   data%climate%eus = inarray / thk0
+ end subroutine glide_set_eus
+
+ !> Return the gravitational-acceleration parameter grav.
+ subroutine glide_get_gravity(data,outarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(out) :: outarray
+
+   outarray = grav
+ end subroutine glide_get_gravity
+
+ !> Setter counterpart for gravity: intentionally a no-op, since grav is
+ !> a fixed parameter and not stored per model instance.
+ subroutine glide_set_gravity(data,inarray)
+   use glide_types
+   use glimmer_paramets
+   use glimmer_scales
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(in) :: inarray
+
+   ! no rescaling here
+ end subroutine glide_set_gravity
+
+ !> Copy the scalar geometry%iarea out of the model, applying the
+ !> len0*len0*1.d-6 factor (presumably a m^2 -> km^2 conversion of the
+ !> scaled area -- TODO confirm units against glimmer_paramets).
+ !> Fix: the conversion literal was default-precision 1.e-6; it is now
+ !> the double-precision literal 1.d-6 to avoid a spurious ~1e-8
+ !> relative error in the otherwise double-precision expression.
+ subroutine glide_get_iarea(data,outarray)
+   use glimmer_scales
+   use glimmer_paramets
+   use glide_types
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(out) :: outarray
+
+   outarray = (len0*len0*1.d-6)*(data%geometry%iarea)
+ end subroutine glide_get_iarea
+
+ !> Store the scalar geometry%iarea, dividing out the len0*len0*1.d-6
+ !> factor (double-precision literal; see glide_get_iarea).
+ subroutine glide_set_iarea(data,inarray)
+   use glimmer_scales
+   use glimmer_paramets
+   use glide_types
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(in) :: inarray
+
+   data%geometry%iarea = inarray/(len0*len0*1.d-6)
+ end subroutine glide_set_iarea
+
+ !> Copy the scalar geometry%iareaf out of the model, applying the
+ !> len0*len0*1.d-6 factor (double-precision literal; see glide_get_iarea).
+ subroutine glide_get_iareaf(data,outarray)
+   use glimmer_scales
+   use glimmer_paramets
+   use glide_types
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(out) :: outarray
+
+   outarray = (len0*len0*1.d-6)*(data%geometry%iareaf)
+ end subroutine glide_get_iareaf
+
+ !> Store the scalar geometry%iareaf, dividing out the len0*len0*1.d-6 factor.
+ subroutine glide_set_iareaf(data,inarray)
+   use glimmer_scales
+   use glimmer_paramets
+   use glide_types
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(in) :: inarray
+
+   data%geometry%iareaf = inarray/(len0*len0*1.d-6)
+ end subroutine glide_set_iareaf
+
+ !> Copy the scalar geometry%iareag out of the model, applying the
+ !> len0*len0*1.d-6 factor (double-precision literal; see glide_get_iarea).
+ subroutine glide_get_iareag(data,outarray)
+   use glimmer_scales
+   use glimmer_paramets
+   use glide_types
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(out) :: outarray
+
+   outarray = (len0*len0*1.d-6)*(data%geometry%iareag)
+ end subroutine glide_get_iareag
+
+ !> Store the scalar geometry%iareag, dividing out the len0*len0*1.d-6 factor.
+ subroutine glide_set_iareag(data,inarray)
+   use glimmer_scales
+   use glimmer_paramets
+   use glide_types
+   implicit none
+   type(glide_global_type) :: data
+   real(dp), intent(in) :: inarray
+
+   data%geometry%iareag = inarray/(len0*len0*1.d-6)
+ end subroutine glide_set_iareag
+
+! Getter for the 2-D ice mask.  The (1.0) factor is a unit scale kept by
+! the code generator (no physical rescaling for this field).
+  subroutine glide_get_ice_mask(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (1.0)*(data%geometry%ice_mask)
+  end subroutine glide_get_ice_mask
+
+! Setter for the 2-D ice mask (division by 1.0 is a generator no-op).
+  subroutine glide_set_ice_mask(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%geometry%ice_mask = inarray/(1.0)
+  end subroutine glide_set_ice_mask
+
+! Getter returning the constant specific heat capacity of ice (shci,
+! from a use'd parameter module).
+  subroutine glide_get_ice_specific_heat(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(out) :: outarray
+
+    outarray = shci
+  end subroutine glide_get_ice_specific_heat
+
+! Setter stub: shci is a constant, so the input is intentionally ignored.
+  subroutine glide_set_ice_specific_heat(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(in) :: inarray
+
+! no rescaling here
+  end subroutine glide_set_ice_specific_heat
+
+! Getter returning the constant thermal conductivity of ice (coni).
+  subroutine glide_get_ice_thermal_conductivity(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(out) :: outarray
+
+    outarray = coni
+  end subroutine glide_get_ice_thermal_conductivity
+
+! Setter stub: coni is a constant, so the input is intentionally ignored.
+  subroutine glide_set_ice_thermal_conductivity(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(in) :: inarray
+
+! no rescaling here
+  end subroutine glide_set_ice_thermal_conductivity
+
+! Getter for total ice volume: rescales by thk0*len0*len0*1.e-9
+! (1.e-9 * m^3, i.e. km^3 if thk0/len0 are in metres -- TODO confirm).
+  subroutine glide_get_ivol(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(out) :: outarray
+
+    outarray = (thk0*len0*len0*1.e-9)*(data%geometry%ivol)
+  end subroutine glide_get_ivol
+
+! Setter: inverse scaling of glide_get_ivol.
+  subroutine glide_set_ivol(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(in) :: inarray
+
+    data%geometry%ivol = inarray/(thk0*len0*len0*1.e-9)
+  end subroutine glide_set_ivol
+
+! Getter for the integer kinematic-boundary-condition mask (velocity
+! grid); copied unscaled.
+  subroutine glide_get_kinbcmask(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    integer, dimension(:,:), intent(out) :: outarray
+
+    outarray = data%velocity%kinbcmask(:,:)
+  end subroutine glide_get_kinbcmask
+
+! Setter for the kinematic-boundary-condition mask; stored unscaled.
+  subroutine glide_set_kinbcmask(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    integer, dimension(:,:), intent(in) :: inarray
+
+    data%velocity%kinbcmask(:,:) = inarray
+  end subroutine glide_set_kinbcmask
+
+! Getter for the lower ice surface (lsrf); rescaled by the thickness
+! scale thk0 to physical units.
+  subroutine glide_get_lsurf(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (thk0)*(data%geometry%lsrf)
+  end subroutine glide_get_lsurf
+
+! Setter: inverse of glide_get_lsurf (divide by thk0).
+  subroutine glide_set_lsurf(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%geometry%lsrf = inarray/(thk0)
+  end subroutine glide_set_lsurf
+
+! Getter for isostasy%relx (presumably the relaxed bedrock topography --
+! verify in glide_types); scaled by thk0.
+  subroutine glide_get_relx(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (thk0)*(data%isostasy%relx)
+  end subroutine glide_get_relx
+
+! Setter: inverse of glide_get_relx.
+  subroutine glide_set_relx(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%isostasy%relx = inarray/(thk0)
+  end subroutine glide_set_relx
+
+! Getter returning the constant ice density (rhoi).
+  subroutine glide_get_rho_ice(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(out) :: outarray
+
+    outarray = rhoi
+  end subroutine glide_get_rho_ice
+
+! Setter stub: rhoi is a constant, so the input is intentionally ignored.
+  subroutine glide_set_rho_ice(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(in) :: inarray
+
+! no rescaling here
+  end subroutine glide_set_rho_ice
+
+! Getter returning the constant seawater density (rhoo).
+  subroutine glide_get_rho_seawater(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(out) :: outarray
+
+    outarray = rhoo
+  end subroutine glide_get_rho_seawater
+
+! Setter stub: rhoo is a constant, so the input is intentionally ignored.
+  subroutine glide_set_rho_seawater(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(in) :: inarray
+
+! no rescaling here
+  end subroutine glide_set_rho_seawater
+
+! Getter returning the constant number of seconds per year (scyr).
+  subroutine glide_get_seconds_per_year(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(out) :: outarray
+
+    outarray = scyr
+  end subroutine glide_get_seconds_per_year
+
+! Setter stub: scyr is a constant, so the input is intentionally ignored.
+  subroutine glide_set_seconds_per_year(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), intent(in) :: inarray
+
+! no rescaling here
+  end subroutine glide_set_seconds_per_year
+
+! Getter for bed softness; rescaled by scale_btrc (basal-traction scale
+! from glimmer_scales).
+  subroutine glide_get_soft(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (scale_btrc)*(data%velocity%bed_softness)
+  end subroutine glide_get_soft
+
+! Setter: inverse of glide_get_soft.
+  subroutine glide_set_soft(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%velocity%bed_softness = inarray/(scale_btrc)
+  end subroutine glide_set_soft
+
+! Getter for stagthck (ice thickness on the staggered/velocity grid,
+! per the field name); rescaled by thk0.
+  subroutine glide_get_stagthk(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (thk0)*(data%geomderv%stagthck)
+  end subroutine glide_get_stagthk
+
+! Setter: inverse of glide_get_stagthk.
+  subroutine glide_set_stagthk(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%geomderv%stagthck = inarray/(thk0)
+  end subroutine glide_set_stagthk
+
+! Getter for the surface air temperature field (climate%artm); no
+! rescaling is applied.
+  subroutine glide_get_surftemp(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = data%climate%artm
+  end subroutine glide_get_surftemp
+
+! Setter for the surface air temperature; stored unscaled.
+  subroutine glide_set_surftemp(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%climate%artm = inarray
+  end subroutine glide_set_surftemp
+
+! Getter for basalproc%mintauf (presumably a minimum yield/failure
+! stress -- verify in glide_types); rescaled by scale_tau.
+  subroutine glide_get_tauf(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (scale_tau)*(data%basalproc%mintauf)
+  end subroutine glide_get_tauf
+
+! Setter: inverse of glide_get_tauf.
+  subroutine glide_set_tauf(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%basalproc%mintauf = inarray/(scale_tau)
+  end subroutine glide_set_tauf
+
+! Getter for the x-component of basal shear stress; the factor
+! 1e-3*thk0*thk0/len0 converts model units to physical stress (the 1e-3
+! suggests kPa -- TODO confirm against glimmer_paramets).
+  subroutine glide_get_taux(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (1e-3*thk0*thk0/len0)*(data%velocity%tau_x)
+  end subroutine glide_get_taux
+
+! Setter: inverse scaling of glide_get_taux.
+  subroutine glide_set_taux(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%velocity%tau_x = inarray/(1e-3*thk0*thk0/len0)
+  end subroutine glide_set_taux
+
+! Getter for the y-component of basal shear stress; same scaling as
+! glide_get_taux.
+  subroutine glide_get_tauy(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (1e-3*thk0*thk0/len0)*(data%velocity%tau_y)
+  end subroutine glide_get_tauy
+
+! Setter: inverse scaling of glide_get_tauy.
+  subroutine glide_set_tauy(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%velocity%tau_y = inarray/(1e-3*thk0*thk0/len0)
+  end subroutine glide_set_tauy
+
+! Getter for ice thickness (thck); rescaled by thk0 to physical units.
+  subroutine glide_get_thk(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (thk0)*(data%geometry%thck)
+  end subroutine glide_get_thk
+
+! Setter: inverse of glide_get_thk (divide by thk0).
+  subroutine glide_set_thk(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%geometry%thck = inarray/(thk0)
+  end subroutine glide_set_thk
+
+! Getter for the integer thickness mask (thkmask); copied unscaled.
+  subroutine glide_get_thkmask(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    integer, dimension(:,:), intent(out) :: outarray
+
+    outarray = data%geometry%thkmask
+  end subroutine glide_get_thkmask
+
+! Setter for the thickness mask; stored unscaled.
+  subroutine glide_set_thkmask(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    integer, dimension(:,:), intent(in) :: inarray
+
+    data%geometry%thkmask = inarray
+  end subroutine glide_set_thkmask
+
+! Getter for bedrock topography (topg); rescaled by thk0.
+  subroutine glide_get_topg(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (thk0)*(data%geometry%topg)
+  end subroutine glide_get_topg
+
+! Setter: inverse of glide_get_topg.
+  subroutine glide_set_topg(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%geometry%topg = inarray/(thk0)
+  end subroutine glide_set_topg
+
+! Getter for the x-component of basal velocity (ubas); rescaled by the
+! velocity scale scale_uvel.
+  subroutine glide_get_ubas(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (scale_uvel)*(data%velocity%ubas)
+  end subroutine glide_get_ubas
+
+! Setter: inverse of glide_get_ubas.
+  subroutine glide_set_ubas(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%velocity%ubas = inarray/(scale_uvel)
+  end subroutine glide_set_ubas
+
+! Getter for the x-component of ice flux (uflx); rescaled by scale_uflx.
+  subroutine glide_get_uflx(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (scale_uflx)*(data%velocity%uflx)
+  end subroutine glide_get_uflx
+
+! Setter: inverse of glide_get_uflx.
+  subroutine glide_set_uflx(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%velocity%uflx = inarray/(scale_uflx)
+  end subroutine glide_set_uflx
+
+! Getter for unstagbeta (basal traction coefficient on the unstaggered
+! grid, per the field name); rescaled by scale_beta.
+  subroutine glide_get_unstagbeta(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (scale_beta)*(data%velocity%unstagbeta)
+  end subroutine glide_get_unstagbeta
+
+! Setter: inverse of glide_get_unstagbeta.
+  subroutine glide_set_unstagbeta(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%velocity%unstagbeta = inarray/(scale_beta)
+  end subroutine glide_set_unstagbeta
+
+! Getter for the upper ice surface (usrf); rescaled by thk0.
+  subroutine glide_get_usurf(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (thk0)*(data%geometry%usrf)
+  end subroutine glide_get_usurf
+
+! Setter: inverse of glide_get_usurf.
+  subroutine glide_set_usurf(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%geometry%usrf = inarray/(thk0)
+  end subroutine glide_set_usurf
+
+! Getter for the y-component of basal velocity (vbas); rescaled by
+! scale_uvel.
+  subroutine glide_get_vbas(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (scale_uvel)*(data%velocity%vbas)
+  end subroutine glide_get_vbas
+
+! Setter: inverse of glide_get_vbas.
+  subroutine glide_set_vbas(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%velocity%vbas = inarray/(scale_uvel)
+  end subroutine glide_set_vbas
+
+! Getter for the y-component of ice flux (vflx); rescaled by scale_uflx.
+  subroutine glide_get_vflx(data,outarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(out) :: outarray
+
+    outarray = (scale_uflx)*(data%velocity%vflx)
+  end subroutine glide_get_vflx
+
+! Setter: inverse of glide_get_vflx.
+  subroutine glide_set_vflx(data,inarray)
+    use glimmer_scales
+    use glimmer_paramets
+    use glide_types
+    implicit none
+    type(glide_global_type) :: data
+    real(dp), dimension(:,:), intent(in) :: inarray
+
+    data%velocity%vflx = inarray/(scale_uflx)
+  end subroutine glide_set_vflx
+
+
+end module glide_io
diff --git a/components/cism/glimmer-cism/libglide/glide_lithot.F90 b/components/cism/glimmer-cism/libglide/glide_lithot.F90
new file mode 100644
index 0000000000..13b5e7b3cf
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_lithot.F90
@@ -0,0 +1,176 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_lithot.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+! module for temperature calculations in the upper lithosphere
+
+ !TODO - Test module glide_lithot (1D version) in parallel code.
+ ! 3D version probably will not work in parallel
+
+module glide_lithot
+
+  implicit none
+
+contains
+
+  !> Initialise the lithosphere temperature model: allocate the shared
+  !> vertical-grid arrays, build the vertical finite-difference factors,
+  !> set the initial temperature profile (non-restart runs only), and
+  !> dispatch to the 1D or 3D initialiser depending on lithot%num_dim.
+  subroutine init_lithot(model)
+    use glide_types
+    use glide_setup
+    use glimmer_paramets, only: tim0
+    use glimmer_log
+    use glide_lithot1d
+    use glide_lithot3d
+    implicit none
+    type(glide_global_type),intent(inout) :: model !> model instance
+
+    ! local variables
+    integer k
+    real(dp) :: factor
+
+    ! allocate memory for common arrays
+    allocate(model%lithot%deltaz(model%lithot%nlayer)); model%lithot%deltaz = 0.0
+    allocate(model%lithot%zfactors(3,model%lithot%nlayer)); model%lithot%zfactors = 0.0
+
+    ! set up vertical grid
+    ! Layer depths follow a sigma-type stretching of rock_base so that
+    ! resolution is concentrated near the top of the lithosphere.
+    do k=1,model%lithot%nlayer
+       model%lithot%deltaz(k) = (1.d0 - glide_calc_sigma(real((model%lithot%nlayer-k),dp)/real((model%lithot%nlayer-1),dp), 2.d0)) &
+            *model%lithot%rock_base
+    end do
+
+    ! calculate diffusion coefficient
+    ! diffu = k/(rho*c), thermal diffusivity of the bedrock
+    model%lithot%diffu = model%lithot%con_r/(model%lithot%rho_r*model%lithot%shc_r)
+
+    ! set up factors for vertical finite differences
+    ! zfactors(1:3,k) are the sub-/main-/super-diagonal weights of the
+    ! vertical diffusion operator on the non-uniform grid, premultiplied
+    ! by diffu*dt (tim0 converts the scaled timestep to seconds).
+    do k=2,model%lithot%nlayer-1
+       model%lithot%zfactors(1,k) = model%lithot%diffu*tim0*model%numerics%dt / &
+            ((model%lithot%deltaz(k)-model%lithot%deltaz(k-1)) * (model%lithot%deltaz(k+1)-model%lithot%deltaz(k-1)))
+       model%lithot%zfactors(2,k) = model%lithot%diffu*tim0*model%numerics%dt / &
+            ((model%lithot%deltaz(k+1)-model%lithot%deltaz(k)) * (model%lithot%deltaz(k)-model%lithot%deltaz(k-1)))
+       model%lithot%zfactors(3,k) = model%lithot%diffu*tim0*model%numerics%dt / &
+            ((model%lithot%deltaz(k+1)-model%lithot%deltaz(k)) * (model%lithot%deltaz(k+1)-model%lithot%deltaz(k-1)))
+    end do
+    ! bottom layer: one-sided (half-weight) difference
+    k = model%lithot%nlayer
+    model%lithot%zfactors(:,k) = 0.5*model%lithot%diffu*tim0*model%numerics%dt / &
+         (model%lithot%deltaz(k)-model%lithot%deltaz(k-1))**2
+
+    !TODO - Make sure the sign is correct for the geothermal flux.
+    !NOTE: CISM convention is that geot is positive down, so geot < 0 for upward geothermal flux
+
+    if (model%options%is_restart == RESTART_FALSE) then
+       ! set initial temp distribution to thermal gradient
+       ! (linear profile consistent with the prescribed geothermal flux)
+       factor = model%paramets%geot / model%lithot%con_r
+       do k=1,model%lithot%nlayer
+          model%lithot%temp(:,:,k) = model%lithot%surft + model%lithot%deltaz(k)*factor
+       end do
+    end if
+
+    if (model%lithot%num_dim==1) then
+       call init_lithot1d(model)
+    else if (model%lithot%num_dim==3) then
+       call init_lithot3d(model)
+    else
+       call write_log('Error, init_lithot: Wrong number of dimensions',GM_FATAL,__FILE__,__LINE__)
+    end if
+
+  end subroutine init_lithot
+
+  !> Spin up the geothermal-heat-flux (GTHF) calculation by running
+  !> calc_lithot for numt steps.  Skipped on restarts.  Currently
+  !> serial-only (not_parallel aborts multi-task runs).
+  subroutine spinup_lithot(model)
+    use parallel
+    use glide_types
+    use glimmer_log
+    use glide_mask
+    implicit none
+    type(glide_global_type),intent(inout) :: model !> model instance
+
+    integer t
+
+    if (model%options%is_restart == RESTART_FALSE .and. model%lithot%numt > 0) then
+       call write_log('Spinning up GTHF calculations',type=GM_INFO)
+       call not_parallel(__FILE__,__LINE__)
+       do t=1,model%lithot%numt
+          call calc_lithot(model)
+       end do
+
+    end if
+  end subroutine spinup_lithot
+
+  !> Advance the lithosphere temperature field one timestep (1D or 3D
+  !> solver, selected by lithot%num_dim) and update the basal heat flux.
+  subroutine calc_lithot(model)
+    use glide_types
+    use glimmer_log
+    use glide_lithot1d
+    use glide_lithot3d
+    implicit none
+    type(glide_global_type),intent(inout) :: model !> model instance
+
+    if (model%lithot%num_dim==1) then
+       call calc_lithot1d(model)
+    else if (model%lithot%num_dim==3) then
+       call calc_lithot3d(model)
+    else
+       call write_log('Wrong number of dimensions.',GM_FATAL,__FILE__,__LINE__)
+    end if
+
+    call calc_geoth(model)
+
+  end subroutine calc_lithot
+
+  subroutine calc_geoth(model)
+    !> calculate geothermal heat flux
+    !> Fourier's law applied across the top two lithosphere layers:
+    !> bheatflx = con_r * dT/dz (see sign-convention NOTE in init_lithot).
+    use glide_types
+    implicit none
+    type(glide_global_type),intent(inout) :: model !> model instance
+
+    real(dp) factor
+
+    factor = model%lithot%con_r/(model%lithot%deltaz(2)-model%lithot%deltaz(1))
+    model%temper%bheatflx(:,:) = factor*(model%lithot%temp(:,:,2)-model%lithot%temp(:,:,1))
+
+  end subroutine calc_geoth
+
+  !> Release the shared arrays and dispatch to the 1D/3D finaliser.
+  subroutine finalise_lithot(model)
+    use glide_types
+    use glide_lithot1d
+    use glimmer_log
+    use glide_lithot3d
+    implicit none
+    type(glide_global_type),intent(inout) :: model !> model instance
+
+    deallocate(model%lithot%deltaz)
+    deallocate(model%lithot%zfactors)
+
+    if (model%lithot%num_dim==1) then
+       call finalise_lithot1d(model)
+    else if (model%lithot%num_dim==3) then
+       call finalise_lithot3d(model)
+    else
+       call write_log('Wrong number of dimensions.',GM_FATAL,__FILE__,__LINE__)
+    end if
+  end subroutine finalise_lithot
+
+end module glide_lithot
diff --git a/components/cism/glimmer-cism/libglide/glide_lithot1d.F90 b/components/cism/glimmer-cism/libglide/glide_lithot1d.F90
new file mode 100644
index 0000000000..1c90451e3e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_lithot1d.F90
@@ -0,0 +1,132 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_lithot1d.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+
+! module for 1D temperature calculations in the upper lithosphere
+
+!TODO - Test glide_lithot1d in parallel. It is local and should be parallel-friendly.
+
+module glide_lithot1d
+
+  implicit none
+
+contains
+
+  !> Allocate the tridiagonal-system work arrays and assemble the
+  !> (time-invariant) coefficient matrix for the 1D vertical diffusion
+  !> solve, including Dirichlet rows for the top and bottom boundaries.
+  subroutine init_lithot1d(model)
+
+    use glide_types
+    implicit none
+    type(glide_global_type),intent(inout) :: model !> model instance
+
+    ! allocate memory for 1D code
+    allocate(model%lithot%rhs(model%lithot%nlayer))
+    allocate(model%lithot%subd(model%lithot%nlayer))
+    allocate(model%lithot%diag(model%lithot%nlayer))
+    allocate(model%lithot%supd(model%lithot%nlayer))
+
+    ! setup coefficient matrix
+    ! (implicit part of a Crank-Nicolson-style scheme built from the
+    ! zfactors computed in init_lithot)
+    model%lithot%subd(:) = - model%lithot%zfactors(1,:)
+    model%lithot%diag(:) = 1. + model%lithot%zfactors(2,:)
+    model%lithot%supd(:) = - model%lithot%zfactors(3,:)
+    ! and the boundary conditions
+    ! top face
+    ! simply match air temperature where no ice and basal temperature where ice
+    model%lithot%subd(1) = 0.
+    model%lithot%diag(1) = 1.
+    model%lithot%supd(1) = 0.
+    ! bottom face
+    ! keep constant
+    model%lithot%subd(model%lithot%nlayer) = 0.
+    model%lithot%diag(model%lithot%nlayer) = 1.
+    model%lithot%supd(model%lithot%nlayer) = 0.
+  end subroutine init_lithot1d
+
+  !> Advance the 1D lithosphere temperature column at every horizontal
+  !> grid point: build the RHS from the current column (explicit part),
+  !> apply the surface boundary condition from the thickness mask, and
+  !> solve the tridiagonal system in place.
+  subroutine calc_lithot1d(model)
+    use glide_types
+    use glimmer_utils, only: tridiag
+    !use glide_mask
+    implicit none
+    type(glide_global_type),intent(inout) :: model !> model instance
+
+    integer i,j,k
+
+    !Note - I think these loops can be left as is for parallel code.
+    !       Local calculation, so no issues with computing in halo cells.
+
+    ! loop over grid
+    do j=1,model%general%nsn
+       do i=1,model%general%ewn
+          ! calculate RHS for upper BC
+          ! Grounded, non-thin ice: couple to the ice basal temperature
+          ! and flag the column as active in lithot%mask.
+          if (GLIDE_IS_GROUND(model%geometry%thkmask(i,j)) .and. .not. GLIDE_IS_THIN(model%geometry%thkmask(i,j)) ) then
+             model%lithot%rhs(1) = model%temper%temp(model%general%upn,i,j) ! ice basal temperature
+             model%lithot%mask(i,j) = .true.
+          else
+             ! Columns flagged earlier keep being integrated, driven by
+             ! ocean (mart) or air (artm) surface temperature.
+             if (model%lithot%mask(i,j)) then
+                if (GLIDE_IS_OCEAN(model%geometry%thkmask(i,j))) then
+                   model%lithot%rhs(1) = model%lithot%mart
+                else if (GLIDE_IS_LAND(model%geometry%thkmask(i,j))) then
+                   model%lithot%rhs(1) = model%climate%artm(i,j) ! air temperature outside ice sheet
+                end if
+             end if
+          end if
+
+          if (model%lithot%mask(i,j)) then
+             ! calculate RHS for rest
+             ! (explicit half of the scheme; note 2.-diag recovers
+             ! 1 - zfactors(2,k) from the stored implicit diagonal)
+             do k=2,model%lithot%nlayer-1
+                model%lithot%rhs(k) = - model%lithot%subd(k)*model%lithot%temp(i,j,k-1) &
+                     + (2.-model%lithot%diag(k))*model%lithot%temp(i,j,k) &
+                     - model%lithot%supd(k)*model%lithot%temp(i,j,k+1)
+             end do
+             ! bottom boundary: hold temperature fixed
+             model%lithot%rhs(model%lithot%nlayer) = model%lithot%temp(i,j,model%lithot%nlayer)
+
+             ! solve tri-diagonal matrix eqn
+             call tridiag(model%lithot%subd(1:), &
+                  model%lithot%diag(:), &
+                  model%lithot%supd(:model%lithot%nlayer), &
+                  model%lithot%temp(i,j,:) , &
+                  model%lithot%rhs(:))
+          end if
+       end do
+    end do
+  end subroutine calc_lithot1d
+
+  !> Release the tridiagonal work arrays allocated in init_lithot1d.
+  subroutine finalise_lithot1d(model)
+    use glide_types
+    implicit none
+    type(glide_global_type),intent(inout) :: model !> model instance
+
+    deallocate(model%lithot%rhs)
+    deallocate(model%lithot%subd)
+    deallocate(model%lithot%diag)
+    deallocate(model%lithot%supd)
+  end subroutine finalise_lithot1d
+
+end module glide_lithot1d
diff --git a/components/cism/glimmer-cism/libglide/glide_lithot3d.F90 b/components/cism/glimmer-cism/libglide/glide_lithot3d.F90
new file mode 100644
index 0000000000..1d7ffae409
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_lithot3d.F90
@@ -0,0 +1,238 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_lithot3d.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+
+! module for 3D temperature calculations in the upper lithosphere
+! (serial only)
+
+ !TODO - Support a 3D lithosphere calculation in parallel? Not easy to do.
+
+module glide_lithot3d
+
+  implicit none
+
+  private
+  public :: init_lithot3d, calc_lithot3d, finalise_lithot3d
+
+
+contains
+
+  !> Build the sparse finite-difference operator for the 3D lithosphere
+  !> diffusion problem (Dirichlet top/bottom rows, 7-point interior
+  !> stencil with one-sided differences at lateral walls), convert it to
+  !> SLAP column format, and seed the solution vector from lithot%temp.
+  subroutine init_lithot3d(model)
+
+    use glide_types
+    use glimmer_paramets, only: len0,tim0
+    implicit none
+    type(glide_global_type),intent(inout) :: model ! model instance
+
+    ! local variables
+    integer i,j,k,r,icount,jcount,ewn,nsn
+
+    ! allocate memory for 3D code
+    !TODO - Move to glide_allocarr?
+
+    ewn=model%general%ewn
+    nsn=model%general%nsn
+
+    ! matrix sized for 7 entries per interior row plus the boundary rows
+    call new_sparse_matrix(ewn*nsn*model%lithot%nlayer, &
+         (model%lithot%nlayer-1)*ewn*nsn*7+ewn*nsn+1,model%lithot%fd_coeff)
+    call new_sparse_matrix(ewn*nsn*model%lithot%nlayer, &
+         (model%lithot%nlayer-1)*ewn*nsn*7+ewn*nsn+1,model%lithot%fd_coeff_slap)
+    allocate(model%lithot%rhs(model%lithot%nlayer*ewn*nsn))
+    allocate(model%lithot%answer(model%lithot%nlayer*ewn*nsn))
+    ! workspace size for the SLAP iterative solver
+    model%lithot%mxnelt = 20 * model%lithot%nlayer*ewn*nsn
+
+    !TODO - Deallocate these arrays
+    allocate(model%lithot%rwork(model%lithot%mxnelt))
+    allocate(model%lithot%iwork(model%lithot%mxnelt))
+
+    ! set up factors for horizontal finite differences
+    ! (0.5*diffu*dt/dx**2; len0/tim0 convert scaled grid spacing and
+    ! timestep to physical units)
+    model%lithot%xfactor = 0.5*model%lithot%diffu*tim0*model%numerics%dt / (model%numerics%dew*len0)**2
+    model%lithot%yfactor = 0.5*model%lithot%diffu*tim0*model%numerics%dt / (model%numerics%dns*len0)**2
+
+
+    ! calculate finite difference coefficient matrix
+    ! top face
+    ! simply match air temperature where no ice and basal temperature where ice
+    k = 1
+    do j=1,model%general%nsn
+       do i=1,model%general%ewn
+          r = linearise(model,i,j,k)
+          call sparse_insert_val(model%lithot%fd_coeff,r,r, 1.d0)
+       end do
+    end do
+    ! interior layers: 7-point stencil; icount/jcount track how many
+    ! lateral neighbours exist so the diagonal stays consistent at the
+    ! domain edges
+    do k=2, model%lithot%nlayer-1
+       do j=1,model%general%nsn
+          do i=1,model%general%ewn
+             icount = 0
+             jcount = 0
+             r = linearise(model,i,j,k)
+             ! i-1,j,k
+             if (i /= 1) then
+                call sparse_insert_val(model%lithot%fd_coeff,r,linearise(model,i-1,j,k), -model%lithot%xfactor)
+                icount = icount + 1
+             end if
+             ! i+1, j, k
+             if (i /= model%general%ewn) then
+                call sparse_insert_val(model%lithot%fd_coeff,r,linearise(model,i+1,j,k), -model%lithot%xfactor)
+                icount = icount + 1
+             end if
+             ! i,j-1,k
+             if (j /= 1) then
+                call sparse_insert_val(model%lithot%fd_coeff,r,linearise(model,i,j-1,k), -model%lithot%yfactor)
+                jcount = jcount + 1
+             end if
+             ! i,j+1,k
+             if (j /= model%general%nsn) then
+                call sparse_insert_val(model%lithot%fd_coeff,r,linearise(model,i,j+1,k), -model%lithot%yfactor)
+                jcount = jcount + 1
+             end if
+             ! i,j,k-1
+             call sparse_insert_val(model%lithot%fd_coeff,r,linearise(model,i,j,k-1), -model%lithot%zfactors(1,k))
+             ! i,j,k+1
+             call sparse_insert_val(model%lithot%fd_coeff,r,linearise(model,i,j,k+1), -model%lithot%zfactors(3,k))
+             ! i,j,k
+             call sparse_insert_val(model%lithot%fd_coeff,r,r, &
+                  icount*model%lithot%xfactor + jcount*model%lithot%yfactor + model%lithot%zfactors(2,k) + 1.)
+          end do
+       end do
+    end do
+
+    ! bottom face
+    ! keep constant
+    k = model%lithot%nlayer
+    do j=1,model%general%nsn
+       do i=1,model%general%ewn
+          r = linearise(model,i,j,k)
+          call sparse_insert_val(model%lithot%fd_coeff,r,r, 1.d0)
+       end do
+    end do
+
+    ! convert from SLAP Triad to SLAP Column format
+    call copy_sparse_matrix(model%lithot%fd_coeff,model%lithot%fd_coeff_slap)
+    call ds2y(model%general%nsn*model%general%ewn*model%lithot%nlayer,model%lithot%fd_coeff_slap%nonzeros, &
+         model%lithot%fd_coeff_slap%col,model%lithot%fd_coeff_slap%row,model%lithot%fd_coeff_slap%val, 0)
+
+    ! initialise result vector
+    do k=1,model%lithot%nlayer
+       do j=1,model%general%nsn
+          do i=1,model%general%ewn
+             model%lithot%answer(linearise(model,i,j,k)) = model%lithot%temp(i,j,k)
+          end do
+       end do
+    end do
+
+  end subroutine init_lithot3d
+
+  !> Advance the 3D lithosphere temperature field one timestep:
+  !> assemble the RHS (explicit half plus surface boundary values from
+  !> the thickness mask), solve with SLAP's incomplete-LU BiCGStab
+  !> solver (dslucs), and scatter the answer back into lithot%temp.
+  !> Aborts the run on solver failure.
+  subroutine calc_lithot3d(model)
+    use glide_types
+    use glide_stop
+    use glimmer_log
+    implicit none
+    type(glide_global_type),intent(inout) :: model ! model instance
+
+    integer i,j,k,r
+    integer iter
+    real(dp) err
+    real(dp), parameter :: tol = 1.0d-12
+    integer, parameter :: isym = 0, itol = 2, itmax = 101
+    integer :: ierr
+
+    ! calculate RHS
+    ! rhs = (2I - A)*answer, the explicit half of the scheme
+    call sparse_matrix_vec_prod(model%lithot%fd_coeff,model%lithot%answer,model%lithot%rhs)
+    model%lithot%rhs = -model%lithot%rhs + 2. * model%lithot%answer
+    ! calc RHS on upper boundary
+    ! (same surface-temperature logic as the 1D solver)
+    k = 1
+    do j=1,model%general%nsn
+       do i=1,model%general%ewn
+          r = linearise(model,i,j,k)
+          if (GLIDE_IS_GROUND(model%geometry%thkmask(i,j)) .and. .not. GLIDE_IS_THIN(model%geometry%thkmask(i,j)) ) then
+             model%lithot%rhs(r) = model%temper%temp(model%general%upn,i,j) ! ice basal temperature
+             model%lithot%mask(i,j) = .true.
+          else
+             if (model%lithot%mask(i,j)) then
+                if (GLIDE_IS_OCEAN(model%geometry%thkmask(i,j))) then
+                   model%lithot%rhs(r) = model%lithot%mart
+                else if (GLIDE_IS_LAND(model%geometry%thkmask(i,j))) then
+                   model%lithot%rhs(r) = model%climate%artm(i,j) ! air temperature outside ice sheet
+                end if
+             end if
+          end if
+       end do
+    end do
+
+    ! solve matrix equation
+    call dslucs(model%general%nsn*model%general%ewn*model%lithot%nlayer, model%lithot%rhs, model%lithot%answer, &
+         model%lithot%fd_coeff_slap%nonzeros, model%lithot%fd_coeff_slap%col,model%lithot%fd_coeff_slap%row, &
+         model%lithot%fd_coeff_slap%val, isym,itol,tol,itmax,iter,err,ierr,0, &
+         model%lithot%rwork, model%lithot%mxnelt, model%lithot%iwork, model%lithot%mxnelt)
+
+    if (ierr /= 0) then
+       print *, 'pcg error ', ierr, itmax, iter
+       write(*,*) model%numerics%time
+       call glide_finalise(model,.true.)
+       call close_log
+       stop
+    end if
+
+    ! de-linearise results
+    do k=1, model%lithot%nlayer
+       do j=1,model%general%nsn
+          do i=1,model%general%ewn
+             model%lithot%temp(i,j,k) = model%lithot%answer(linearise(model,i,j,k))
+          end do
+       end do
+    end do
+
+  end subroutine calc_lithot3d
+
+  !> Release the sparse matrices and solver vectors allocated in
+  !> init_lithot3d (NOTE: rwork/iwork are not freed here -- see the
+  !> TODO in init_lithot3d).
+  subroutine finalise_lithot3d(model)
+    use glide_types
+    implicit none
+    type(glide_global_type),intent(inout) :: model ! model instance
+
+    call del_sparse_matrix(model%lithot%fd_coeff)
+    call del_sparse_matrix(model%lithot%fd_coeff_slap)
+    deallocate(model%lithot%rhs)
+    deallocate(model%lithot%answer)
+  end subroutine finalise_lithot3d
+
+  !> Map a 3D grid index (i,j,k) to the 1-based linear row index used
+  !> by the sparse matrix and solution vectors (i fastest, then j, k).
+  function linearise(model,i,j,k)
+    use glide_types
+    implicit none
+    type(glide_global_type),intent(in) :: model
+    integer, intent(in) :: i,j,k
+    integer :: linearise
+
+    linearise = i + (j-1)*model%general%ewn + (k-1)*model%general%ewn*model%general%nsn
+  end function linearise
+
+
+end module glide_lithot3d
diff --git a/components/cism/glimmer-cism/libglide/glide_mask.F90 b/components/cism/glimmer-cism/libglide/glide_mask.F90
new file mode 100644
index 0000000000..23a33e239c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_mask.F90
@@ -0,0 +1,589 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_mask.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+
+module glide_mask
+
+ ! masking ice thicknesses
+
+ use glimmer_global, only : dp
+ use nan_mod, only : NaN
+
+ implicit none
+
+contains
+
+!TODO - Remove iarea and ivol calculation? They can be computed elsewhere.
+
+!TODO - Write a new subroutine (in addition to glide_set_mask) to compute mask for staggered grid?
+! This subroutine is now called from glissade_velo_driver with stagthck and stagtopg
+! as input arguments.
+
+  subroutine glide_set_mask(numerics, thck, topg, ewn, nsn, eus, mask, iarea, ivol, exec_serial)
+
+    use parallel
+    use glide_types
+    use glimmer_physcon, only : rhoi, rhoo
+    implicit none
+
+    type(glide_numerics), intent(in) :: numerics !Numerical parameters structure
+    real(dp), dimension(:,:), intent(in) :: thck ! Ice thickness
+    real(dp), dimension(:,:), intent(in) :: topg ! Bedrock topography (not lower surface!)
+    integer, intent(in) :: ewn, nsn ! Grid size
+    real(dp), intent(in) :: eus ! Sea level
+    integer, dimension(:,:), intent(inout) :: mask ! Output mask
+    real(dp), intent(inout), optional :: ivol, iarea ! Area and volume of ice
+    logical, optional :: exec_serial !JEFF If executing in serial in MPI program.
+
+    ! local variables
+    integer ew,ns
+    real(dp), parameter :: con = - rhoi / rhoo ! flotation factor: a point is ocean/floating where topg - eus < con*thck
+    logical :: exec_serial_flag
+
+    !Note - This array may not be needed, at least in parallel.
+
+    ! Create an array to "fake" the boundaries of the mask so that boundary
+    ! finding can work even on the boundaries of the real mask.
+
+    integer, dimension(0:ewn+1,0:nsn+1) :: maskWithBounds;
+
+    !TODO - What is the exec_serial option? Is it still needed?
+
+    !JEFF Handle exec_serial optional parameter
+    if ( present(exec_serial) ) then
+       exec_serial_flag = exec_serial
+    else
+       ! Default to off
+       exec_serial_flag = .FALSE.
+    endif
+
+    mask = 0
+
+    if (present(iarea)) iarea = 0.d0
+    if (present(ivol)) ivol = 0.d0
+
+!Note - This mask is confusing. Wondering if we should replace it by a series of logical masks.
+
+! Would need the following:
+! glide_mask_has_ice = 1
+! glide_mask_thin_ice = 3
+! glide_mask_ocean = 8 (below sea level, with or without ice; value per glide_mask.inc)
+! glide_mask_land = 4 (complement of glide_mask_ocean; value per glide_mask.inc)
+! glide_mask_grounding_line = 16 (could define in terms of margin and has ice?)
+! glide_mask_margin = 32 (has_ice + at least one neighbor with no ice)
+! glide_mask_dirichlet_bc = 64
+! glide_mask_comp_domain_bnd = 128 (no longer needed with new global BC?)
+! glide_no_ice (complement of glide_has_ice)
+! glide_is_thin
+! glide_is_ocean (ocean + no_ice; change to glide_ocean_icefree or remove?)
+! glide_is_land (land + no_ice; change to glide_land_icefree or remove?)
+! glide_is_ground (land + has_ice)
+! glide_is_float (ocean + has_ice)
+! glide_is_grounding_line (just inside or just outside? Used only in glide_ground)
+! glide_is_margin
+! glide_is_land_margin (margin + land + has_ice)
+! glide_is_calving (margin + ocean + has_ice; change the name to is_marine_margin?)
+! glide_is_marine_ice_edge (margin + (float or GL); may not be needed)
+! glide_is_dirichlet_boundary
+! glide_is_comp_domain_bnd (may not be needed with new global BC?)
+!
+! If we keep the present structure, could change glide_is_land to glide_icefree_land,
+! glide_is_ocean to glide_icefree_ocean
+! Could get by with fewer masks in the code by removing some combinations
+! Could remove *BITS
+
+    !Identify points with any ice
+    where (thck > 0.d0)
+       mask = ior(mask, GLIDE_MASK_HAS_ICE) ! GLIDE_MASK_HAS_ICE = 1; see glide_mask.inc
+    endwhere
+
+    !Identify points where the ice is below the ice dynamics limit
+    where (thck > 0.d0 .and. thck < numerics%thklim)
+       mask = ior(mask, GLIDE_MASK_THIN_ICE) ! GLIDE_MASK_THIN_ICE = 3
+    endwhere
+
+    !Identify points where the ice is floating or where there is open ocean
+    where (topg - eus < con * thck)
+       mask = ior(mask, GLIDE_MASK_OCEAN) ! GLIDE_MASK_OCEAN = 8
+    elsewhere
+       mask = ior(mask, GLIDE_MASK_LAND) ! GLIDE_MASK_LAND = 4
+    endwhere
+
+    if (present(iarea) .and. present(ivol)) then
+       call get_area_vol(thck, numerics%dew, numerics%dns, numerics%thklim, iarea, ivol, exec_serial_flag)
+    end if
+
+    !TODO - Replace the following with a halo call for 'mask', with appropriate global BC?
+
+    maskWithBounds = 0
+    maskWithBounds(1:ewn, 1:nsn) = MASK
+    maskWithBounds(0,1:nsn) = mask(1,:)
+    maskWithBounds(1:ewn,0) = mask(:,1)
+    maskWithBounds(ewn+1,1:nsn) = mask(ewn,:)
+    maskWithBounds(1:ewn,nsn+1) = mask(:,nsn)
+    maskWithBounds(0,0) = mask(1,1)
+    maskWithBounds(ewn+1,nsn+1) = mask(ewn,nsn)
+    maskWithBounds(0,nsn+1) = mask(1,nsn)
+    maskWithBounds(ewn+1,0) = mask(ewn,1)
+
+    ! finding boundaries
+
+    !Note: If halo cells are present, maskWithBounds array may not be needed; can replace with mask array.
+    ! Not sure what happens here when we're computing a mask on the velocity grid.
+
+    do ns = 1,nsn
+       do ew = 1,ewn
+          !Find the grounding line: a grounded-ice cell with a floating 4-neighbor
+          if (GLIDE_IS_GROUND(MASK(ew,ns))) then ! land + has_ice
+             if (GLIDE_IS_FLOAT(maskWithBounds(ew-1,ns)) .or. &
+                 GLIDE_IS_FLOAT(maskWithBounds(ew+1,ns)) .or. &
+                 GLIDE_IS_FLOAT(maskWithBounds(ew,ns-1)) .or. &
+                 GLIDE_IS_FLOAT(maskWithBounds(ew,ns+1))) then
+                MASK(ew,ns) = ior(MASK(ew,ns),GLIDE_MASK_GROUNDING_LINE)
+             end if
+          end if
+
+          ! Ice margin: ice present here, no ice in at least one of the 8 neighbors
+          ! *tb* A point is now masked even if it touches the ocean on one corner.
+          if ( GLIDE_HAS_ICE(mask(ew, ns)) .and. &
+              (GLIDE_NO_ICE(maskWithBounds(ew-1,ns)) .or. GLIDE_NO_ICE(maskWithBounds(ew+1,ns)) .or. &
+               GLIDE_NO_ICE(maskWithBounds(ew,ns-1)) .or. GLIDE_NO_ICE(maskWithBounds(ew,ns+1)) .or. &
+               GLIDE_NO_ICE(maskWithBounds(ew-1,ns-1)) .or. GLIDE_NO_ICE(maskWithBounds(ew-1,ns+1)) .or. &
+               GLIDE_NO_ICE(maskWithBounds(ew+1,ns-1)) .or. GLIDE_NO_ICE(maskWithBounds(ew+1,ns+1)))) then
+             MASK(ew,ns) = ior(MASK(ew,ns),GLIDE_MASK_MARGIN)
+          end if
+
+! The GLIDE_MASK_COMP_DOMAIN_BND condition is currently used in glam_strs2.F90.
+
+          !Mark domain boundaries
+          !if (ns == 1 .or. ns == nsn .or. ew == 1 .or. ew == ewn) then
+          if (parallel_boundary(ew,ewn,ns,nsn)) then
+! SFP: commenting out for now, while trying to get periodic bcs working
+! mask(ew, ns) = ior(mask(ew, ns), GLIDE_MASK_COMP_DOMAIN_BND)
+          end if
+       end do
+    end do
+
+    !JEFF Don't call halo update if running in serial mode
+    !WHL - I think the halo update will now work in serial mode.
+    if (.NOT. exec_serial_flag) then
+       call parallel_halo(mask)
+    endif
+
+  end subroutine glide_set_mask
+
+  subroutine augment_kinbc_mask(mask, kinbcmask)
+
+    ! Set the GLIDE_MASK_DIRICHLET_BC bit wherever a kinematic (Dirichlet)
+    ! boundary condition is prescribed.  These locations cannot be determined
+    ! by the model a priori; they come from a field in a NetCDF file.
+    integer, dimension(:,:), target :: mask
+    integer, dimension(:,:) :: kinbcmask
+
+    integer, dimension(:,:), pointer :: submask
+
+    ! kinbcmask is given on the staggered grid, so it may be one cell smaller
+    ! than mask in each direction when mask lives on the unstaggered grid.
+    ! In that case only the overlapping portion of mask is flagged.
+    if (size(mask, 1) /= size(kinbcmask, 1)) then
+        submask => mask(1:size(mask,1) - 1, 1:size(mask,2) - 1)
+    else
+        submask => mask
+    end if
+
+    where (kinbcmask /= 0)
+        submask = ior(submask, GLIDE_MASK_DIRICHLET_BC)
+    endwhere
+  end subroutine augment_kinbc_mask
+
+  ! Accumulate local ice-covered area (as a cell count) and ice volume over
+  subroutine get_area_vol(thck, dew, dns, thklim, iarea, ivol, exec_serial)
+    use parallel
+    implicit none
+    real(dp), dimension(:,:) :: thck    ! ice thickness
+    real(dp) :: dew, dns, thklim        ! cell size; ice dynamics thickness limit
+    real(dp) :: iarea, ivol, totals(2)  ! caller pre-initializes iarea/ivol
+    logical :: exec_serial              ! skip the global reduction when .true.
+    integer :: ew, ns
+
+    ! count cells thicker than thklim over locally owned points (halo excluded)
+    do ew = 1+lhalo, size(thck,1)-uhalo
+      do ns = 1+lhalo, size(thck,2)-uhalo
+        if (thck(ew,ns) > thklim ) then
+           iarea = iarea + 1
+           ivol = ivol + thck(ew,ns)
+        end if
+      end do
+    end do
+
+    ! convert the cell count / summed thickness to physical area and volume
+    iarea = iarea * dew * dns
+    ivol = ivol * dew * dns
+
+    if (.NOT. exec_serial) then
+       totals = (/ iarea, ivol /)
+       call global_sum(totals)
+       iarea = totals(1)
+       ivol = totals(2)
+    endif
+  end subroutine get_area_vol
+
+  ! Compute the area of floating ice (iareaf) and grounded ice (iareag)
+  ! from the cell mask; grounding-line cells are counted as grounded.
+  ! Unless exec_serial is .true., the partial areas are summed over all
+  ! processors.
+  subroutine calc_iareaf_iareag(dew, dns, mask, iareaf, iareag, exec_serial)
+
+    use parallel
+
+    implicit none
+    real(dp), intent(in) :: dew, dns            ! cell dimensions
+    real(dp), intent(out) :: iareaf, iareag     ! floating / grounded ice area
+    integer, dimension(:,:), intent(in) :: mask ! glide mask array
+    logical, optional :: exec_serial ! If executing in serial in MPI program.
+
+    integer :: ew, ns
+    logical :: serial_run
+    real(dp) :: areas(2)
+    real(dp) :: cell_area
+
+    !TODO - exec_serial option may not be needed; defaults to off
+    serial_run = .FALSE.
+    if ( present(exec_serial) ) serial_run = exec_serial
+
+    cell_area = dew * dns
+    iareaf = 0.d0
+    iareag = 0.d0
+
+    !loop over locally owned scalars
+    do ns = 1+lhalo, size(mask,2)-uhalo
+       do ew = 1+lhalo, size(mask,1)-uhalo
+          if (GLIDE_IS_FLOAT(mask(ew,ns))) then
+             iareaf = iareaf + cell_area
+          else if(GLIDE_IS_GROUND_OR_GNDLINE(mask(ew,ns))) then
+             iareag = iareag + cell_area
+          end if
+       end do
+    end do
+
+    if (.NOT. serial_run) then
+       areas = (/ iareaf, iareag /)
+       call global_sum(areas)
+       iareaf = areas(1)
+       iareag = areas(2)
+    endif
+
+  end subroutine calc_iareaf_iareag
+
+  subroutine glide_marine_margin_normal(thck, mask, marine_bc_normal, exec_serial)
+
+    !TODO - Remove subroutine glide_marine_margin_normal? Old PBJ routine.
+    ! Also can remove calc_normal_45deg
+
+    use parallel
+    use glimmer_physcon, only:pi
+    implicit none
+    !> Derive, from the given mask, the angle of the outward normal to the ice
+    !> shelf at each point on the marine margin (NaN elsewhere).
+    real(dp), dimension(:,:), intent(in) :: thck ! ice thickness (only its shape is used here)
+    integer, dimension(:,:), intent(in) :: mask ! glide mask
+    real(dp), dimension(:,:), intent(out) :: marine_bc_normal ! normal angle in radians; NaN off the calving margin
+    logical, optional :: exec_serial !JEFF If executing in serial in MPI program.
+
+    integer :: i, j, dx, dy, k
+    logical :: exec_serial_flag
+
+    real(dp), dimension(size(thck,1), size(thck,2)) :: direction_x, direction_y
+
+    real(dp), dimension(-1:1, -1:1) :: angle_lookup
+
+    !JEFF Handle exec_serial optional parameter
+    if ( present(exec_serial) ) then
+       exec_serial_flag = exec_serial
+    else
+       ! Default to off
+       exec_serial_flag = .FALSE.
+    endif
+
+    ! angle_lookup(dx,dy): normal angle for each upwinding direction pair
+    angle_lookup(-1, :) = (/ 3*pi/4, pi/2, pi/4 /) !-1
+    angle_lookup( 0, :) = (/ pi, 0D0, 2*pi /) ! 0
+    angle_lookup( 1, :) = (/ 5*pi/4, 3*pi/2, 7*pi/4 /) ! 1
+    call upwind_from_mask(mask, direction_x, direction_y, exec_serial_flag)
+
+    !Set up a thickness variable with "ghost cells" so that we don't go out
+    !of bounds with the vectorized operation below
+    !thckWithBounds(1:size(thck,1), 1:size(thck,2)) = thck
+    !thckWithBounds(:,0) = thckWithBounds(:,1)
+    !thckWithBounds(0,:) = thckWithBounds(1,:)
+    !thckWithBounds(size(thck,1)+1,:) = thckWithBounds(size(thck,1),:)
+    !thckWithBounds(:,size(thck,2)+1) = thckWithBounds(:,size(thck,2))
+    do i = 1, size(mask, 1)
+       do j = 1, size(mask, 2)
+          if (GLIDE_IS_CALVING(mask(i,j))) then
+             dx = int(direction_x(i,j)) ! upwinding direction from upwind_from_mask (-1, 0, or 1)
+             dy = int(direction_y(i,j))
+             if (dx == 0 .and. dy == 0) then
+                write(*,*)"A shelf front point has been identified at:"
+                write(*,*)"x = ",i
+                write(*,*)"y = ",j
+                write(*,*)"But neither x nor y derivatives have been marked as upwinded."
+                write(*,*)"This should never happen, if this error appears it is a bug"
+                write(*,*)"and should be reported."
+                write(*,*)"The mask around this point follows:"
+                write(*,*)"--------------------------"
+
+                !Write a header row with a * in the column corresponding to the center
+                do k = -4, 4
+                   if (k==0) then
+                      write(*,"(A)",advance="no")" *"
+                   else if (i+k > 0 .and. i+k <= size(mask,1)) then
+                      write(*,"(A)",advance="no")" "
+                   end if
+                end do
+                write(*,*)
+
+                ! Dump a 9x9 window of the mask around (i,j), clipped at the edges
+                do k=4, -4, -1
+                   if (j+k > 0 .and. j+k <= size(mask, 2)) then
+                      if (k == 0) then
+                         write(*,*) "*", mask(max(1,i-4):min(size(mask,1),i+4),j+k)
+                      else
+                         write(*,*) " ", mask(max(1,i-4):min(size(mask,1),i+4),j+k)
+                      end if
+                   end if
+                end do
+                write(*,*)"--------------------------"
+                write(*,*)"Have a nice day!"
+                !stop
+             end if
+             marine_bc_normal(i,j) = angle_lookup(dx, dy)
+             !marine_bc_normal(i,j) = calc_normal_45deg(thckWithBounds(i-1:i+1,j-1:j+1))
+          else
+             marine_bc_normal(i,j) = NaN
+          end if
+       end do
+    end do
+    if (.NOT. exec_serial_flag) then
+       call parallel_halo(marine_bc_normal)
+    endif
+  end subroutine
+
+  function calc_normal_45deg(thck3x3)
+    use glimmer_physcon, only: pi
+
+    !> Computes the angle of the normal vector, in radians, for the given
+    !> 3x3 segment of ice geometry.
+    !> The normal is given in increments of 45 degrees (no nicer
+    !> interpolation is currently done)
+    !> This is based on the Payne and Price GLAM code, if/when this is
+    !> integrated into CISM it should probably be refactored to use this.
+    real(dp), dimension(3,3) :: thck3x3
+
+    real(dp) :: calc_normal_45deg
+
+    real(dp), dimension(3,3) :: mask, maskcorners
+    real(dp), dimension(3,3) :: thckmask
+    real(dp), dimension(3) :: testvect
+    real(dp) :: phi, deg2rad
+    integer :: loc_latbc
+
+    deg2rad = pi / 180.0d0
+    loc_latbc = 0
+    phi = 0.d0
+    ! lookup tables of candidate normal angles (degrees) for edge and corner neighbors
+    mask(:,1) = (/ 0.0d0, 180.0d0, 0.0d0 /)
+    mask(:,2) = (/ 270.0d0, 0.0d0, 90.0d0 /)
+    mask(:,3) = (/ 0.0d0, 360.0d0, 0.0d0 /)
+    maskcorners(:,1) = (/ 225.0d0, 0.0d0, 135.0d0 /)
+    maskcorners(:,2) = (/ 0.0d0, 0.0d0, 0.0d0 /)
+    maskcorners(:,3) = (/ 315.0d0, 0.0d0, 45.0d0 /)
+
+    ! specify new value of 'loc' vector such that fwd/bwd diffs. are set up correctly in sparse matrix
+    ! when function 'fillsprsebndy' is called. Also, specify appropriate values for the vectors 'normal'
+    ! and 'fwdorbwd', which specify the orientation of the boundary normal and the direction of forward or
+    ! backward differencing to be done in the lateral boundary condition functions 'normhorizmainbc_lat'
+    ! and 'crosshorizmainbc_lat'
+
+    ! following is algorithm for calculating boundary normal at 45 deg. increments, based on arbitrary
+    ! boundary shape
+
+    ! thckmask marks ice-free neighbors with 1, ice-covered with 0
+    where( thck3x3 /= 0.0d0 )
+        thckmask = 0.0_dp
+    elsewhere( thck3x3 == 0.0d0 )
+        thckmask = 1.0d0
+    endwhere
+
+    testvect = sum( thckmask * mask, 1 )
+
+    !if( up == 3 )then ! temporary code for debugging
+    ! do i = 3,1,-1
+    ! print *, 'thck = ', thck(:,i)
+    ! end do
+    ! print *, ' '
+    !
+    ! do i = 3,1,-1
+    ! print *, 'thckmask = ', thckmask(:,i)
+    ! end do
+    ! print *, ' '
+    !
+    ! print *, 'testvect = ', testvect
+    ! print *, ' '
+    !end if
+
+    ! calculate the angle of the normal in cart. (x,y) system w/ 0 deg. at 12 O'clock, 90 deg. at 3 O'clock, etc.
+    if( sum( sum( thckmask, 1 ) ) == 1.0d0 )then
+        phi = sum( sum( thckmask * maskcorners, 1 ) )
+    else
+        if( any( testvect == 360.0d0 ) )then
+            if( sum( testvect ) == 450.0d0 )then
+                phi = 45.0d0
+            elseif( sum( testvect ) == 630.0d0 )then
+                phi = 315.0d0
+            else
+                phi = 0.0d0
+            end if
+        elseif( all( testvect /= 360 ) )then
+            ! average the non-zero candidate angles (masked sum avoids 0/0)
+            phi = sum( testvect ) / sum( testvect/testvect, testvect /= 0.0d0 )
+        end if
+    end if
+
+    calc_normal_45deg = deg2rad * phi
+
+    !Tim's Note: This appears to actually compute 0 at 6 O'clock according
+    !to Glimmer's coordinate system. 90 deg. is still 3 O'clock.
+    !I'm going to correct for this here rather than dig through the code
+    !above
+    calc_normal_45deg = pi - calc_normal_45deg
+    if (calc_normal_45deg < 0) calc_normal_45deg = calc_normal_45deg + 2*pi
+
+  end function
+
+!TODO - Remove subroutine upwind_from_mask? Not currently used.
+
+ !Fills a field of differencing directions suitable to give a field
+ !derivative routine. Uses centered differencing everywhere except for the
+ !marine ice margin, where upwinding and downwinding is used to avoid
+ !differencing across the boundary.
+
+  subroutine upwind_from_mask(mask, direction_x, direction_y, exec_serial)
+    use parallel
+    integer, dimension(:,:), intent(in) :: mask
+    double precision, dimension(:,:), intent(out) :: direction_x, direction_y
+    logical, optional :: exec_serial !JEFF If executing in serial in MPI program.
+
+    integer :: i,j
+    logical :: exec_serial_flag
+
+    !JEFF Handle exec_serial optional parameter
+    if ( present(exec_serial) ) then
+       exec_serial_flag = exec_serial
+    else
+       ! Default to off
+       exec_serial_flag = .FALSE.
+    endif
+
+    ! 0 = centered differencing everywhere by default
+    direction_x = 0
+    direction_y = 0
+
+    !Detect locations of the marine margin
+    do i = 1, size(mask,1)
+       do j = 1, size(mask,2)
+          if (GLIDE_IS_CALVING(mask(i,j))) then
+             !Detect whether we need to upwind or downwind in the X
+             !direction (first array index, i)
+             if (i > 1) then
+                if (.not. GLIDE_HAS_ICE(mask(i-1,j))) then
+                   direction_x(i,j) = 1
+                end if
+             end if
+
+             if (i < size(mask, 1)) then
+                if (.not. GLIDE_HAS_ICE(mask(i+1,j))) then
+                   direction_x(i,j) = -1
+                end if
+             end if
+
+             !Detect whether we need to upwind or downwind in the Y
+             !direction (second array index, j)
+             if (j > 1) then
+                if (.not. GLIDE_HAS_ICE(mask(i,j-1))) then
+                   direction_y(i,j) = 1
+                end if
+             end if
+
+             if (j < size(mask, 2)) then
+                if (.not. GLIDE_HAS_ICE(mask(i,j+1))) then
+                   direction_y(i,j) = -1
+                end if
+             end if
+
+             !If we are at a point that is "interior" to two other boundary points,
+             !such as the lower right of:
+             !o b i
+             !b b i
+             !(o = ocean, b = boundary, i = interior), then we will not detect the need
+             !to upwind or downwind. However, we still should for consistency with other
+             !mask points (in some cases, not doing so can lead to a singular calculation
+             !at the marine ice front)
+             !
+             !We can think of this operation as avoiding calving points where there is
+             !a non-calving point to upwind into.
+             !
+             !NOTE: We need a better way to detect interior points. Right now I am just using
+             !points that are floating, and that works, but this doesn't work for two reasons:
+             !1. Boundary points are also floating
+             !2. Could fail for a very thin ice shelf
+             if (int(direction_x(i,j)) == 0 .and. int(direction_y(i,j)) == 0 .and. &
+                 i > 1 .and. j > 1 .and. i < size(mask, 1) .and. j < size(mask, 2)) then
+                if (.not. GLIDE_HAS_ICE(mask(i-1, j-1))) then
+                   direction_x(i,j) = 1
+                   direction_y(i,j) = 1
+                else if (.not. GLIDE_HAS_ICE(mask(i-1, j+1))) then
+                   direction_x(i,j) = 1
+                   direction_y(i,j) = -1
+                else if (.not. GLIDE_HAS_ICE(mask(i+1, j-1))) then
+                   direction_x(i,j) = -1
+                   direction_y(i,j) = 1
+                else if (.not. GLIDE_HAS_ICE(mask(i+1, j+1))) then
+                   direction_x(i,j) = -1
+                   direction_y(i,j) = -1
+                end if
+             end if
+          end if
+       end do
+    end do
+
+    if (.NOT. exec_serial_flag) then
+       call parallel_halo(direction_x)
+       call parallel_halo(direction_y)
+    endif
+
+  end subroutine upwind_from_mask
+
+end module glide_mask
diff --git a/components/cism/glimmer-cism/libglide/glide_mask.inc b/components/cism/glimmer-cism/libglide/glide_mask.inc
new file mode 100644
index 0000000000..94576e85f0
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_mask.inc
@@ -0,0 +1,124 @@
+#ifndef GLIDE_MASK_INC
+!Define the guard symbol so this header is only processed once per file.
+#define GLIDE_MASK_INC
+
+!Bits 1:0 - Ice presence (0 if no ice). NOTE: If bit 2 is activated, bit 1 must be activated
+#define GLIDE_MASK_HAS_ICE 1
+#define GLIDE_MASK_THIN_ICE 3
+#define GLIDE_ICE_PRESENCE_BITS 3
+
+!TODO - Is the grounding line mask needed?
+! Also, note that MASK_LAND is redundant given MASK_OCEAN, since one is the complement of the other.
+
+!Bits 4:2 - Type of base (Land or ocean - grounding line has both bits on). The 16 bit specifies grounding line,
+! set up so that those points are treated as grounded)
+#define GLIDE_MASK_LAND 4
+#define GLIDE_MASK_OCEAN 8
+#define GLIDE_MASK_GROUNDING_LINE 16
+
+!Bit 5: Identifies a margin (jump from zero to nonzero thickness). Margin type determined by whether ice grounded or floating.
+#define GLIDE_MASK_MARGIN 32
+
+!Bit 6: Identifies a dirichlet condition. The velocity at points marked with this bit should be held constant.
+#define GLIDE_MASK_DIRICHLET_BC 64
+
+!Bit 7: Identifies a computational domain boundary. These are normally just activated on the edges of the domain,
+!unless there is a domain decomposition (in which case they may be missing)
+#define GLIDE_MASK_COMP_DOMAIN_BND 128
+
+!=======
+! All mask values actually used in code (as defined below) should be made up of some combination of one or more
+! of the above "base" type bits.
+
+!TODO - Rename to GLIDE_ICEFREE_OCEAN?
+!Checks for an iceless square
+!Checks for open ocean with no ice
+#define GLIDE_IS_OCEAN(mask) (iand(mask, GLIDE_MASK_OCEAN) == GLIDE_MASK_OCEAN .and. GLIDE_NO_ICE(mask))
+
+!TODO - Rename to GLIDE_ICEFREE_LAND?
+!Checks for land with no ice
+#define GLIDE_IS_LAND(mask) (iand(mask, GLIDE_MASK_LAND) == GLIDE_MASK_LAND .and. GLIDE_NO_ICE(mask))
+
+!Checks for the presence of any ice, dynamic or not
+#define GLIDE_HAS_ICE(mask) (iand(mask, GLIDE_MASK_HAS_ICE) == GLIDE_MASK_HAS_ICE)
+
+!Checks for a lack of ice
+#define GLIDE_NO_ICE(mask) (iand(mask, GLIDE_MASK_HAS_ICE) == 0)
+
+!Checks for the presence of ice that is below the ice dynamics limit
+#define GLIDE_IS_THIN(mask) (iand(mask,GLIDE_MASK_THIN_ICE) == GLIDE_MASK_THIN_ICE)
+
+!Checks for any ice, dynamic or not, that is on an ice shelf.
+#define GLIDE_IS_FLOAT(mask) (iand(mask,GLIDE_MASK_OCEAN) == GLIDE_MASK_OCEAN .and. GLIDE_HAS_ICE(mask))
+
+!Checks for any ice, dynamic or not, that is grounded
+#define GLIDE_IS_GROUND(mask) (iand(mask,GLIDE_MASK_LAND) == GLIDE_MASK_LAND .and. GLIDE_HAS_ICE(mask))
+
+!TODO - Remove hardwiring?
+! Actually, not sure this is needed
+!Checks for any ice, dynamic or not, that is on the grounding line
+
+!TODO: define from above available combinations rather than hardcode?
+!17 = 16 + 1
+#define GLIDE_IS_GROUNDING_LINE(mask) (iand(mask, 17) == 17)
+
+!TODO - This one probably is not needed - NOTE that removing it affects the iarea/ivol calculation
+!Checks for any ice, dynamic or not, that is either floating *or* on the grounding line
+!TODO: IF NEEDED, define from above available combinations rather than hardcode?
+#define GLIDE_IS_FLOAT_OR_GNDLINE(mask) (iand(mask, 24) > 0 .and. GLIDE_HAS_ICE(mask))
+
+!TODO - This one probably is not needed - NOTE that removing it affects the iarea/ivol calculation
+!Checks for any ice, dynamic or not, that is either grounded *or* on the grounding line
+!TODO: IF NEEDED, define from above available combinations rather than hardcode?
+#define GLIDE_IS_GROUND_OR_GNDLINE(mask) (iand(mask, 20) > 0 .and. GLIDE_HAS_ICE(mask))
+
+!Checks whether this is an ice margin (thickness jumps from 0 to non-zero at this point)
+#define GLIDE_IS_MARGIN(mask) (iand(mask, GLIDE_MASK_MARGIN) == GLIDE_MASK_MARGIN)
+
+!TODO - Not sure this is needed
+!Checks whether this is a margin in contact with the ocean, floating or not
+#define GLIDE_IS_MARINE_ICE_EDGE(mask) (GLIDE_IS_MARGIN(mask) .and. GLIDE_IS_FLOAT_OR_GNDLINE(mask))
+
+!TODO - Not a good name for this mask
+!Checks whether this is a margin in contact with the ocean
+!41 = 32 + 8 + 1
+!TODO: define from above available combinations rather than hardcode?
+#define GLIDE_IS_CALVING(mask) (iand(mask, 41) == 41)
+
+!Checks whether this is a land margin
+!37 = 32 + 4 + 1
+!TODO: define from above available combinations rather than hardcode?
+#define GLIDE_IS_LAND_MARGIN(mask) (iand(mask, 37) == 37)
+
+!TODO - Where are the Dirichlet and domain_bnd masks set in the code?
+!Checks whether a dirichlet boundary has been defined at this point
+#define GLIDE_IS_DIRICHLET_BOUNDARY(mask) (iand(mask, GLIDE_MASK_DIRICHLET_BC) == GLIDE_MASK_DIRICHLET_BC)
+
+!Checks whether we are at the edge of the computational domain *and* there is ice in this square
+
+!TODO: define from above available combinations rather than hardcode?
+
+! 129 = 128 + 1
+#define GLIDE_IS_COMP_DOMAIN_BND(mask) (iand(mask, 129) == 129)
+
+! table of common combinations:
+!1 ! has ice
+!3 ! has thin ice
+!4 ! land
+!5 = 4 + 1 ! grounded ice
+!7 = 4 + 3 ! grounded thin ice
+!8 ! ocean
+!9 = 8 + 1 ! floating ice
+!11 = 8 + 3 ! floating thin ice
+!17 = 16 + 1 ! grounding line with ice
+!19 = 16 + 3 ! grounding line with thin ice
+!21 = 16 + 4 + 1 ! non-thin ice grounding line
+!23 = 16 + 4 + 3 ! thin ice grounding line
+!32 ! margin
+!37 = 32 + 4 + 1 ! land margin
+!39 = 32 + 4 + 3 ! land margin with thin ice
+!41 = 32 + 8 + 1 ! calving margin (ocean)
+!43 = 32 + 8 + 3 ! calving margin (ocean) with thin ice
+!53 = 32 + 21 ! margin AND grounding line
+!55 = 32 + 23 ! margin AND grounding line with thin ice
+
+#endif
diff --git a/components/cism/glimmer-cism/libglide/glide_nan.inc b/components/cism/glimmer-cism/libglide/glide_nan.inc
new file mode 100644
index 0000000000..8f54dc1a05
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_nan.inc
@@ -0,0 +1,8 @@
+#ifndef GLIDE_NAN_INC
+#define GLIDE_NAN_INC
+!IS_INF uses the IS_NAN macro (portable; ISNAN is a GNU extension): x*0 is NaN only for x = +/-Inf or NaN.
+#define IS_NAN(x) ((x) /= (x))
+#define IS_INF(x) (IS_NAN((x)*0))
+#define IS_POS_INF(x) (IS_INF(x) .and. ((x) > 0))
+#define IS_NEG_INF(x) (IS_INF(x) .and. ((x) < 0))
+#endif
diff --git a/components/cism/glimmer-cism/libglide/glide_nc_custom.F90 b/components/cism/glimmer-cism/libglide/glide_nc_custom.F90
new file mode 100644
index 0000000000..6deb235fe9
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_nc_custom.F90
@@ -0,0 +1,187 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_nc_custom.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#define NCO outfile%nc
+
+module glide_nc_custom
+
+ !module for filling in dimension variables
+
+ use glimmer_global, only: dp
+ implicit none
+
+contains
+
+  subroutine glide_nc_fillall(model, outfiles)
+
+    !> Fill the dimension variables of every output file in the linked list,
+    !> skipping any file opened in append mode.
+    use glide_types
+    use glimmer_ncdf
+    use glimmer_ncio
+    implicit none
+
+    type(glide_global_type) :: model
+    type(glimmer_nc_output),pointer,optional :: outfiles !> list head; defaults to model%funits%out_first
+
+    ! cursor used to walk the linked list of output files
+    type(glimmer_nc_output), pointer :: current
+
+    if (present(outfiles)) then
+       current => outfiles
+    else
+       current => model%funits%out_first
+    end if
+
+    do while (associated(current))
+       if (.not. current%append) then
+          call glide_nc_filldvars(current, model)
+       endif
+       current => current%next
+    end do
+
+  end subroutine glide_nc_fillall
+
+  subroutine glide_nc_filldvars(outfile, model)
+
+    ! Write the coordinate (dimension) variables of one output file:
+    ! horizontal grids x0/x1/y0/y1 and the vertical sigma coordinates.
+    use parallel
+    use glide_types
+    use glimmer_ncdf
+    use glimmer_paramets, only : len0
+    implicit none
+
+    type(glimmer_nc_output), pointer :: outfile
+    type(glide_global_type) :: model
+
+    integer i,status,varid
+    real(dp),dimension(model%general%ewn-1) :: x0
+    real(dp),dimension(model%general%ewn) :: x1
+    real(dp),dimension(model%general%nsn-1) :: y0
+    real(dp),dimension(model%general%nsn) :: y1
+
+    ! check if we are still in define mode and if so leave it
+    if (NCO%define_mode) then
+       status = parallel_enddef(NCO%id)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+       NCO%define_mode = .FALSE.
+    end if
+
+    ! horizontal dimensions
+    ! (x1,y1) is the unstaggered scalar grid
+    ! (x0,y0) is the staggered velocity grid
+
+    if (associated(model%funits%in_first)) then
+
+       status = parallel_inq_varid(NCO%id,'x1',varid) ! NOTE(review): inq_varid status is overwritten before being checked - confirm intended
+       status = distributed_put_var(NCO%id,varid,model%general%x1)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+
+       status = parallel_inq_varid(NCO%id,'y1',varid)
+       status= distributed_put_var(NCO%id,varid,model%general%y1)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+
+       !create the x0 and y0 grids from x1 and y1 (staggered points are midpoints)
+
+       status = parallel_inq_varid(NCO%id,'x0',varid)
+       do i=1, model%general%ewn-1
+          x0(i) = (model%general%x1(i)+model%general%x1(i+1))/2.0 ! NOTE(review): 2.0 is a default-real literal; 2.d0 would keep full precision
+       end do
+       status=distributed_put_var(NCO%id,varid,x0)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+
+       status = parallel_inq_varid(NCO%id,'y0',varid)
+       do i=1, model%general%nsn-1
+          y0(i) = (model%general%y1(i)+model%general%y1(i+1))/2.0
+       end do
+       status = distributed_put_var(NCO%id,varid,y0)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+
+    else if(.not. associated(model%funits%in_first)) then
+
+       ! filling coordinate variables from the grid spacing (no input file)
+       status = parallel_inq_varid(NCO%id,'x0',varid)
+       do i=1, model%general%ewn-1
+          x0(i) = ((i-0.5)*model%numerics%dew*len0)
+       end do
+       status=distributed_put_var(NCO%id,varid,x0)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+
+       status = parallel_inq_varid(NCO%id,'y0',varid)
+       do i=1, model%general%nsn-1
+          y0(i) = (i-0.5)*model%numerics%dns*len0
+       end do
+       status=distributed_put_var(NCO%id,varid,y0)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+
+       status = parallel_inq_varid(NCO%id,'x1',varid)
+       do i=1, model%general%ewn
+          x1(i) = (i-1.)*model%numerics%dew*len0
+       end do
+       status=distributed_put_var(NCO%id,varid,x1)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+
+       status = parallel_inq_varid(NCO%id,'y1',varid)
+       do i=1, model%general%nsn
+          y1(i) = (i-1.)*model%numerics%dns*len0
+       end do
+       status=distributed_put_var(NCO%id,varid,y1)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+
+    end if ! associated(model%funits%in_first)
+
+    ! layer interfaces
+
+    status = parallel_inq_varid(NCO%id,'level',varid)
+    status = parallel_put_var(NCO%id,varid,model%numerics%sigma)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+
+    ! layer midpoints
+
+    status = parallel_inq_varid(NCO%id,'staglevel',varid)
+    status = parallel_put_var(NCO%id,varid,model%numerics%stagsigma)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+
+    ! layer midpoints, plus upper and lower surfaces
+    ! (e.g., temperature field in HO dycore)
+
+    status = parallel_inq_varid(NCO%id,'stagwbndlevel',varid)
+    status = parallel_put_var(NCO%id,varid,model%numerics%stagwbndsigma)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+
+    ! lithosphere vertical coordinate (only written when the geothermal model is active)
+
+    if (model%options%gthf == GTHF_COMPUTE) then
+       status = parallel_inq_varid(NCO%id,'lithoz',varid)
+       status= parallel_put_var(NCO%id,varid,model%lithot%deltaz)
+       call nc_errorhandle(__FILE__,__LINE__,status)
+    end if
+
+  end subroutine glide_nc_filldvars
+
+end module glide_nc_custom
diff --git a/components/cism/glimmer-cism/libglide/glide_nonlin.F90 b/components/cism/glimmer-cism/libglide/glide_nonlin.F90
new file mode 100644
index 0000000000..19c443a262
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_nonlin.F90
@@ -0,0 +1,266 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_nonlin.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!Contains helper functions for nonlinear iteration, both to embed in the
+!iteration loop and to serialize the data into the vector format that these
+!functions require.
+!Currently only unstable manifold correction is implemented.
+
+module glide_nonlin
+
+ use glimmer_global, only: dp
+ use glimmer_physcon, only: pi
+ implicit none
+
+contains
+
+  subroutine check_vector_size(start, veclen, ni, nj, nk)  ! fatal abort if vector(start:veclen) cannot hold ni*nj*nk elements
+    use glimmer_log
+    integer :: start, veclen, ni, nj, nk
+    character(256) :: message
+    if (ni*nj*nk > veclen - start + 1) then  ! remaining room in the serialization vector
+        write(message, *) "Need ",ni*nj*nk," elements in vector, starting from element ",start," only have ",veclen - start+1
+        call write_log(message, GM_FATAL)
+    end if
+  end subroutine
+
+
+  subroutine linearize_3d(vector, start, field)  ! serialize a 3D field into vector(start:); start is advanced past the copied data
+    use glimmer_paramets, only: GLC_DEBUG
+    real(dp), dimension(:) :: vector
+    integer :: start
+    real(dp), dimension(:,:,:) :: field
+    integer :: ni, nj, nk
+    integer :: i,j,k
+
+    ni = size(field, 1)
+    nj = size(field, 2)
+    nk = size(field, 3)
+    if (GLC_DEBUG) then  ! bounds check only in debug builds
+       call check_vector_size(start, size(vector), ni, nj, nk)
+    end if
+    do i=1,ni
+       do j=1,nj
+          do k=1,nk  ! k varies fastest in the serialized order (must match delinearize_3d)
+             vector(start) = field(i,j,k)
+             start = start + 1
+          end do
+       end do
+    end do
+  end subroutine
+
+  subroutine linearize_2d(vector, start, field)  ! serialize a 2D field into vector(start:); start is advanced past the copied data
+    use glimmer_paramets, only: GLC_DEBUG
+    real(dp), dimension(:) :: vector
+    integer :: start
+    real(dp), dimension(:,:) :: field
+    integer :: ni, nj
+    integer :: i,j
+
+    ni = size(field, 1)
+    nj = size(field, 2)
+    if (GLC_DEBUG) then  ! bounds check only in debug builds
+       call check_vector_size(start, size(vector), ni, nj, 1)
+    end if
+    do i=1,ni
+       do j=1,nj  ! j varies fastest (must match delinearize_2d)
+          vector(start) = field(i,j)
+          start = start + 1
+       end do
+    end do
+  end subroutine
+
+  subroutine delinearize_3d(vector, start, field)  ! inverse of linearize_3d: copy vector(start:) back into field; start is advanced
+    real(dp), dimension(:) :: vector
+    integer :: start
+    real(dp), dimension(:,:,:) :: field
+    integer :: ni, nj, nk
+    integer :: i,j,k
+
+    ni = size(field, 1)
+    nj = size(field, 2)
+    nk = size(field, 3)
+
+    do i=1,ni
+       do j=1,nj
+          do k=1,nk  ! same traversal order as linearize_3d
+             field(i,j,k) = vector(start)
+             start = start + 1
+          end do
+       end do
+    end do
+  end subroutine
+
+  subroutine delinearize_2d(vector, start, field)  ! inverse of linearize_2d: copy vector(start:) back into field; start is advanced
+    real(dp), dimension(:) :: vector
+    integer :: start
+    real(dp), dimension(:,:) :: field
+    integer :: ni, nj
+    integer :: i,j
+
+    ni = size(field, 1)
+    nj = size(field, 2)
+
+    do i=1,ni
+       do j=1,nj  ! same traversal order as linearize_2d
+          field(i,j) = vector(start)
+          start = start + 1
+       end do
+    end do
+  end subroutine
+
+  function picard_iterate(vec_new, vec_old, vec_size, toler, tot_out)  ! .true. while relative change between iterates exceeds toler
+    logical :: picard_iterate
+
+    real(dp), dimension(:), intent(in) :: vec_new
+    real(dp), dimension(:), intent(inout) :: vec_old
+    integer :: vec_size
+    real(dp) :: toler
+    real(dp), optional, intent(out) :: tot_out  ! optionally returns the relative error
+
+    real(dp) :: err, norm1, norm2
+
+    norm1 = sqrt(sum(vec_new**2))
+    norm2 = sqrt(sum((vec_new-vec_old)**2))
+
+    err = norm2/(norm1 + 1d-10)  ! 1d-10 regularizes against a zero-norm solution
+    picard_iterate = err >= toler
+
+    vec_old = vec_new  ! save current iterate for the next convergence check
+
+    if (present(tot_out)) then
+       tot_out = err
+    end if
+  end function picard_iterate
+
+  function unstable_manifold_correction(vec_new, vec_old, vec_correction, &
+       vec_size, toler, tot_out, theta_out)  ! .true. while another nonlinear iteration is needed (Hindmarsh & Payne UMC)
+    logical :: unstable_manifold_correction
+
+    real(dp), dimension(:), intent(in) :: vec_new
+    real(dp), dimension(:), intent(inout) :: vec_old
+    real(dp), dimension(:), intent(inout) :: vec_correction
+    integer :: vec_size
+    real(dp) :: toler
+    real(dp), optional, intent(out) :: tot_out  ! relative residual sqrt(norm3/norm5)
+    real(dp), optional, intent(out) :: theta_out  ! angle between successive corrections, degrees
+
+    real(dp) :: norm1, norm2, norm3, norm4, norm5
+    real(dp) :: tot
+    real(dp) :: theta
+    real(dp) :: alpha
+    integer :: i
+    real(dp) :: vmean
+    real(dp) :: vstd
+
+    real(dp), dimension(vec_size) :: vec_correction_new
+
+    !Assume we need to iterate again until proven otherwise
+    unstable_manifold_correction = .true.
+
+    norm1 = 0.d0
+    norm2 = 0.d0
+    norm3 = 0.d0
+    norm4 = 0.d0
+    norm5 = 0.d0
+
+    vec_correction_new = vec_new(1:vec_size) - vec_old(1:vec_size)
+    vmean = 0.d0 ; vstd = 0.d0  ! bug fix: these accumulators were previously read while uninitialized
+    do i = 1, vec_size
+       vmean = vmean + abs(vec_correction_new(i))
+    end do
+    vmean = vmean / vec_size
+
+    do i = 1, vec_size
+       vstd = vstd + (vec_correction_new(i) - vmean)**2
+    end do
+    vstd = sqrt(vstd/vec_size)  ! vmean/vstd feed the (currently disabled) outlier filter below
+
+    do i = 1,vec_size
+       norm1 = norm1 + (vec_correction_new(i) - vec_correction(i)) ** 2
+       norm2 = norm2 + vec_correction(i) ** 2
+       !if (abs(vec_correction_new(i)) > vmean * 4. * vstd) then
+       !else
+       norm3 = norm3 + vec_correction_new(i) ** 2
+       !endif
+       norm4 = norm4 + vec_correction(i) * vec_correction_new(i)
+       norm5 = norm5 + vec_new(i) ** 2
+    end do
+
+    !Compute the angle between successive correction vectors
+    if ((abs(norm2) < 1d-10) .or. (abs(norm3) < 1d-10)) then
+       theta=pi/2.
+    else
+       theta=acos(norm4/sqrt(norm2*norm3))
+    endif
+
+    if ( (theta <= (5.*pi/6.) ) ) then
+       !We've requested unstable manifold correction, and the angle is
+       !small (less than 5pi/6, a value identified by Hindmarsh and Payne
+       !to work well). If this is the case, we compute and apply
+       !a correction vector.
+
+       !Compute the error between the last two *correction vectors* (not
+       !the last two iteration values!) (See (51) in Pattyn's paper)
+       if (abs(norm2) > 0.) then !We're just avoiding a divide by 0 here
+          alpha=sqrt(norm1/norm2)
+       else
+          alpha=1.
+       endif
+
+       if (alpha < 1.e-6) then
+          !If the correction vector didn't change much, we're done
+          unstable_manifold_correction = .false.
+       else
+          !Update the previous guess of the velocity with the correction
+          !vector. This throws out the current iteration's computed
+          !velocity, and instead uses the computed correction vector.
+          vec_old = vec_old + vec_correction_new / alpha
+          vec_correction = vec_correction_new
+
+       endif
+    else
+       !Copy this iteration's new values to the old values
+       !for the next iteration - because the angle between correction
+       !vectors is large we do not want to apply a correction, so
+       !we just go Picard instead
+       vec_old = vec_new
+       vec_correction = vec_correction_new
+    endif
+
+    tot=sqrt(norm3/(norm5+1d-10)) !Regularize the denominator so we don't get NAN with simple geometries
+
+    if (present(tot_out)) then
+       tot_out = tot
+    end if
+
+    if (present(theta_out)) then
+       theta_out = theta * 180 / pi
+    end if
+
+    if (tot < toler) unstable_manifold_correction = .false.
+  end function unstable_manifold_correction
+end module glide_nonlin
diff --git a/components/cism/glimmer-cism/libglide/glide_profile.F90 b/components/cism/glimmer-cism/libglide/glide_profile.F90
new file mode 100644
index 0000000000..a76204cb6d
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_profile.F90
@@ -0,0 +1,102 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_profile.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+ ! This module and (profile.F90) is needed for both the Glimmer 1.x profiling functionality
+ ! and the newer GPTL profiling functionality added by Pat Worley during the SEACISM project.
+ ! When GPTL profiling is enabled some of the below routines are used and others
+ ! are ifdef'ed out to allow GPTL-enabled versions to be used instead.
+ ! Currently, the old Glimmer 1.x profiling functionality does nothing more than
+ ! print the total run time, and should eventually be deprecated, at which point
+ ! the glide_profile.F90 and profile.F90 modules could be cleaned up.
+
+module glide_profile
+
+ ! profiling for glide
+
+ implicit none
+
+contains
+
+  subroutine glide_prof_init(model)
+
+    ! initialise glide profiling and register the named glide profile entries
+    use profile
+    use glide_types
+    implicit none
+
+    type(glide_global_type) :: model !> model instance
+
+    if (model%profile%profile_unit == 0) then  ! only initialize the profile file once
+       call profile_init(model%profile,'glide.profile')
+#if (defined PROFILING && ! defined CCSMCOUPLED && ! defined CESMTIMERS)
+       write(model%profile%profile_unit,*) '# take a profile every ',model%numerics%profile_period,' time steps'
+#endif
+    end if
+
+    ! registering glide profiles; each call returns a handle used by glide_prof_start/stop
+    model%glide_prof%geomderv = profile_register(model%profile,'horizontal derivatives')
+    model%glide_prof%hvelos = profile_register(model%profile,'horizontal velocities')
+    model%glide_prof%ice_mask1 = profile_register(model%profile,'ice mask 1')
+    model%glide_prof%temperature = profile_register(model%profile,'temperature')
+    model%glide_prof%ice_evo = profile_register(model%profile,'ice evolution')
+    model%glide_prof%ice_mask2 = profile_register(model%profile,'ice mask 2')
+    model%glide_prof%isos_water = profile_register(model%profile,'isostasy water')
+    model%glide_prof%isos = profile_register(model%profile,'isostasy')
+  end subroutine glide_prof_init
+
+  subroutine glide_prof_start(model,profn)
+    !> start logging profile (thin wrapper around profile_start)
+    use profile
+    use glide_types
+    implicit none
+    type(glide_global_type) :: model !> model instance
+    integer, intent(in) :: profn !> profile number
+
+    call profile_start(model%profile,profn)
+  end subroutine glide_prof_start
+
+  subroutine glide_prof_stop(model,profn)
+    !> write message to profile (stops the timer; logs at most every profile_period steps)
+    use profile
+    use glide_types
+    implicit none
+    type(glide_global_type) :: model !> model instance
+    integer, intent(in) :: profn !> profile number
+
+    !local variables
+    character (len=20) :: timestring
+
+    call profile_stop(model%profile,profn)
+    if (mod(model%numerics%timecounter,model%numerics%profile_period)==0) then  ! log only every profile_period-th timestep
+       write(timestring,*) real(model%numerics%time)
+       call profile_log(model%profile,profn,trim(timestring))
+    end if
+  end subroutine glide_prof_stop
+end module glide_profile
diff --git a/components/cism/glimmer-cism/libglide/glide_setup.F90 b/components/cism/glimmer-cism/libglide/glide_setup.F90
new file mode 100644
index 0000000000..d7801a2d48
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_setup.F90
@@ -0,0 +1,1775 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_setup.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+
+module glide_setup
+
+ ! general routines for initialisation, etc, called from top-level glimmer subroutines
+
+ use glimmer_global, only: dp
+
+ implicit none
+
+ private
+ public :: glide_readconfig, glide_printconfig, glide_scale_params, &
+ glide_load_sigma, glide_read_sigma, glide_calc_sigma
+
+!-------------------------------------------------------------------------
+
+contains
+
+!-------------------------------------------------------------------------
+
+  subroutine glide_readconfig(model,config)
+
+    ! read GLIDE configuration file
+    ! Note: sigma coordinates are handled by a subsequent call to glide_read_sigma
+
+    use glide_types
+    use glimmer_config
+    implicit none
+    type(glide_global_type) :: model !> model instance
+    type(ConfigSection), pointer :: config !> structure holding sections of configuration file
+
+    ! local variables
+    type(ConfigSection), pointer :: section
+
+    ! read grid size parameters
+    call GetSection(config,section,'grid')  ! each section is optional; absent sections keep defaults
+    if (associated(section)) then
+       call handle_grid(section, model)
+    end if
+
+    ! read time parameters
+    call GetSection(config,section,'time')
+    if (associated(section)) then
+       call handle_time(section, model)
+    end if
+
+    ! read options parameters
+    call GetSection(config,section,'options')
+    if (associated(section)) then
+       call handle_options(section, model)
+    end if
+
+    !read options for higher-order computation
+    call GetSection(config,section,'ho_options')
+    if (associated(section)) then
+       call handle_ho_options(section, model)
+    end if
+
+    !read options for computation using an external dycore -- Doug Ranken 04/20/12
+    call GetSection(config,section,'external_dycore_options')
+    if (associated(section)) then
+       call handle_dycore_options(section, model)
+    end if
+
+    ! read parameters
+    call GetSection(config,section,'parameters')
+    if (associated(section)) then
+       call handle_parameters(section, model)
+    end if
+
+    ! read GTHF
+    ! NOTE: The [GTHF] section is ignored unless model%options%gthf = GTHF_COMPUTE
+    if (model%options%gthf == GTHF_COMPUTE) then
+       call GetSection(config,section,'GTHF')
+       if (associated(section)) then
+          call handle_gthf(section, model)
+       end if
+    endif
+
+    ! read isostasy
+    ! NOTE: The [isostasy] section is ignored unless model%options%isostasy = ISOSTASY_COMPUTE
+    if (model%options%isostasy == ISOSTASY_COMPUTE) then
+       call GetSection(config,section,'isostasy')
+       if (associated(section)) then
+          call handle_isostasy(section, model)
+       end if
+    endif
+
+    ! Till options are not currently supported
+    ! read till parameters
+!! call GetSection(config,section,'till_options')
+!! if (associated(section)) then
+!! call handle_till_options(section, model)
+!! end if
+
+    ! Construct the list of necessary restart variables based on the config options
+    ! selected by the user in the config file.
+    ! (Glint restart variables are handled separately by Glint setup routines.)
+    ! This is done regardless of whether or not a restart output file is going
+    ! to be created for this run, but this information is needed before setting up outputs. MJH 1/17/13
+
+    call define_glide_restart_variables(model%options)
+
+  end subroutine glide_readconfig
+
+!-------------------------------------------------------------------------
+
+  subroutine glide_printconfig(model)
+
+    !> print model configuration to log (one print_* call per config section)
+    use glimmer_log
+    use glide_types
+    implicit none
+    type(glide_global_type) :: model !> model instance
+
+    call write_log_div
+    call print_grid(model)
+    call print_time(model)
+    call print_options(model)
+    call print_parameters(model)
+    call print_gthf(model)
+    call print_isostasy(model)
+!! call print_till_options(model) ! disabled for now
+
+  end subroutine glide_printconfig
+
+!-------------------------------------------------------------------------
+
+  subroutine glide_scale_params(model)
+    !> scale parameters (convert config-file units to CISM's nondimensional internal units)
+    use glide_types
+    use glimmer_physcon, only: scyr
+
+    use glimmer_physcon, only: gn
+    use glimmer_paramets, only: thk0, tim0, len0, vel0, vis0, acc0, tau0
+
+    implicit none
+
+    type(glide_global_type) :: model !> model instance
+
+    model%numerics%dttem = model%numerics%ntem * model%numerics%tinc  ! thermal timestep = ntem * dynamic timestep (years)
+
+    ! convert dt and dttem to scaled time units
+    model%numerics%dt = model%numerics%tinc * scyr / tim0
+    model%numerics%dttem = model%numerics%dttem * scyr / tim0
+
+    ! allow for subcycling of ice transport
+    model%numerics%dt_transport = model%numerics%dt / real(model%numerics%subcyc, dp)
+
+    model%numerics%thklim = model%numerics%thklim / thk0
+    model%numerics%thklim_temp = model%numerics%thklim_temp / thk0
+
+    model%numerics%dew = model%numerics%dew / len0
+    model%numerics%dns = model%numerics%dns / len0
+
+    model%numerics%mlimit = model%numerics%mlimit / thk0
+
+    model%numerics%periodic_offset_ew = model%numerics%periodic_offset_ew / thk0
+    model%numerics%periodic_offset_ns = model%numerics%periodic_offset_ns / thk0
+
+    model%velowk%trc0 = vel0 * len0 / (thk0**2)  ! basal traction scale factor
+    model%velowk%btrac_const = model%paramets%btrac_const/model%velowk%trc0/scyr
+    model%velowk%btrac_max = model%paramets%btrac_max / model%velowk%trc0/scyr
+    model%velowk%btrac_slope = model%paramets%btrac_slope*acc0/model%velowk%trc0
+
+    model%paramets%ho_beta_const = model%paramets%ho_beta_const / (tau0/(vel0*scyr))
+
+  end subroutine glide_scale_params
+
+!-------------------------------------------------------------------------
+
+  subroutine glide_read_sigma(model,config)
+
+    ! read sigma levels from configuration file, if present
+    ! called immediately after glide_readconfig
+
+    use glide_types
+    use glimmer_config
+    use glimmer_log
+    implicit none
+
+    type(glide_global_type) :: model !> model instance
+    type(ConfigSection), pointer :: config !> structure holding sections of configuration file
+
+    ! local variables
+    type(ConfigSection), pointer :: section
+
+    ! read sigma levels
+    ! NOTE: The [sigma] section is ignored unless model%options%which_sigma = SIGMA_CONFIG
+
+    if (model%options%which_sigma == SIGMA_CONFIG) then
+       call GetSection(config,section,'sigma')
+       if (associated(section)) then
+          call handle_sigma(section, model)
+       else  ! SIGMA_CONFIG requested but no [sigma] section: fall back gracefully
+          model%options%which_sigma = SIGMA_COMPUTE_GLIDE ! default to standard sigma levels
+          call write_log('No [sigma] section present; will compute standard Glide sigma levels')
+       end if
+    endif
+
+  end subroutine glide_read_sigma
+
+!-------------------------------------------------------------------------
+
+  subroutine glide_load_sigma(model,unit)
+
+    ! Compute sigma coordinates or read them from a file
+    ! Note: This subroutine is called from glide_initialise or glissade_initialise.
+    ! If sigma levels are provided in the config file, then they are read
+    ! in by glide_read_sigma, and model%options%which_sigma is set to
+    ! SIGMA_CONFIG, in which case this subroutine does nothing.
+
+    use glide_types
+    use glimmer_log
+    use glimmer_filenames
+    use parallel
+
+    implicit none
+
+    ! Arguments
+    type(glide_global_type),intent(inout) :: model !> Ice model to use
+    integer, intent(in) :: unit !> Logical file unit to use.
+    !> (Must not already be in use)
+
+    ! Internal variables
+
+    integer :: up,upn
+    logical :: there
+    real(dp) :: level
+
+    ! Beginning of code
+
+    upn=model%general%upn  ! number of sigma levels
+
+    select case(model%options%which_sigma)
+
+    case(SIGMA_COMPUTE_GLIDE) ! compute standard Glide sigma levels
+
+       do up = 1,upn
+          level = real(up-1,kind=dp) / real(upn-1,kind=dp)
+          model%numerics%sigma(up) = glide_calc_sigma(level, 2.d0)
+       end do
+
+       call write_log('Computing Glide sigma levels')
+
+    case(SIGMA_EXTERNAL) ! read from external file
+
+       if (main_task) inquire (exist=there, file=process_path(model%funits%sigfile))
+       call broadcast(there)  ! all tasks must agree before the fatal abort below
+       if (.not.there) then
+          call write_log('Sigma levels file: '//trim(process_path(model%funits%sigfile))// &
+               ' does not exist',GM_FATAL)
+       end if
+       call write_log('Reading sigma file: '//process_path(model%funits%sigfile))
+       if (main_task) then  ! only the main task reads; values broadcast afterward
+          open(unit,file=process_path(model%funits%sigfile))
+          read(unit,'(f9.7)',err=10,end=10) (model%numerics%sigma(up), up=1,upn)
+          close(unit)
+       end if
+       call broadcast(model%numerics%sigma)
+
+    case(SIGMA_CONFIG) ! read from config file
+
+       ! sigma levels have already been read from glide_read_sigma
+
+       call write_log('Getting sigma levels from configuration file')
+
+    case(SIGMA_COMPUTE_EVEN)
+
+       do up = 1,upn
+          model%numerics%sigma(up) = real(up-1,kind=dp) / real(upn-1,kind=dp)
+       enddo
+
+       call write_log('Computing evenly spaced sigma levels')
+
+    case(SIGMA_COMPUTE_PATTYN)
+
+       do up = 1,upn
+          if (up == 1) then  ! endpoints pinned exactly to 0 and 1
+             model%numerics%sigma(up) = 0.d0
+          else if (up == upn) then
+             model%numerics%sigma(up) = 1.d0
+          else
+             level = real(up-1,kind=dp) / real(upn-1,kind=dp)
+             model%numerics%sigma(up) = glide_calc_sigma_pattyn(level)
+          end if
+       enddo
+
+       call write_log('Computing Pattyn sigma levels')
+
+    end select
+
+
+    !NOTE: Glam will always use evenly spaced levels,
+    ! overriding other values of which_sigma
+    ! (including sigma levels in config file)
+
+    if (model%options%whichdycore == DYCORE_GLAM) then ! evenly spaced levels are required
+
+       do up = 1,upn
+          model%numerics%sigma(up) = real(up-1,kind=dp) / real(upn-1,kind=dp)
+       enddo
+
+       call write_log('Using evenly spaced sigma levels for Glam as required')
+
+    endif
+
+    ! Compute stagsigma (= sigma values at layers midpoints)
+
+    model%numerics%stagsigma(1:upn-1) = &
+         (model%numerics%sigma(1:upn-1) + model%numerics%sigma(2:upn)) / 2.0_dp
+
+    ! Compute stagwbndsigma, adding the boundaries to stagsigma
+
+    model%numerics%stagwbndsigma(1:upn-1) = model%numerics%stagsigma(1:upn-1)
+    model%numerics%stagwbndsigma(0) = 0.d0
+    model%numerics%stagwbndsigma(upn) = 1.d0
+
+    call print_sigma(model)
+
+    return  ! normal exit; label 10 below is the sigma-file read error handler
+
+10 call write_log('something wrong with sigma coord file',GM_FATAL)
+
+  end subroutine glide_load_sigma
+
+!--------------------------------------------------------------------------------
+
+  function glide_calc_sigma(x,n)
+
+    implicit none
+    real(dp) :: glide_calc_sigma, x, n
+
+    glide_calc_sigma = (1-(x+1)**(-n)) / (1-2**(-n))  ! maps x in [0,1] to [0,1]; n>0 concentrates levels near the upper surface
+
+  end function glide_calc_sigma
+
+!--------------------------------------------------------------------------------
+
+  function glide_calc_sigma_pattyn(x)
+
+    ! Implements an alternate set of sigma levels that encourages better
+    ! convergence for higher-order velocities
+
+    implicit none
+    real(dp) :: glide_calc_sigma_pattyn, x
+
+    glide_calc_sigma_pattyn = &  ! quadratic fit in x; caller pins the endpoints to exactly 0 and 1
+         (-2.5641025641d-4)*(41d0*x)**2+3.5256410256d-2*(41d0*x)-8.0047080075d-13
+
+  end function glide_calc_sigma_pattyn
+
+!--------------------------------------------------------------------------------
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! private procedures
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! grid sizes
+
+  subroutine handle_grid(section, model)  ! parse the [grid] config section
+    use glimmer_config
+    use glide_types
+    use glimmer_filenames
+    implicit none
+    type(ConfigSection), pointer :: section
+    type(glide_global_type) :: model
+
+    call GetValue(section,'ewn',model%general%ewn)
+    call GetValue(section,'nsn',model%general%nsn)
+    call GetValue(section,'upn',model%general%upn)
+    call GetValue(section,'dew',model%numerics%dew)
+    call GetValue(section,'dns',model%numerics%dns)
+    call GetValue(section,'sigma_file',model%funits%sigfile)
+
+    !WHL - added global boundary conditions
+    call GetValue(section,'global_bc',model%general%global_bc)
+
+    ! We set this flag to one to indicate we've got a sigfile name.
+    ! A warning/error is generated if sigma levels are specified in some other way
+    ! and mangle the name
+    if (trim(model%funits%sigfile) /= '') then  ! a sigma_file entry overrides which_sigma
+       model%funits%sigfile = filenames_inputname(model%funits%sigfile)
+       model%options%which_sigma = SIGMA_EXTERNAL
+    end if
+
+  end subroutine handle_grid
+
+!--------------------------------------------------------------------------------
+
+  subroutine print_grid(model)  ! log the [grid] settings
+    use glide_types
+    use glimmer_log
+    implicit none
+    type(glide_global_type) :: model
+    character(len=512) :: message
+
+
+    call write_log('Grid specification')
+    call write_log('------------------')
+    write(message,*) 'ewn : ',model%general%ewn
+    call write_log(trim(message))
+    write(message,*) 'nsn : ',model%general%nsn
+    call write_log(trim(message))
+    write(message,*) 'upn : ',model%general%upn
+    call write_log(trim(message))
+    write(message,*) 'EW grid spacing : ',model%numerics%dew
+    call write_log(trim(message))
+    write(message,*) 'NS grid spacing : ',model%numerics%dns
+    call write_log(trim(message))
+    if (model%general%global_bc==GLOBAL_BC_PERIODIC) then
+       write(message,*) 'Periodic global boundary conditions'
+       call write_log(trim(message))
+    elseif (model%general%global_bc==GLOBAL_BC_OUTFLOW) then
+       write(message,*) 'Outflow global boundary conditions; scalars in global halo will be set to zero'
+       call write_log(trim(message))
+    endif
+
+    write(message,*) 'sigma file : ',trim(model%funits%sigfile)
+    call write_log(trim(message))
+    call write_log('')
+
+  end subroutine print_grid
+
+!--------------------------------------------------------------------------------
+
+ ! time
+  subroutine handle_time(section, model)  ! parse the [time] config section
+    use glimmer_config
+    use glide_types
+    implicit none
+    type(ConfigSection), pointer :: section
+    type(glide_global_type) :: model
+
+!TODO - Make the ice dynamic timestep more flexible.
+! To handle timesteps both greater and less than one year, we may want to
+! define ice_dt_option and ice_dt_count in place of the current dt.
+! For instance, ice_dt_option could be either 'nyears' or 'steps_per_year'.
+! For timesteps < 1 year, we would use ice_dt_option = 'steps_per_year'.
+! This would ensure that the ice sheet dynamic timestep divides evenly
+! into the mass balance timestep (= 1 year) when running with Glint.
+    call GetValue(section,'tstart',model%numerics%tstart)
+    call GetValue(section,'tend',model%numerics%tend)
+    call GetValue(section,'dt',model%numerics%tinc)  ! config 'dt' is stored as tinc (years)
+    call GetValue(section,'subcyc',model%numerics%subcyc)
+    call GetValue(section,'ntem',model%numerics%ntem)
+    call GetValue(section,'profile',model%numerics%profile_period)
+
+    call GetValue(section,'dt_diag',model%numerics%dt_diag)
+    call GetValue(section,'idiag',model%numerics%idiag)
+    call GetValue(section,'jdiag',model%numerics%jdiag)
+
+    !WHL - ndiag replaced by dt_diag, but retained for backward compatibility
+    call GetValue(section,'ndiag',model%numerics%ndiag)
+
+  end subroutine handle_time
+
+!--------------------------------------------------------------------------------
+
+  subroutine print_time(model)  ! log the [time] settings and validate ntem
+    use glide_types
+    use glimmer_log
+    implicit none
+    type(glide_global_type) :: model
+    character(len=100) :: message
+
+    call write_log('Time steps')
+    call write_log('----------')
+    write(message,*) 'start time (yr) : ',model%numerics%tstart
+    call write_log(message)
+    write(message,*) 'end time (yr) : ',model%numerics%tend
+    call write_log(message)
+    write(message,*) 'time step (yr) : ',model%numerics%tinc
+    call write_log(message)
+    write(message,*) 'thermal dt factor : ',model%numerics%ntem
+    call write_log(message)
+    if ( (model%numerics%ntem < 1.0d0) .or. &
+         (floor(model%numerics%ntem) /= model%numerics%ntem) ) then  ! ntem must be a positive whole number
+       call write_log('ntem is a multiplier on the basic time step. It should be a positive integer. Aborting.',GM_FATAL)
+    endif
+    write(message,*) 'profile frequency : ',model%numerics%profile_period
+    call write_log(message)
+
+    if (model%numerics%dt_diag > 0.d0) then
+       write(message,*) 'diagnostic time (yr): ',model%numerics%dt_diag
+       call write_log(message)
+       !TODO - Verify that this mod statement works for real numbers. Might need different logic.
+       if (mod(model%numerics%dt_diag, model%numerics%tinc) > 1.e-11) then
+          write(message,*) 'Warning: diagnostic interval does not divide evenly into ice timestep dt'
+          call write_log(message)
+       endif
+    endif
+
+    !WHL - ndiag replaced by dt_diag, but retained for backward compatibility
+    if (model%numerics%ndiag > 0) then
+       write(message,*) 'diag time (steps) : ',model%numerics%ndiag
+       call write_log(message)
+    endif
+
+    !WHL - Written to log in glide_init_diag
+! write(message,*) 'idiag : ',model%numerics%idiag
+! call write_log(message)
+! write(message,*) 'jdiag : ',model%numerics%jdiag
+! call write_log(message)
+
+    call write_log('')
+
+  end subroutine print_time
+
+!--------------------------------------------------------------------------------
+
+ ! options
+  subroutine handle_options(section, model)  ! parse the [options] config section
+
+    use glimmer_config
+    use glide_types
+
+    implicit none
+    type(ConfigSection), pointer :: section
+    type(glide_global_type) :: model
+
+    call GetValue(section,'dycore',model%options%whichdycore)
+    call GetValue(section,'evolution',model%options%whichevol)
+    call GetValue(section,'temperature',model%options%whichtemp)
+    call GetValue(section,'temp_init',model%options%temp_init)
+    call GetValue(section,'flow_law',model%options%whichflwa)
+    call GetValue(section,'slip_coeff',model%options%whichbtrc)
+    call GetValue(section,'basal_water',model%options%whichbwat)
+    call GetValue(section,'basal_mass_balance',model%options%basal_mbal)
+    call GetValue(section,'gthf',model%options%gthf)
+    call GetValue(section,'isostasy',model%options%isostasy)
+    call GetValue(section,'marine_margin',model%options%whichmarn)
+    call GetValue(section,'vertical_integration',model%options%whichwvel)
+    call GetValue(section,'topo_is_relaxed',model%options%whichrelaxed)
+    call GetValue(section,'periodic_ew',model%options%periodic_ew)
+    call GetValue(section,'sigma',model%options%which_sigma)
+    call GetValue(section,'ioparams',model%funits%ncfile)
+
+    ! Both terms 'hotstart' and 'restart' are supported in the config file,
+    ! but if they are both supplied for some reason, then restart will be used.
+    ! 'restart' is the preferred term moving forward.
+    ! 'hotstart' is retained for backward compatibility.
+    call GetValue(section,'hotstart',model%options%is_restart)
+    call GetValue(section,'restart',model%options%is_restart)  ! read last so 'restart' wins over 'hotstart'
+
+    ! These are not currently supported
+    !call GetValue(section, 'use_plume',model%options%use_plume)
+    !call GetValue(section,'basal_proc',model%options%which_bproc)
+
+  end subroutine handle_options
+
+!--------------------------------------------------------------------------------
+
+ !Higher order options
+  subroutine handle_ho_options(section, model)  ! parse the [ho_options] (higher-order dycore) config section
+    use glimmer_config
+    use glide_types
+    implicit none
+    type(ConfigSection), pointer :: section
+    type(glide_global_type) :: model
+
+    call GetValue(section, 'which_ho_efvs', model%options%which_ho_efvs)
+    call GetValue(section, 'which_ho_disp', model%options%which_ho_disp)
+    call GetValue(section, 'which_ho_babc', model%options%which_ho_babc)
+    call GetValue(section, 'which_ho_resid', model%options%which_ho_resid)
+    call GetValue(section, 'which_ho_nonlinear', model%options%which_ho_nonlinear)
+    call GetValue(section, 'which_ho_sparse', model%options%which_ho_sparse)
+    call GetValue(section, 'which_ho_approx', model%options%which_ho_approx)
+    call GetValue(section, 'which_ho_precond', model%options%which_ho_precond)
+    call GetValue(section, 'which_ho_gradient', model%options%which_ho_gradient)
+    call GetValue(section, 'which_ho_gradient_margin', model%options%which_ho_gradient_margin)
+    call GetValue(section, 'which_ho_assemble_beta', model%options%which_ho_assemble_beta)
+    call GetValue(section, 'which_ho_ground', model%options%which_ho_ground)
+    call GetValue(section, 'glissade_maxiter', model%options%glissade_maxiter)
+
+  end subroutine handle_ho_options
+
+!--------------------------------------------------------------------------------
+
+ ! Handles external dycore options -- Doug Ranken 03/26/12
+ ! Read the external-dycore settings from the config file and, when an
+ ! external dycore is selected, override model%options%whichdycore to the
+ ! matching internal dycore constant.
+ subroutine handle_dycore_options(section, model)
+ use glimmer_config
+ use glide_types
+ use glimmer_log
+ implicit none
+ type(ConfigSection), pointer :: section ! config-file section to read
+ type(glide_global_type) :: model ! model instance receiving the options
+ character(len=512) :: message
+
+ call GetValue(section, 'external_dycore_type', model%options%external_dycore_type)
+ call GetValue(section, 'dycore_input_file', model%options%dycore_input_file)
+
+ ! Use the named dycore constants from glide_types rather than the bare
+ ! magic numbers 4 and 3 (they must stay in sync with glide_types).
+ if (model%options%external_dycore_type .eq. 1) model%options%whichdycore = DYCORE_BISICLES
+ if (model%options%external_dycore_type .eq. 2) model%options%whichdycore = DYCORE_ALBANYFELIX
+
+ ! Report through the standard log rather than a raw print, consistent
+ ! with the logging convention used throughout this file.
+ write(message,*) 'In handle_dycore_options, external dycore type, input file = ', &
+ model%options%external_dycore_type,trim(model%options%dycore_input_file)
+ call write_log(message)
+ ! write(message,*) 'In handle_dycore_options, whichdycore = ',model%options%whichdycore
+ end subroutine handle_dycore_options
+
+!--------------------------------------------------------------------------------
+
+ subroutine print_options(model)
+
+ ! Write all basic (Glide) and higher-order model options to the log file,
+ ! aborting with GM_FATAL when an option is out of range or when a
+ ! combination of options is unsupported.
+ ! Fixes relative to the previous version:
+ ! * which_ho_sparse range check was off by one (array lower bound is -1,
+ ! so the largest valid index is size-2, as in the disp/approx checks).
+ ! * In the higher-order section, each option is now range-checked BEFORE
+ ! it is used to index its description array, so an invalid config value
+ ! produces a fatal log message instead of an out-of-bounds array access
+ ! (matching the check-then-write pattern of the basic-options section).
+
+ use glide_types
+ use glimmer_log
+
+ use parallel
+
+ implicit none
+
+ type(glide_global_type) :: model
+ character(len=500) :: message
+
+ ! basic Glide/Glimmer options
+
+ character(len=*), dimension(0:4), parameter :: dycore = (/ &
+ 'glide ', & ! Glimmer SIA
+ 'glam ', & ! Payne-Price finite difference
+ 'glissade ', & ! prototype finite element
+ 'albany-felix ', & ! External Albany-FELIX finite element
+ 'bisicles ' /) ! External BISICLES-Chombo FVM
+
+ character(len=*), dimension(0:5), parameter :: evolution = (/ &
+ 'pseudo-diffusion ', &
+ 'ADI scheme ', &
+ 'iterated diffusion ', &
+ 'incremental remapping ', &
+ '1st order upwind ', &
+ 'thickness fixed at initial value ' /)
+
+ character(len=*), dimension(0:3), parameter :: temperature = (/ &
+ 'isothermal ', &
+ 'prognostic temperature', &
+ 'constant in time ', &
+ 'prognostic enthalpy ' /)
+
+ character(len=*), dimension(0:2), parameter :: temp_init = (/ &
+ 'set to 0 C ', &
+ 'set to surface air temp', &
+ 'linear vertical profile' /)
+
+ character(len=*), dimension(0:2), parameter :: flow_law = (/ &
+ 'const 1e-16 Pa^-n a^-1 ', &
+ 'Paterson and Budd (T = -5 C)', &
+ 'Paterson and Budd ' /)
+
+ !TODO - Rename slip_coeff to something like which_btrc?
+ character(len=*), dimension(0:5), parameter :: slip_coeff = (/ &
+ 'no basal sliding ', &
+ 'constant basal traction', &
+ 'constant where bwat > 0', &
+ 'constant where T = Tpmp', &
+ 'linear function of bmlt', &
+ 'tanh function of bwat ' /)
+
+ character(len=*), dimension(0:4), parameter :: basal_water = (/ &
+ 'none ', &
+ 'local water balance ', &
+ 'local + steady-state flux', &
+ 'Constant value (= 10 m) ', &
+ 'ocean penetration ' /)
+!! 'From basal proc model '/) ! not supported
+
+ ! basal proc model is disabled for now.
+!! character(len=*), dimension(0:2), parameter :: which_bproc = (/ &
+!! 'Basal proc mod disabled ' , &
+!! 'Basal proc, high res. ' , &
+!! 'Basal proc, fast calc. ' /)
+ character(len=*), dimension(0:0), parameter :: which_bproc = (/ &
+ 'Basal process model disabled ' /)
+
+ character(len=*), dimension(0:1), parameter :: b_mbal = (/ &
+ 'not in continuity eqn', &
+ 'in continuity eqn ' /)
+
+ ! NOTE: Set gthf = 1 in the config file to read the geothermal heat flux from an input file.
+ ! Otherwise it will be overwritten, even if the 'bheatflx' field is present.
+
+ character(len=*), dimension(0:2), parameter :: gthf = (/ &
+ 'uniform geothermal flux ', &
+ 'read flux from file, if present ', &
+ 'compute flux from diffusion eqn ' /)
+
+ ! NOTE: This option has replaced the old do_isos option
+ character(len=*), dimension(0:1), parameter :: isostasy = (/ &
+ 'no isostasy calculation ', &
+ 'compute isostasy with model ' /)
+
+ character(len=*), dimension(0:5), parameter :: marine_margin = (/ &
+ 'do nothing at marine margin ', &
+ 'remove all floating ice ', &
+ 'remove fraction of floating ice ', &
+ 'relaxed bedrock threshold ', &
+ 'present bedrock threshold ', &
+ 'Huybrechts grounding line scheme' /)
+
+ character(len=*), dimension(0:1), parameter :: vertical_integration = (/ &
+ 'standard ', &
+ 'obey upper BC' /)
+
+ ! higher-order options
+
+ character(len=*), dimension(0:2), parameter :: ho_whichefvs = (/ &
+ 'constant value ', &
+ 'multiple of flow factor ', &
+ 'nonlinear, from eff strain rate' /)
+
+ character(len=*), dimension(-1:1), parameter :: ho_whichdisp = (/ &
+ 'no dissipation ', &
+ '0-order SIA ', &
+ 'first-order model (Blatter-Pattyn)' /)
+
+ character(len=*), dimension(0:10), parameter :: ho_whichbabc = (/ &
+ 'constant beta ', &
+ 'simple pattern of beta ', &
+ 'till yield stress (Picard) ', &
+ 'function of bwat ', &
+ 'no slip (using large B^2) ', &
+ 'beta passed from CISM ', &
+ 'no slip (Dirichlet implementation) ', &
+ 'till yield stress (Newton) ', &
+ 'beta as in ISMIP-HOM test C ', &
+ 'power law using effective pressure ', &
+ 'Coulomb friction law using effec press ' /)
+
+ character(len=*), dimension(0:1), parameter :: which_ho_nonlinear = (/ &
+ 'use standard Picard iteration ', &
+ 'use JFNK '/)
+
+ character(len=*), dimension(0:4), parameter :: ho_whichresid = (/ &
+ 'max value ', &
+ 'max value ignoring ubas ', &
+ 'mean value ', &
+ 'L2 norm of Ax-b = resid ', &
+ 'relative L2 norm, |Ax-b|/|b|' /)
+
+ character(len=*), dimension(-1:4), parameter :: ho_whichsparse = (/ &
+ 'PCG with incomplete Cholesky preconditioner', &
+ 'BiCG with LU preconditioner ', &
+ 'GMRES with LU preconditioner ', &
+ 'Native PCG solver, standard ', &
+ 'Native PCG solver, Chronopoulos-Gear ', &
+ 'Trilinos interface '/)
+
+ character(len=*), dimension(-1:3), parameter :: ho_whichapprox = (/ &
+ 'SIA only (glissade_velo_sia) ', &
+ 'SIA only (glissade_velo_higher) ', &
+ 'SSA only (glissade_velo_higher) ', &
+ 'Blatter-Pattyn HO (glissade_velo_higher) ', &
+ 'Depth-integrated L1L2 (glissade_velo_higher)' /)
+
+ character(len=*), dimension(0:2), parameter :: ho_whichprecond = (/ &
+ 'No preconditioner (glissade PCG) ', &
+ 'Diagonal preconditioner (glissade PCG) ', &
+ 'SIA preconditioner (glissade PCG) ' /)
+
+ character(len=*), dimension(0:1), parameter :: ho_whichgradient = (/ &
+ 'centered gradient (glissade dycore) ', &
+ 'upstream gradient (glissade dycore) ' /)
+
+ character(len=*), dimension(0:2), parameter :: ho_whichgradient_margin = (/ &
+ 'all neighbor cells in gradient (glissade dycore) ', &
+ 'ice-covered &/or land cells in gradient (glissade dycore)', &
+ 'only ice-covered cells in gradient (glissade dycore) ' /)
+
+ character(len=*), dimension(0:1), parameter :: ho_whichassemble_beta = (/ &
+ 'standard finite-element assembly (glissade dycore) ', &
+ 'use local beta for assembly (glissade dycore) ' /)
+
+ character(len=*), dimension(0:2), parameter :: ho_whichground = (/ &
+ 'f_ground = 0 or 1; no GLP (glissade dycore) ', &
+ 'f_ground = 1 for all active cells (glissade dycore)', &
+ '0 <= f_ground <= 1, based on GLP (glissade dycore) ' /)
+
+ call write_log('GLIDE options')
+ call write_log('-------------')
+
+ write(message,*) 'I/O parameter file : ',trim(model%funits%ncfile)
+ call write_log(message)
+
+ if (model%options%whichdycore < 0 .or. model%options%whichdycore >= size(dycore)) then
+ call write_log('Error, dycore option out of range',GM_FATAL)
+ end if
+ write(message,*) 'Dycore : ',model%options%whichdycore,dycore(model%options%whichdycore)
+ call write_log(message)
+
+ ! unsupported dycore options
+ if (model%options%whichdycore == DYCORE_GLAM) then
+ call write_log('Glam dycore is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+ if (model%options%whichdycore == DYCORE_ALBANYFELIX) then
+ call write_log('Albany-FELIX dycore is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+ if (model%options%whichdycore == DYCORE_BISICLES) then
+ call write_log('BISICLES dycore is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+
+ ! Forbidden options associated with the Glide dycore
+ if (model%options%whichdycore == DYCORE_GLIDE) then
+
+ if (model%options%whichevol==EVOL_INC_REMAP .or. &
+ model%options%whichevol==EVOL_UPWIND .or. &
+ model%options%whichevol==EVOL_NO_THICKNESS) then
+ call write_log('Error, Glam/glissade thickness evolution options cannot be used with Glide dycore', GM_FATAL)
+ endif
+
+ if (model%options%whichtemp == TEMP_ENTHALPY) then
+ call write_log('Error, Enthalpy scheme cannot be used with Glide dycore', GM_FATAL)
+ endif
+
+ if (tasks > 1) then
+ call write_log('Error, Glide dycore not supported for runs with more than one processor', GM_FATAL)
+ end if
+
+ if (model%options%whichevol==EVOL_ADI) then
+ call write_log('Warning, exact restarts are not currently possible with ADI evolution', GM_WARNING)
+ endif
+
+ else ! forbidden evolution options with dycores other than Glide
+
+ if (model%options%whichevol==EVOL_PSEUDO_DIFF .or. &
+ model%options%whichevol==EVOL_ADI .or. &
+ model%options%whichevol==EVOL_DIFFUSION) then
+ call write_log('Error, Glide thickness evolution options cannot be used with glam/glissade dycore', GM_FATAL)
+ endif
+
+ endif
+
+ ! Forbidden options for running in parallel
+ if (tasks > 1 .and. (model%options%which_ho_sparse==HO_SPARSE_BICG .or. &
+ model%options%which_ho_sparse==HO_SPARSE_GMRES .or. &
+ model%options%which_ho_sparse==HO_SPARSE_PCG_INCH) ) then
+ call write_log('Error, SLAP solver not supported for more than one processor', GM_FATAL)
+ end if
+
+ if (tasks > 1 .and. model%options%which_ho_babc==HO_BABC_ISHOMC) then
+ call write_log('Error, ISHOM basal BCs not supported for more than one processor', GM_FATAL)
+ endif
+
+ if (tasks > 1 .and. model%options%whichbwat==BWATER_FLUX) then
+ call write_log('Error, flux-based basal water option not supported for more than one processor', GM_FATAL)
+ endif
+
+ ! Forbidden options associated with Glam and Glissade dycores
+
+ if (model%options%whichdycore == DYCORE_GLISSADE) then
+ if ( (model%options%which_ho_approx == HO_APPROX_SSA .or. &
+ model%options%which_ho_approx == HO_APPROX_L1L2) &
+ .and. &
+ (model%options%which_ho_sparse == HO_SPARSE_PCG_STANDARD .or. &
+ model%options%which_ho_sparse == HO_SPARSE_PCG_CHRONGEAR) ) then
+ if (model%options%which_ho_precond == HO_PRECOND_SIA) then
+ call write_log('Error, cannot use SIA preconditioning for 2D solve', GM_FATAL)
+ endif
+ endif
+ endif
+
+ if (model%options%whichdycore == DYCORE_GLISSADE) then
+ if ( model%options%which_ho_approx == HO_APPROX_LOCAL_SIA .and. &
+ model%options%which_ho_disp == HO_DISP_FIRSTORDER ) then
+ call write_log('Error, cannot use first-order dissipation with local SIA solver', GM_FATAL)
+ endif
+ endif
+
+ if (model%options%whichdycore /= DYCORE_GLISSADE) then
+ if (model%options%which_ho_sparse == HO_SPARSE_PCG_STANDARD .or. &
+ model%options%which_ho_sparse == HO_SPARSE_PCG_CHRONGEAR) then
+ call write_log('Error, native PCG solver requires glissade dycore', GM_FATAL)
+ endif
+ endif
+
+ if (model%options%whichdycore == DYCORE_GLAM) then
+ if (model%options%which_ho_approx == HO_APPROX_LOCAL_SIA .or. &
+ model%options%which_ho_approx == HO_APPROX_SIA .or. &
+ model%options%which_ho_approx == HO_APPROX_SSA .or. &
+ model%options%which_ho_approx == HO_APPROX_L1L2) then
+ call write_log('Error, Glam dycore must use higher-order Blatter-Pattyn approximation', GM_FATAL)
+ endif
+ endif
+
+ ! Config specific to Albany-Felix dycore
+ if (model%options%whichdycore == DYCORE_ALBANYFELIX) then
+ call write_log('Warning, Albany-FELIX dycore requires external libraries, and it is still in development!!!', GM_WARNING)
+ endif
+
+ !NOTE : Old option 3 (TEMP_REMAP_ADV) has been removed.
+ ! If this has been set, then change to option 1 (TEMP_PROGNOSTIC), which applies to any dycore.
+
+ if (model%options%whichtemp < 0 .or. model%options%whichtemp >= size(temperature)) then
+ call write_log('Error, temperature option out of range',GM_FATAL)
+ end if
+ write(message,*) 'temperature calculation : ',model%options%whichtemp,temperature(model%options%whichtemp)
+ call write_log(message)
+
+ ! unsupported temperature options
+ if (model%options%whichtemp == TEMP_ENTHALPY) then
+ call write_log('Enthalpy-based formulation for solving temperature evolution is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+
+ if (model%options%temp_init < 0 .or. model%options%temp_init >= size(temp_init)) then
+ call write_log('Error, temp_init option out of range',GM_FATAL)
+ end if
+ ! Note: If reading temperature from an input or restart file, the temp_init option is overridden,
+ ! in which case it could be confusing here to write the option to the log file.
+ ! The method actually used is written to the log file by glide_init_temp.
+
+ if (model%options%whichflwa < 0 .or. model%options%whichflwa >= size(flow_law)) then
+ call write_log('Error, flow_law out of range',GM_FATAL)
+ end if
+ write(message,*) 'flow law : ',model%options%whichflwa,flow_law(model%options%whichflwa)
+ call write_log(message)
+
+ if (model%options%whichbwat < 0 .or. model%options%whichbwat >= size(basal_water)) then
+ call write_log('Error, basal_water out of range',GM_FATAL)
+ end if
+ write(message,*) 'basal_water : ',model%options%whichbwat,basal_water(model%options%whichbwat)
+ call write_log(message)
+
+ ! unsupported basal_water options
+ if (model%options%whichbwat == BWATER_FLUX) then
+ call write_log('Steady state routing basal_water option is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+ if (model%options%whichbwat == BWATER_OCEAN_PENETRATION) then
+ call write_log('Ocean penetration basal_water option is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+
+ if (model%options%whichmarn < 0 .or. model%options%whichmarn >= size(marine_margin)) then
+ call write_log('Error, marine_margin out of range',GM_FATAL)
+ end if
+ write(message,*) 'marine_margin : ', model%options%whichmarn, marine_margin(model%options%whichmarn)
+ call write_log(message)
+
+ if (model%options%whichbtrc < 0 .or. model%options%whichbtrc >= size(slip_coeff)) then
+ call write_log('Error, slip_coeff out of range',GM_FATAL)
+ end if
+
+ !WHL - Currently, not all basal traction options are supported for the Glissade SIA solver
+ if (model%options%whichdycore == DYCORE_GLISSADE .and. model%options%which_ho_approx == HO_APPROX_LOCAL_SIA) then
+ if (model%options%whichbtrc > BTRC_CONSTANT_TPMP) then
+ call write_log('Error, slip_coeff out of range for Glissade dycore',GM_FATAL)
+ end if
+ endif
+
+ write(message,*) 'slip_coeff : ', model%options%whichbtrc, slip_coeff(model%options%whichbtrc)
+ call write_log(message)
+
+ if (model%options%whichevol < 0 .or. model%options%whichevol >= size(evolution)) then
+ call write_log('Error, evolution out of range',GM_FATAL)
+ end if
+
+ write(message,*) 'evolution : ', model%options%whichevol, evolution(model%options%whichevol)
+ call write_log(message)
+
+ if (model%options%whichwvel < 0 .or. model%options%whichwvel >= size(vertical_integration)) then
+ call write_log('Error, vertical_integration out of range',GM_FATAL)
+ end if
+
+ if (model%options%whichwvel /= VERTINT_STANDARD .and. model%options%whichdycore /= DYCORE_GLIDE) then
+ call write_log('Error, only standard vertical velocity calculation is supported for higher-order dycores.',GM_FATAL)
+ end if
+
+ write(message,*) 'vertical_integration : ',model%options%whichwvel,vertical_integration(model%options%whichwvel)
+ call write_log(message)
+
+ if (model%options%basal_mbal < 0 .or. model%options%basal_mbal >= size(b_mbal)) then
+ call write_log('Error, basal_mass_balance out of range',GM_FATAL)
+ end if
+
+ write(message,*) 'basal_mass_balance : ',model%options%basal_mbal,b_mbal(model%options%basal_mbal)
+ call write_log(message)
+
+ if (model%options%gthf < 0 .or. model%options%gthf >= size(gthf)) then
+ print*, 'gthf =', model%options%gthf
+ call write_log('Error, geothermal flux option out of range',GM_FATAL)
+ end if
+
+ write(message,*) 'geothermal heat flux : ',model%options%gthf,gthf(model%options%gthf)
+ call write_log(message)
+
+ if (model%options%isostasy < 0 .or. model%options%isostasy >= size(isostasy)) then
+ print*, 'isostasy =', model%options%isostasy
+ call write_log('Error, isostasy option out of range',GM_FATAL)
+ end if
+
+ write(message,*) 'isostasy : ',model%options%isostasy,isostasy(model%options%isostasy)
+ call write_log(message)
+
+ if (model%options%whichrelaxed==1) then
+ call write_log('First topo time slice has relaxed bedrock topography')
+ end if
+
+ if (model%options%periodic_ew) then
+ if (model%options%whichevol == EVOL_ADI) then
+ call write_log('Periodic boundary conditions not implemented in ADI scheme',GM_FATAL)
+ end if
+ call write_log('Periodic EW lateral boundary condition')
+ call write_log(' Slightly cheated with how temperature is implemented.',GM_WARNING)
+ end if
+
+ if (model%options%is_restart == RESTART_TRUE) then
+ call write_log('Restarting model from a previous run')
+ end if
+
+!! This option is not currently supported
+!! if (model%options%which_bproc < 0 .or. model%options%which_bproc >= size(which_bproc)) then
+!! call write_log('Error, basal_proc out of range',GM_FATAL)
+!! end if
+!! write(message,*) 'basal_proc : ',model%options%which_bproc,which_bproc(model%options%which_bproc)
+!! call write_log(message)
+
+ !HO options
+
+ if (model%options%whichdycore /= DYCORE_GLIDE) then ! glam/glissade higher-order
+
+ call write_log(' ')
+ call write_log('Higher-order options:')
+ call write_log('----------')
+
+ ! Each option is range-checked before it is used to index its description
+ ! array, to avoid out-of-bounds access on an invalid config value.
+
+ if (model%options%which_ho_efvs < 0 .or. model%options%which_ho_efvs >= size(ho_whichefvs)) then
+ call write_log('Error, HO effective viscosity input out of range', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichefvs : ',model%options%which_ho_efvs, &
+ ho_whichefvs(model%options%which_ho_efvs)
+ call write_log(message)
+
+ ! ho_whichdisp has lower bound -1, so the largest valid index is size-2
+ if (model%options%which_ho_disp < -1 .or. model%options%which_ho_disp >= size(ho_whichdisp)-1) then
+ call write_log('Error, HO dissipation input out of range', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichdisp : ',model%options%which_ho_disp, &
+ ho_whichdisp(model%options%which_ho_disp)
+ call write_log(message)
+
+ if (model%options%which_ho_babc < 0 .or. model%options%which_ho_babc >= size(ho_whichbabc)) then
+ call write_log('Error, HO basal BC input out of range', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichbabc : ',model%options%which_ho_babc, &
+ ho_whichbabc(model%options%which_ho_babc)
+ call write_log(message)
+ ! unsupported ho-babc options
+ if (model%options%which_ho_babc == HO_BABC_YIELD_NEWTON) then
+ call write_log('Yield stress higher-order basal boundary condition is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+ if (model%options%which_ho_babc == HO_BABC_POWERLAW) then
+ call write_log('Weertman-style power law higher-order basal boundary condition is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+ if (model%options%which_ho_babc == HO_BABC_COULOMB_FRICTION) then
+ call write_log('Coulomb friction law higher-order basal boundary condition is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+
+ if (model%options%which_ho_nonlinear < 0 .or. model%options%which_ho_nonlinear >= size(which_ho_nonlinear)) then
+ call write_log('Error, HO nonlinear solution input out of range', GM_FATAL)
+ end if
+ write(message,*) 'which_ho_nonlinear : ',model%options%which_ho_nonlinear, &
+ which_ho_nonlinear(model%options%which_ho_nonlinear)
+ call write_log(message)
+ ! unsupported nonlinear options
+ if (model%options%which_ho_nonlinear == HO_NONLIN_JFNK) then
+ call write_log('JFNK treatment of nonlinearity in momentum balance is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+
+ if (model%options%which_ho_resid < 0 .or. model%options%which_ho_resid >= size(ho_whichresid)) then
+ call write_log('Error, HO residual input out of range', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichresid : ',model%options%which_ho_resid, &
+ ho_whichresid(model%options%which_ho_resid)
+ call write_log(message)
+ ! unsupported resid options
+ if (model%options%which_ho_resid == HO_RESID_MAXU) then
+ call write_log('Residual as max. value of normalized velocity vector update is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+ if (model%options%which_ho_resid == HO_RESID_MAXU_NO_UBAS) then
+ call write_log('Residual as max. value of normalized velocity vector update with basal velocity omitted is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+ if (model%options%which_ho_resid == HO_RESID_MEANU) then
+ call write_log('Residual as mean value of normalized velocity vector update is not currently scientifically supported. USE AT YOUR OWN RISK.', GM_WARNING)
+ endif
+
+ ! ho_whichsparse has lower bound -1, so the largest valid index is size-2.
+ ! (The previous check used size(ho_whichsparse), admitting one invalid value.)
+ if (model%options%which_ho_sparse < -1 .or. model%options%which_ho_sparse >= size(ho_whichsparse)-1) then
+ call write_log('Error, HO sparse solver input out of range', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichsparse : ',model%options%which_ho_sparse, &
+ ho_whichsparse(model%options%which_ho_sparse)
+ call write_log(message)
+
+ if (model%options%whichdycore == DYCORE_GLISSADE) then
+
+ ! ho_whichapprox has lower bound -1, so the largest valid index is size-2
+ if (model%options%which_ho_approx < -1 .or. model%options%which_ho_approx >= size(ho_whichapprox)-1) then
+ call write_log('Error, Stokes approximation out of range for glissade dycore', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichapprox : ',model%options%which_ho_approx, &
+ ho_whichapprox(model%options%which_ho_approx)
+ call write_log(message)
+
+ if (model%options%which_ho_gradient < 0 .or. model%options%which_ho_gradient >= size(ho_whichgradient)) then
+ call write_log('Error, gradient option out of range for glissade dycore', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichgradient : ',model%options%which_ho_gradient, &
+ ho_whichgradient(model%options%which_ho_gradient)
+ call write_log(message)
+
+ if (model%options%which_ho_gradient_margin < 0 .or. &
+ model%options%which_ho_gradient_margin >= size(ho_whichgradient_margin)) then
+ call write_log('Error, gradient margin option out of range for glissade dycore', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichgradient_margin : ',model%options%which_ho_gradient_margin, &
+ ho_whichgradient_margin(model%options%which_ho_gradient_margin)
+ call write_log(message)
+
+ if (model%options%which_ho_assemble_beta < 0 .or. &
+ model%options%which_ho_assemble_beta >= size(ho_whichassemble_beta)) then
+ call write_log('Error, beta assembly option out of range for glissade dycore', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichassemble_beta : ',model%options%which_ho_assemble_beta, &
+ ho_whichassemble_beta(model%options%which_ho_assemble_beta)
+ call write_log(message)
+
+ if (model%options%which_ho_ground < 0 .or. model%options%which_ho_ground >= size(ho_whichground)) then
+ call write_log('Error, ground option out of range for glissade dycore', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichground : ',model%options%which_ho_ground, &
+ ho_whichground(model%options%which_ho_ground)
+ call write_log(message)
+
+ write(message,*) 'glissade_maxiter : ',model%options%glissade_maxiter
+ call write_log(message)
+
+ end if
+
+ if (model%options%whichdycore == DYCORE_GLISSADE .and. &
+ (model%options%which_ho_sparse == HO_SPARSE_PCG_STANDARD .or. &
+ model%options%which_ho_sparse == HO_SPARSE_PCG_CHRONGEAR) ) then
+ if (model%options%which_ho_precond < 0 .or. model%options%which_ho_precond >= size(ho_whichprecond)) then
+ call write_log('Error, glissade preconditioner out of range', GM_FATAL)
+ end if
+ write(message,*) 'ho_whichprecond : ',model%options%which_ho_precond, &
+ ho_whichprecond(model%options%which_ho_precond)
+ call write_log(message)
+ end if
+
+ endif ! whichdycore
+
+ end subroutine print_options
+
+!--------------------------------------------------------------------------------
+
+ ! parameters
+ subroutine handle_parameters(section, model)
+
+ ! Read the [parameters] config-file section into model%numerics,
+ ! model%paramets and model%basal_physics. Keys absent from the config
+ ! file presumably leave the corresponding defaults unchanged (GetValue
+ ! appears to overwrite its output only when the key is present).
+
+ use glimmer_config
+ use glide_types
+ use glimmer_log
+ implicit none
+ type(ConfigSection), pointer :: section ! config-file section to read
+ type(glide_global_type) :: model ! model instance receiving the parameters
+ ! Scratch pointer for reading the 5-element basal_tract_tanh array;
+ ! it is associated only if the key is present (see test below).
+ real(dp), pointer, dimension(:) :: tempvar => NULL()
+ integer :: loglevel
+
+ ! Default log verbosity; may be overridden by the 'log_level' key.
+ loglevel = GM_levels-GM_ERROR
+
+ !TODO - Change default_flwa to flwa_constant? Would have to change config files.
+ ! Change flow_factor to flow_enhancement_factor? Would have to change many SIA config files
+ call GetValue(section,'log_level',loglevel)
+ call glimmer_set_msg_level(loglevel)
+ call GetValue(section,'ice_limit', model%numerics%thklim)
+ call GetValue(section,'ice_limit_temp', model%numerics%thklim_temp)
+ call GetValue(section,'marine_limit', model%numerics%mlimit)
+ call GetValue(section,'calving_fraction', model%numerics%calving_fraction)
+ call GetValue(section,'geothermal', model%paramets%geot)
+ call GetValue(section,'flow_factor', model%paramets%flow_enhancement_factor)
+ call GetValue(section,'default_flwa', model%paramets%default_flwa)
+ call GetValue(section,'efvs_constant', model%paramets%efvs_constant)
+ call GetValue(section,'hydro_time', model%paramets%hydtim)
+
+ ! NOTE: bpar is used only for BTRC_TANH_BWAT
+ ! btrac_max and btrac_slope are used (with btrac_const) for BTRC_LINEAR_BMLT
+ ! btrac_const is used for several options
+
+ call GetValue(section,'basal_tract_const', model%paramets%btrac_const)
+ call GetValue(section,'basal_tract_max', model%paramets%btrac_max)
+ call GetValue(section,'basal_tract_slope', model%paramets%btrac_slope)
+
+ !WHL - Changed this so that bpar can be read correctly from config file.
+ ! This parameter is now called 'basal_tract_tanh' instead of 'basal_tract'.
+ call GetValue(section,'basal_tract_tanh', tempvar, 5)
+ if (associated(tempvar)) then
+!! model%paramets%btrac_const = tempvar(1) ! old code
+ ! Copy all 5 tanh parameters, then release the scratch array.
+ model%paramets%bpar(:) = tempvar(:)
+ deallocate(tempvar)
+ end if
+
+!! call GetValue(section,'sliding_constant', model%climate%slidconst) ! not currently used
+
+ call GetValue(section,'ho_beta_const', model%paramets%ho_beta_const)
+
+ ! Friction law parameters
+ call GetValue(section, 'friction_powerlaw_k', model%basal_physics%friction_powerlaw_k)
+ call GetValue(section, 'coulomb_c', model%basal_physics%Coulomb_C)
+ call GetValue(section, 'coulomb_bump_max_slope', model%basal_physics%Coulomb_Bump_max_slope)
+ call GetValue(section, 'coulomb_bump_wavelength', model%basal_physics%Coulomb_bump_wavelength)
+
+ ! ocean penetration parameterization parameter
+ call GetValue(section,'p_ocean_penetration', model%paramets%p_ocean_penetration)
+
+ ! added for ismip-hom
+ call GetValue(section,'periodic_offset_ew',model%numerics%periodic_offset_ew)
+ call GetValue(section,'periodic_offset_ns',model%numerics%periodic_offset_ns)
+
+ end subroutine handle_parameters
+
+!--------------------------------------------------------------------------------
+
+ subroutine print_parameters(model)
+
+ ! Write the model parameters to the log file. Parameters that are only
+ ! meaningful for particular options (e.g. friction-law coefficients) are
+ ! written only when the corresponding option is selected. A few invalid
+ ! settings are trapped here with GM_FATAL.
+
+ use glide_types
+ use glimmer_log
+ implicit none
+ type(glide_global_type) :: model ! model instance whose parameters are logged
+ character(len=100) :: message
+
+ call write_log(' ')
+ call write_log('Parameters')
+ call write_log('----------')
+
+ write(message,*) 'ice limit for dynamics (m) : ',model%numerics%thklim
+ call write_log(message)
+
+ !Note: The Glissade dycore is known to crash for thklim = 0, but has not
+ ! been extensively tested for small values of thklim.
+ ! Values smaller than 1 mm may be OK, but no guarantees.
+ if (model%options%whichdycore == DYCORE_GLISSADE .and. &
+ model%numerics%thklim < 1.d-3) then ! 1 mm
+ call write_log('ice limit (thklim) is too small for Glissade dycore', GM_FATAL)
+ endif
+
+ ! thklim_temp is used only by the glam/glissade dycores
+ if (model%options%whichdycore /= DYCORE_GLIDE) then
+ write(message,*) 'ice limit for temperature (m) : ',model%numerics%thklim_temp
+ call write_log(message)
+ endif
+
+ write(message,*) 'marine depth limit (m) : ',model%numerics%mlimit
+ call write_log(message)
+
+ if (model%options%whichmarn == MARINE_FLOAT_FRACTION) then
+ write(message,*) 'ice fraction lost due to calving : ', model%numerics%calving_fraction
+ call write_log(message)
+ end if
+
+ write(message,*) 'geothermal flux (W/m2) : ', model%paramets%geot
+ call write_log(message)
+
+ write(message,*) 'flow enhancement factor : ', model%paramets%flow_enhancement_factor
+ call write_log(message)
+
+ write(message,*) 'basal hydro time constant (yr): ', model%paramets%hydtim
+ call write_log(message)
+
+ if (model%options%whichflwa == FLWA_CONST_FLWA) then
+ write(message,*) 'constant flow factor (Pa^-n yr^-1):', model%paramets%default_flwa
+ call write_log(message)
+ end if
+
+ if (model%options%which_ho_efvs == HO_EFVS_CONSTANT) then
+ write(message,*) 'constant effec viscosity (Pa yr): ', model%paramets%efvs_constant
+ call write_log(message)
+ end if
+
+ ! btrac_const is shared by several basal-traction options
+ if (model%options%whichbtrc == BTRC_CONSTANT .or. &
+ model%options%whichbtrc == BTRC_CONSTANT_BWAT .or. &
+ model%options%whichbtrc == BTRC_LINEAR_BMLT .or. &
+ model%options%whichbtrc == BTRC_CONSTANT_TPMP) then
+ write(message,*) 'basal traction param (m/yr/Pa): ', model%paramets%btrac_const
+ call write_log(message)
+ end if
+
+ ! the 5 bpar values are used only by the tanh(bwat) traction option
+ if (model%options%whichbtrc == BTRC_TANH_BWAT) then
+ write(message,*) 'basal traction tanh factors: ',model%paramets%bpar(1)
+ call write_log(message)
+ write(message,*) ' ',model%paramets%bpar(2)
+ call write_log(message)
+ write(message,*) ' ',model%paramets%bpar(3)
+ call write_log(message)
+ write(message,*) ' ',model%paramets%bpar(4)
+ call write_log(message)
+ write(message,*) ' ',model%paramets%bpar(5)
+ call write_log(message)
+ end if
+
+ if (model%options%whichbtrc == BTRC_LINEAR_BMLT) then
+ write(message,*) 'basal traction max : ',model%paramets%btrac_max
+ call write_log(message)
+ write(message,*) 'basal traction slope : ',model%paramets%btrac_slope
+ call write_log(message)
+ end if
+
+ if (model%options%which_ho_babc == HO_BABC_CONSTANT) then
+ write(message,*) 'uniform beta (Pa yr/m) : ',model%paramets%ho_beta_const
+ call write_log(message)
+ end if
+
+ ! ISMIP-HOM test C requires a square domain
+ if (model%options%which_ho_babc == HO_BABC_ISHOMC) then
+ if (model%general%ewn /= model%general%nsn) then
+ call write_log('Error, must have ewn = nsn for ISMIP-HOM test C', GM_FATAL)
+ endif
+ endif
+
+ if (model%options%which_ho_babc == HO_BABC_POWERLAW) then
+ write(message,*) 'roughness parameter, k, for power-law friction law : ',model%basal_physics%friction_powerlaw_k
+ call write_log(message)
+ end if
+
+ if (model%options%which_ho_babc == HO_BABC_COULOMB_FRICTION) then
+ write(message,*) 'C coefficient for Coulomb friction law : ', model%basal_physics%Coulomb_C
+ call write_log(message)
+ write(message,*) 'bed bump max. slope for Coulomb friction law : ', model%basal_physics%Coulomb_Bump_max_slope
+ call write_log(message)
+ write(message,*) 'bed bump wavelength for Coulomb friction law : ', model%basal_physics%Coulomb_bump_wavelength
+ call write_log(message)
+ end if
+
+ if (model%options%whichbwat == BWATER_OCEAN_PENETRATION) then
+ write(message,*) 'p_ocean_penetration : ', model%paramets%p_ocean_penetration
+ call write_log(message)
+ endif
+
+ ! The global diagnostic point must lie inside the global domain.
+ if (model%numerics%idiag < 1 .or. model%numerics%idiag > model%general%ewn &
+ .or. &
+ model%numerics%jdiag < 1 .or. model%numerics%jdiag > model%general%nsn) then
+ call write_log('Error, global diagnostic point (idiag, jdiag) is out of bounds', GM_FATAL)
+ endif
+
+ ! added for ismip-hom
+ if (model%numerics%periodic_offset_ew /= 0.d0) then
+ write(message,*) 'periodic offset_ew (m) : ',model%numerics%periodic_offset_ew
+ call write_log(message)
+ endif
+
+ if (model%numerics%periodic_offset_ns /= 0.d0) then
+ write(message,*) 'periodic offset_ns (m) : ',model%numerics%periodic_offset_ns
+ call write_log(message)
+ endif
+
+ call write_log('')
+
+ end subroutine print_parameters
+
+!--------------------------------------------------------------------------------
+
+  ! Sigma levels
+  !> Handle the '[sigma]' config section: read the vertical sigma levels
+  !> directly from the config file into model%numerics%sigma.
+  !> Fatal error if sigma levels were already supplied via a separate external
+  !> file (which_sigma == SIGMA_EXTERNAL) - only one source is allowed.
+  subroutine handle_sigma(section, model)
+
+    use glimmer_config
+    use glide_types
+    use glimmer_log
+    implicit none
+    type(ConfigSection), pointer :: section   !> config section being parsed
+    type(glide_global_type) :: model          !> model instance receiving the sigma levels
+
+    if (model%options%which_sigma==SIGMA_EXTERNAL) then
+       call write_log('Sigma levels specified twice - use only'// &
+            ' config file or separate file, not both',GM_FATAL)
+    else
+       ! read up to upn values into the (already allocated) sigma array
+       call GetValue(section,'sigma_levels',model%numerics%sigma,model%general%upn)
+    end if
+
+  end subroutine handle_sigma
+
+!--------------------------------------------------------------------------------
+
+  !> Log the model's sigma levels as a single line of f6.3-formatted values.
+  subroutine print_sigma(model)
+    use glide_types
+    use glimmer_log
+    implicit none
+    type(glide_global_type) :: model      !> model instance holding numerics%sigma
+    character(len=100) :: message,temp
+    integer :: i
+
+    call write_log('Sigma levels:')
+    call write_log('------------------')
+    message=''
+    do i=1,model%general%upn
+       ! each level occupies a fixed 6-character field
+       write(temp,'(f6.3)') model%numerics%sigma(i)
+       ! NOTE(review): message is len=100, so more than ~16 levels are silently
+       ! truncated on this assignment - confirm upn stays small enough in practice
+       message=trim(message)//trim(temp)
+    enddo
+    call write_log(trim(message))
+    call write_log('')
+
+  end subroutine print_sigma
+
+!--------------------------------------------------------------------------------
+
+  ! geothermal heat flux calculations
+  !> Handle the geothermal heat flux config section: read the lithosphere
+  !> thermal-model parameters into model%lithot.
+  !> Note: config keys 'rho', 'shc' and 'con' map to the rock-layer fields
+  !> rho_r, shc_r and con_r respectively.
+  subroutine handle_gthf(section, model)
+    use glimmer_config
+    use glide_types
+    implicit none
+    type(ConfigSection), pointer :: section   !> config section being parsed
+    type(glide_global_type) :: model          !> model instance receiving the parameters
+
+    call GetValue(section,'num_dim',model%lithot%num_dim)
+    call GetValue(section,'nlayer',model%lithot%nlayer)
+    call GetValue(section,'surft',model%lithot%surft)
+    call GetValue(section,'rock_base',model%lithot%rock_base)
+    call GetValue(section,'numt',model%lithot%numt)
+    call GetValue(section,'rho',model%lithot%rho_r)
+    call GetValue(section,'shc',model%lithot%shc_r)
+    call GetValue(section,'con',model%lithot%con_r)
+  end subroutine handle_gthf
+
+!--------------------------------------------------------------------------------
+
+  !> Log the geothermal heat flux (lithosphere temperature) configuration.
+  !> Active only when the gthf option is GTHF_COMPUTE.
+  !> Fatal error if lithot%num_dim is neither 1 nor 3.
+  subroutine print_gthf(model)
+    use glide_types
+    use glimmer_log
+    implicit none
+    type(glide_global_type) :: model      !> model instance holding lithot parameters
+    character(len=100) :: message
+
+    if (model%options%gthf == GTHF_COMPUTE) then
+       call write_log('Geothermal heat flux configuration')
+       call write_log('----------------------------------')
+       if (model%lithot%num_dim==1) then
+          call write_log('solve 1D diffusion equation')
+       else if (model%lithot%num_dim==3) then
+          call write_log('solve 3D diffusion equation')
+       else
+          call write_log('Wrong number of dimensions.',GM_FATAL,__FILE__,__LINE__)
+       end if
+       write(message,*) 'number of layers : ',model%lithot%nlayer
+       call write_log(message)
+       write(message,*) 'initial surface temperature : ',model%lithot%surft
+       call write_log(message)
+       write(message,*) 'rock base : ',model%lithot%rock_base
+       call write_log(message)
+       write(message,*) 'density of rock layer : ',model%lithot%rho_r
+       call write_log(message)
+       write(message,*) 'specific heat capacity of rock layer : ',model%lithot%shc_r
+       call write_log(message)
+       write(message,*) 'thermal conductivity of rock layer : ',model%lithot%con_r
+       call write_log(message)
+       write(message,*) 'number of time steps for spin-up : ',model%lithot%numt
+       call write_log(message)
+       call write_log('')
+    end if
+  end subroutine print_gthf
+
+!--------------------------------------------------------------------------------
+
+  !> Handle the '[isostasy]' config section: read lithosphere/asthenosphere
+  !> options, the relaxation time constant, the update period, and (since the
+  !> merge of the old 'elastic lithosphere' section) the flexural rigidity.
+  subroutine handle_isostasy(section, model)
+    use glimmer_config
+    use glide_types
+    implicit none
+    type(ConfigSection), pointer :: section   !> config section being parsed
+    type(glide_global_type) :: model          !> model instance receiving the parameters
+
+    call GetValue(section,'lithosphere',model%isostasy%lithosphere)
+    call GetValue(section,'asthenosphere',model%isostasy%asthenosphere)
+    call GetValue(section,'relaxed_tau',model%isostasy%relaxed_tau)
+    call GetValue(section,'update',model%isostasy%period)
+
+    !NOTE: This value used to be in a separate section ('elastic lithosphere')
+    ! Now part of 'isostasy' section
+    call GetValue(section,'flexural_rigidity',model%isostasy%rbel%d)
+
+!! call GetSection(config,section,'elastic lithosphere')
+!! if (associated(section)) then
+!! call GetValue(section,'flexural_rigidity',isos%rbel%d)
+!! end if
+
+  end subroutine handle_isostasy
+
+!--------------------------------------------------------------------------------
+
+  !> Log the isostasy configuration (only when isostasy == ISOSTASY_COMPUTE).
+  !> Fatal errors: unknown lithosphere or asthenosphere option, or the elastic
+  !> lithosphere selected on more than one processor (not supported in parallel).
+  subroutine print_isostasy(model)
+    use glide_types
+    use glimmer_log
+    use parallel, only: tasks
+    implicit none
+    type(glide_global_type) :: model      !> model instance holding isostasy options
+    character(len=100) :: message
+
+    if (model%options%isostasy == ISOSTASY_COMPUTE) then
+       call write_log('Isostasy')
+       call write_log('--------')
+
+       if (model%isostasy%lithosphere==LITHOSPHERE_LOCAL) then
+          call write_log('using local lithosphere approximation')
+       else if (model%isostasy%lithosphere==LITHOSPHERE_ELASTIC) then
+          ! elastic lithosphere is serial-only
+          if (tasks > 1) then
+             call write_log('Error, elastic lithosphere not supported for multiple processors',GM_FATAL)
+          endif
+          call write_log('using elastic lithosphere approximation')
+          write(message,*) ' flexural rigidity : ', model%isostasy%rbel%d
+          call write_log(message)
+          write(message,*) ' update period (yr): ', model%isostasy%period
+          call write_log(message)
+       else
+          call write_log('Error, unknown lithosphere option',GM_FATAL)
+       end if
+
+       if (model%isostasy%asthenosphere==ASTHENOSPHERE_FLUID) then
+          call write_log('using fluid mantle')
+       else if (model%isostasy%asthenosphere==ASTHENOSPHERE_RELAXING) then
+          call write_log('using relaxing mantle')
+          write(message,*) ' characteristic time constant (yr): ', model%isostasy%relaxed_tau
+          call write_log(message)
+       else
+          call write_log('Error, unknown asthenosphere option',GM_FATAL)
+       end if
+       call write_log('')
+    endif ! compute isostasy
+
+  end subroutine print_isostasy
+
+!--------------------------------------------------------------------------------
+
+! These options are disabled for now.
+
+!! subroutine handle_till_options(section,model)
+!! !Till options
+!! use glimmer_config
+!! use glide_types
+!! implicit none
+!! type(ConfigSection), pointer :: section
+!! type(glide_global_type) :: model
+
+!! if (model%options%which_bproc==1) then
+!! call GetValue(section, 'fric', model%basalproc%fric)
+!! call GetValue(section, 'etillo', model%basalproc%etillo)
+!! call GetValue(section, 'No', model%basalproc%No)
+!! call GetValue(section, 'Comp', model%basalproc%Comp)
+!! call GetValue(section, 'Cv', model%basalproc%Cv)
+!! call GetValue(section, 'Kh', model%basalproc%Kh)
+!! else if (model%options%which_bproc==2) then
+!! call GetValue(section, 'aconst', model%basalproc%aconst)
+!! call GetValue(section, 'bconst', model%basalproc%bconst)
+!! end if
+!! if (model%options%which_bproc > 0) then
+!! call GetValue(section, 'Zs', model%basalproc%Zs)
+!! call GetValue(section, 'tnodes', model%basalproc%tnodes)
+!! call GetValue(section, 'till_hot', model%basalproc%till_hot)
+!! end if
+!! end subroutine handle_till_options
+
+!! subroutine print_till_options(model)
+!! use glide_types
+!! use glimmer_log
+!! implicit none
+!! type(glide_global_type) :: model
+!! character(len=100) :: message
+
+!! if (model%options%which_bproc > 0) then
+!! call write_log('Till options')
+!! call write_log('----------')
+!! if (model%options%which_bproc==1) then
+!! write(message,*) 'Internal friction : ',model%basalproc%fric
+!! call write_log(message)
+!! write(message,*) 'Reference void ratio : ',model%basalproc%etillo
+!! call write_log(message)
+!! write(message,*) 'Reference effective Stress : ',model%basalproc%No
+!! call write_log(message)
+!! write(message,*) 'Compressibility : ',model%basalproc%Comp
+!! call write_log(message)
+!! write(message,*) 'Diffusivity : ',model%basalproc%Cv
+!! call write_log(message)
+!! write(message,*) 'Hyd. conductivity : ',model%basalproc%Kh
+!! call write_log(message)
+!! end if
+!! if (model%options%which_bproc==2) then
+!! write(message,*) 'aconst : ',model%basalproc%aconst
+!! call write_log(message)
+!! write(message,*) 'bconst : ',model%basalproc%bconst
+!! call write_log(message)
+!! end if
+!! write(message,*) 'Solid till thickness : ',model%basalproc%Zs
+!! call write_log(message)
+!! write(message,*) 'Till nodes number : ',model%basalproc%tnodes
+!! call write_log(message)
+!! write(message,*) 'till_hot :',model%basalproc%till_hot
+!! call write_log(message)
+!! end if
+!! end subroutine print_till_options
+
+!--------------------------------------------------------------------------------
+
+  subroutine define_glide_restart_variables(options)
+    !> This subroutine analyzes the glide/glissade options input by the user in the config file
+    !> and determines which variables are necessary for an exact restart. MJH 1/11/2013
+
+    ! Please comment thoroughly the reasons why a particular variable needs to be a restart variable for a given config.
+    ! Note: this subroutine assumes that any restart variables you add are loadable. Check glide_vars.def to make sure any variables you add have load: 1
+
+    use glide_types
+    use glide_io, only: glide_add_to_restart_variable_list
+
+    implicit none
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+    type(glide_options), intent (in) :: options !> Derived type holding all model options
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+
+    !------------------------------------------------------------------------------------
+
+    !This was the restart list as of 1/11/13 using the old hot=1 system in glide_vars.def:
+    !restart_variable_list=' lat relx tauf thk thkmask topg bheatflx bmlt bwat uvel vvel wgrd flwa temp litho_temp age '
+
+    ! Start with a few variables that we always want - prognostic variables and b.c.
+    ! topg - needed to reconstruct all other geometry fields
+    ! thk - prognostic variable
+    ! temp - prognostic variable
+    ! Note: the conversion from temp/flwa to tempstag/flwastag (if necessary) happens in glide_io.F90
+    ! bheatflx, artm, acab - boundary conditions. Of course if these fields are 0 they don't need
+    ! to be in the restart file, but without adding a check for that we cannot assume any of them are.
+    ! There are some options where artm would not be needed. Logic could be added to make that distinction.
+    ! Note that bheatflx may not be an input variable but can also be assigned as a parameter in the config file!
+    call glide_add_to_restart_variable_list('topg thk temp bheatflx artm acab')
+
+    ! add dycore specific restart variables
+    select case (options%whichdycore)
+
+    case (DYCORE_GLIDE)
+       ! thkmask - TODO is this needed?
+       ! wgrd & wvel - temp driver calculates weff = f(wgrd, wvel) so both are needed by temp code.
+       ! It looks possible to calculate wvel on a restart from wgrd because wvel does not
+       ! appear to require a time derivative (see subroutine wvelintg).
+       ! wgrd does require time derivatives and therefore should be
+       ! calculated at the end of each time step and stored as a restart variable
+       ! so that the time derivatives do not need to be restart variables.
+       ! For now I am calculating wvel at the same time (end of glide time step)
+       ! and then saving both as restart variables. This has the advantage of
+       ! them being on consistent time levels in the output file.
+       ! (If we waited to calculate wvel in the temp driver, we would not need to
+       ! add it as a restart variable, but then in the output wgrd and wvel would
+       ! be based on different time levels.)
+       ! flwa - in principle this could be reconstructed from temp. However in the current
+       ! implementation of glide the flwa calculation occurs after temp evolution but
+       ! before thk evolution. This means flwa is calculated from the current temp and
+       ! the old thk. The old thk is not available on a restart (just the current thk).
+       ! (thk is needed to calculate flwa for 1) a mask for where ice is, 2) correction for pmp.)
+       call glide_add_to_restart_variable_list('thkmask wgrd wvel flwa uvel vvel')
+
+       ! slip option for SIA
+       select case (options%whichbtrc)
+       case (0)
+          ! no restart variable needed when no-slip is chosen
+       case default
+          ! when a slip option is chosen, ubas & vbas are needed by the temperature solver
+          ! for calculating basal heating prior to the first calculation of velocity.
+          ! Rather than recalculate the sliding field on restart, it is easier and
+          ! less error-prone to have them be restart variables.
+          ! This could either be done by making ubas, vbas restart variables or
+          ! having them assigned from the bottom level of uvel,vvel on init
+          ! Note that btrc and soft are not needed as restart variables because
+          ! their current implementation is as a scalar ('basal_tract_const' config parameter).
+          ! If they are ever implemented as 2-d fields, then they (probably just one of them)
+          ! should become restart variables.
+
+          ! Nothing needs to happen because ubas,vbas are assigned from uvel,vvel in glide_init_state_diagnostic()
+       end select
+
+    case (DYCORE_GLAM, DYCORE_GLISSADE)
+       ! uvel,vvel - these are needed for an exact restart because we can only
+       ! recalculate them to within the picard/jfnk convergence tolerance.
+       ! beta - b.c. needed for runs with sliding - could add logic to only include in that case
+       ! flwa is not needed for glissade.
+       ! TODO not sure if thkmask is needed for HO
+       call glide_add_to_restart_variable_list('uvel vvel thkmask bfricflx dissip')
+
+    end select
+
+    ! ==== Other non-dycore specific options ====
+
+    ! basal water option
+    select case (options%whichbwat)
+    case (BWATER_NONE, BWATER_CONST)
+       ! no restart variables needed
+    case default
+       ! restart needs to know bwat value
+       call glide_add_to_restart_variable_list('bwat')
+    end select
+
+    ! internal water option (for enthalpy scheme)
+    select case (options%whichtemp)
+    case (TEMP_ENTHALPY)
+       ! restart needs to know internal water fraction
+       call glide_add_to_restart_variable_list('waterfrac')
+    case default
+       ! no restart variables needed
+    end select
+
+    ! higher-order basal boundary condition option
+    select case (options%which_ho_babc)
+    case (HO_BABC_POWERLAW, HO_BABC_COULOMB_FRICTION)
+       ! These friction laws need effective pressure
+       call glide_add_to_restart_variable_list('effecpress')
+    case default
+       ! Most other HO basal boundary conditions need the beta field (although there are a few that don't)
+       call glide_add_to_restart_variable_list('beta')
+    end select
+
+    ! geothermal heat flux option
+    select case (options%gthf)
+    case(GTHF_COMPUTE)
+       ! restart needs to know lithosphere temperature
+       call glide_add_to_restart_variable_list('litho_temp')
+    case default
+       ! no restart variables needed
+    end select
+
+    !WHL - added isostasy option
+    select case (options%isostasy)
+    case(ISOSTASY_COMPUTE)
+       ! restart needs to know relaxation depth
+       ! TODO MJH: I suspect that relx is only needed when asthenosphere=1 (relaxing mantle), but I'm not sure -
+       ! this should be tested when isostasy implementation is finalized/tested.
+       call glide_add_to_restart_variable_list('relx')
+    case default
+       ! no new restart variables needed
+    end select
+
+
+    ! basal processes module - requires tauf for a restart
+!! if (options%which_bproc /= BAS_PROC_DISABLED ) then
+!! call glide_add_to_restart_variable_list('tauf')
+!! endif
+
+    ! TODO bmlt was set as a restart variable, but I'm not sure when or if it is needed.
+
+    ! TODO age should be a restart variable if it is an input variable.
+    ! Same goes for b.c. (bheatflxm, artm, acab) and any other tracers that get introduced.
+    ! These could be included all the time (as I have done above for b.c.), or
+    ! we could add logic to only include them when they were in the input file.
+    ! To do this, this subroutine would have to be moved to after where input files are read,
+    ! glide_io_readall(), but before the output files are created, glide_io_createall()
+
+    ! TODO lat is only needed for some climate drivers. It is not needed for cism_driver.
+    ! Need to add logic that will add it only when those drivers are used.
+
+  end subroutine define_glide_restart_variables
+
+!--------------------------------------------------------------------------------
+
+end module glide_setup
+
+!--------------------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/glide_stop.F90 b/components/cism/glimmer-cism/libglide/glide_stop.F90
new file mode 100644
index 0000000000..1760f69b3c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_stop.F90
@@ -0,0 +1,152 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_stop.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glide_stop
+
+  use glide_types
+  use glimmer_log
+
+  implicit none
+
+  !> module containing finalisation of glide
+  !> this subroutine had to be split out from glide.f90 to avoid circular dependencies
+
+  !> Updated by Tim Bocek to allow for several models to be
+  !> registered and finalized with a single call without needing
+  !> the model at call time
+
+  !> maximum number of model instances that can be registered simultaneously
+  integer, parameter :: max_models = 32
+
+  type pmodel_type
+     !> Contains a pointer to a model
+     !> This is a hack to get around Fortran's lack of arrays of pointers
+     type(glide_global_type), pointer :: p => null()
+  end type pmodel_type
+
+  !> Pointers to all registered models
+  !> This has a fixed size at compile time
+  type(pmodel_type), dimension(max_models), save :: registered_models
+
+contains
+
+!EIB! register and finalise_all not present in gc2, are present in lanl, therefore added here
+
+  subroutine register_model(model)
+    !> Registers a model, ensuring that it is finalised in the case of an error.
+    !> Stores a pointer to the model in the first free registry slot and records
+    !> the slot index in model%model_id; fatal error if all slots are in use.
+    type(glide_global_type), target :: model
+    integer :: i
+
+    do i = 1, max_models
+       if (.not. associated(registered_models(i)%p)) then
+          registered_models(i)%p => model
+          model%model_id = i
+          return
+       end if
+    end do
+    call write_log("Model was not registered, did you instantiate too many instances?", GM_FATAL)
+  end subroutine
+
+  subroutine deregister_model(model)
+    !> Removes a model from the registry. Normally this should only be done when
+    !> glide_finalise is called on the model, and is done automatically by
+    !> that function
+    type(glide_global_type) :: model
+
+    ! a model_id outside [1, max_models] means the model was never registered
+    if (model%model_id < 1 .or. model%model_id > max_models) then
+       call write_log("Attempting to deregister a non-allocated model", GM_WARNING)
+    else
+       registered_models(model%model_id)%p => null()
+       model%model_id = 0
+    end if
+  end subroutine
+
+  !Note: Currently, glide_finalise_all is never called. (glide_finalise is called from cism_driver)
+
+  subroutine glide_finalise_all(crash_arg)
+    !> Finalises all models in the model registry
+    !> crash_arg (optional): forwarded to glide_finalise; defaults to .false.
+    logical, optional :: crash_arg
+
+    logical :: crash
+    integer :: i
+
+    if (present(crash_arg)) then
+       crash = crash_arg
+    else
+       crash = .false.
+    end if
+
+    do i = 1,max_models
+       if (associated(registered_models(i)%p)) then
+          call glide_finalise(registered_models(i)%p, crash)
+       end if
+    end do
+  end subroutine
+
+
+  subroutine glide_finalise(model,crash)
+
+    !> finalise model instance: force a last output write if the model crashed,
+    !> close all input and output files, deallocate the model arrays, remove the
+    !> model from the registry, and log final statistics
+
+    use glimmer_ncio
+    use glimmer_log
+    use glide_types
+    use glide_io
+    use profile
+    implicit none
+    type(glide_global_type) :: model !> model instance
+    logical, optional :: crash !> set to true if the model died unexpectedly
+    character(len=100) :: message
+
+    ! force last write if crashed
+    if (present(crash)) then
+       if (crash) then
+          call glide_io_writeall(model,model,.true.)
+       end if
+    end if
+
+    call closeall_in(model)
+    call closeall_out(model)
+
+    call glide_deallocarr(model)
+    call deregister_model(model)
+
+    ! write some statistics
+    call write_log('Some Stats')
+    write(message,*) 'Maximum temperature iterations: ',model%temper%niter
+    call write_log(message)
+
+    ! close profile
+#if (defined PROFILING || defined CCSMCOUPLED || defined CESMTIMERS)
+    call profile_close(model%profile)
+#endif
+
+  end subroutine glide_finalise
+
+end module glide_stop
diff --git a/components/cism/glimmer-cism/libglide/glide_stress.F90 b/components/cism/glimmer-cism/libglide/glide_stress.F90
new file mode 100644
index 0000000000..68ef091a73
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_stress.F90
@@ -0,0 +1,219 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_stress.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+! *sfp* module to hold subroutines for calculation of stress components from converged, higher-order
+! stress and effective viscosity fields. To be called at the end of HO vel calculation.
+
+module glide_stress
+
+  use glimmer_paramets, only : dp
+  use glide_types
+  use parallel
+
+  implicit none
+
+  private
+  public :: glide_calcstrsstr
+
+  contains
+
+  !> Driver: unpack the needed fields from the model derived type and compute
+  !> all deviatoric stress components plus the effective stress scalar.
+  subroutine glide_calcstrsstr( model )
+
+    type(glide_global_type) :: model
+
+    call calcstrsstr(model%general%ewn, model%general%nsn, model%general%upn, &
+                     model%numerics%dew, model%numerics%dns, &
+                     model%numerics%sigma, model%numerics%stagsigma, &
+                     model%geometry%thck, &
+                     model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+                     model%geomderv%dthckdew, model%geomderv%dthckdns, &
+                     model%velocity%uvel, model%velocity%vvel, &
+                     model%stress%efvs, &
+                     model%stress%tau%xx, model%stress%tau%yy, &
+                     model%stress%tau%xy, model%stress%tau%scalar, &
+                     model%stress%tau%xz, model%stress%tau%yz )
+
+  end subroutine glide_calcstrsstr
+
+  !> Compute stress components tauxx, tauyy, tauxy, tauxz, tauyz and the
+  !> effective stress tau from velocity gradients and effective viscosity efvs.
+  !> Interior cells with ice (thck > 0) are computed; ice-free cells are zeroed;
+  !> halo cells are filled by parallel_halo updates at the end.
+  subroutine calcstrsstr( ewn, nsn, upn, &
+                          dew, dns, &
+                          sigma, stagsigma, &
+                          thck, &
+                          dusrfdew, dusrfdns, &
+                          dthckdew, dthckdns, &
+                          uvel, vvel, &
+                          efvs, &
+                          tauxx, tauyy, &
+                          tauxy, tau, &
+                          tauxz, tauyz )
+
+    use glimmer_paramets, only : len0, thk0
+
+    implicit none
+
+    integer, intent(in) :: ewn, nsn, upn
+
+    real(dp), intent(in) :: dew, dns
+    real(dp), intent(in), dimension(:) :: sigma, stagsigma
+    real(dp), intent(in), dimension(:,:,:) :: efvs, uvel, vvel
+    real(dp), intent(in), dimension(:,:) :: thck, dusrfdew, &
+                                            dusrfdns, dthckdew, dthckdns
+
+    real(dp), intent(out), dimension(:,:,:) :: tauxx, tauyy, tauxy, &
+                                               tauxz, tauyz, tau
+    !*sfp* local vars
+    integer :: ew, ns
+    real(dp), parameter :: f1 = len0 / thk0   ! scaling factor relating length/thickness model units
+    real(dp) :: dew2, dew4, dns2, dns4
+    real(dp), dimension(upn-1) :: dup, dupm
+
+    !*sfp* note that these are already defined and used in glam_strs2. If needed by PB&J
+
+    ! stress calc routine as well, consider moving these up-scope
+    dup(1:upn-1) = sigma(2:upn) - sigma(1:upn-1)
+    dupm(:) = - 0.25_dp / dup(:)
+    dew2 = 2.0_dp * dew; dns2 = 2.0_dp * dns ! *sp* 2x the standard grid spacing
+    dew4 = 4.0_dp * dew; dns4 = 4.0_dp * dns ! *sp* 4x the standard grid spacing
+
+    ! loop over locally owned interior cells; halos are updated below
+    do ns = 2,nsn-1
+       do ew = 2,ewn-1;
+
+          if (thck(ew,ns) > 0.0_dp) then
+
+             ! vertical shear terms (du/dz, dv/dz on the staggered vertical grid)
+             tauxz(:,ew,ns) = vertideriv(upn, hsum(uvel(:,ew-1:ew,ns-1:ns)), &
+                                         thck(ew,ns), dupm(1:upn-1))
+             tauyz(:,ew,ns) = vertideriv(upn, hsum(vvel(:,ew-1:ew,ns-1:ns)), &
+                                         thck(ew,ns), dupm(1:upn-1))
+             ! normal terms: horizontal derivatives with sigma-coordinate correction
+             tauxx(:,ew,ns) = horizderiv(upn, stagsigma, &
+                                         sum(uvel(:,ew-1:ew,ns-1:ns),3), &
+                                         dew4, tauxz(:,ew,ns), &
+                                         sum(dusrfdew(ew-1:ew,ns-1:ns)), &
+                                         sum(dthckdew(ew-1:ew,ns-1:ns)))
+             tauyy(:,ew,ns) = horizderiv(upn, stagsigma, &
+                                         sum(vvel(:,ew-1:ew,ns-1:ns),2), &
+                                         dns4, tauyz(:,ew,ns), &
+                                         sum(dusrfdns(ew-1:ew,ns-1:ns)), &
+                                         sum(dthckdns(ew-1:ew,ns-1:ns)))
+             ! shear term: du/dy + dv/dx contributions
+             tauxy(:,ew,ns) = horizderiv(upn, stagsigma, &
+                                         sum(uvel(:,ew-1:ew,ns-1:ns),2), &
+                                         dns4, tauxz(:,ew,ns), &
+                                         sum(dusrfdns(ew-1:ew,ns-1:ns)), &
+                                         sum(dthckdns(ew-1:ew,ns-1:ns))) + &
+                              horizderiv(upn, stagsigma, &
+                                         sum(vvel(:,ew-1:ew,ns-1:ns),3), &
+                                         dew4, tauyz(:,ew,ns), &
+                                         sum(dusrfdew(ew-1:ew,ns-1:ns)), &
+                                         sum(dthckdew(ew-1:ew,ns-1:ns)))
+          else
+             ! no ice: zero all stress components in this column
+             tauxz(:,ew,ns) = 0.0_dp
+             tauyz(:,ew,ns) = 0.0_dp
+             tauxx(:,ew,ns) = 0.0_dp
+             tauyy(:,ew,ns) = 0.0_dp
+             tauxy(:,ew,ns) = 0.0_dp
+          end if
+
+       end do
+    end do
+
+    ! convert strain rates to stresses: tau = 2 * efvs * strain rate
+    ! (vertical shear terms carry the len0/thk0 unit-scaling factor f1)
+    tauxz = f1 * efvs * tauxz
+    tauyz = f1 * efvs * tauyz
+    tauxx = 2.0_dp * efvs * tauxx
+    tauyy = 2.0_dp * efvs * tauyy
+    tauxy = efvs * tauxy
+
+    !*sfp* expanding this in terms of viscosity and velocity gradients, I've confirmed that
+    ! one gets the same thing as if one took Tau_eff = N_eff * Eps_eff, where Eps_eff is the
+    ! 1st order approx. to the 2nd strain-rate invariant (outlined in model description document).
+    tau = sqrt(tauxz**2 + tauyz**2 + tauxx**2 + tauyy**2 + tauxx*tauyy + tauxy**2)
+
+    call parallel_halo(tauxx)
+    call parallel_halo(tauyy)
+    call parallel_halo(tauxy)
+    call parallel_halo(tauxz)
+    call parallel_halo(tauyz)
+    call parallel_halo(tau)
+    return
+
+  end subroutine calcstrsstr
+
+
+  !> Vertical (sigma) derivative of a horizontally-summed column, scaled by
+  !> 1/thck. The 1/4 needed to average the 4-point hsum input is folded into
+  !> dupm (= -0.25/d(sigma)).
+  function vertideriv( upn, varb, thck, dupm )
+
+    implicit none
+
+    integer, intent(in) :: upn
+    real(dp), intent(in), dimension(:) :: varb    !> 4-point horizontal sum of the field (see hsum)
+    real(dp), intent(in) :: thck                  !> ice thickness at this cell
+    real(dp), intent(in), dimension(:) :: dupm    !> -0.25 / d(sigma) per layer
+
+    real(dp), dimension(size(varb)-1) :: vertideriv
+
+    !*sfp* 'dupm' defined as -1/(2*del_sigma), in which case it seems like
+    !there should be a '-' in front of this expression ... or, negative sign
+    !may be implicit in the vert indices ( "varb(2:upn) - varb(1:upn-1)" ) and
+    !the fact that up=1 at the sfc and up=upn at the bed ???
+    vertideriv(1:upn-1) = dupm(1:upn-1) * (varb(2:upn) - varb(1:upn-1)) / thck
+
+    return
+
+  end function vertideriv
+
+  !> Horizontal derivative of varb on the staggered vertical grid, with the
+  !> sigma-coordinate correction term dvarbdz * (dusrfdx - stagsigma*dthckdx).
+  !> Inputs are 2-point sums over the neighbouring cells; 'grid' is 4x the grid
+  !> spacing, which supplies the averaging factor for the summed values.
+  function horizderiv( upn, stagsigma, &
+                       varb, grid, &
+                       dvarbdz, dusrfdx, dthckdx)
+
+    implicit none
+
+    integer, intent(in) :: upn
+    real(dp), dimension(:), intent(in) :: stagsigma
+    real(dp), dimension(:,:), intent(in) :: varb
+    real(dp), dimension(:), intent(in) :: dvarbdz
+    real(dp), intent(in) :: dusrfdx, dthckdx, grid
+
+    real(dp) :: horizderiv(size(varb,1)-1)
+
+    ! *sfp* where does this factor of 1/4 come from ... averaging?
+    ! NOTE(review): presumably it averages the 4 summed neighbour values in
+    ! dusrfdx/dthckdx (each a 2x2-cell sum) - confirm against glam_strs2
+    horizderiv = (varb(1:upn-1,2) + varb(2:upn,2) - varb(1:upn-1,1) - varb(2:upn,1)) / grid - &
+                 dvarbdz * (dusrfdx - stagsigma * dthckdx) / 4.0_dp
+
+    return
+
+  end function horizderiv
+
+  !> Sum a rank-3 field over its two horizontal dimensions (the 2x2 block of
+  !> neighbouring grid points), returning one value per vertical level.
+  function hsum(inp)
+
+    implicit none
+
+    real(dp), dimension(:,:,:), intent(in) :: inp
+    real(dp), dimension(size(inp,dim=1)) :: hsum
+
+    hsum = sum(sum(inp(:,:,:),dim=3),dim=2)
+
+    return
+
+  end function hsum
+
+end module glide_stress
diff --git a/components/cism/glimmer-cism/libglide/glide_temp.F90 b/components/cism/glimmer-cism/libglide/glide_temp.F90
new file mode 100644
index 0000000000..d4d2605d38
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_temp.F90
@@ -0,0 +1,1351 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_temp.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_mask.inc"
+
+! some macros used to disable parts of the temperature equation
+! vertical diffusion
+#ifdef NO_VERTICAL_DIFFUSION
+#define VERT_DIFF 0.
+#else
+#define VERT_DIFF 1.
+#endif
+
+! horizontal advection
+#ifdef NO_HORIZONTAL_ADVECTION
+#define HORIZ_ADV 0.
+#else
+#define HORIZ_ADV 1.
+#endif
+
+! vertical advection
+#ifdef NO_VERTICAL_ADVECTION
+#define VERT_ADV 0.
+#else
+#define VERT_ADV 1.
+#endif
+
+! strain heating
+#ifdef NO_STRAIN_HEAT
+#define STRAIN_HEAT 0.
+#else
+#define STRAIN_HEAT 1.
+#endif
+
+module glide_temp
+
+ use glide_types
+ use glimmer_global, only : dp
+
+ !TODO - Remove 'oldglide' logic when comparisons are complete
+ use glimmer_paramets, only : oldglide
+
+ implicit none
+
+ private
+ public :: glide_init_temp, glide_temp_driver, glide_calcbmlt, glide_calcbpmp
+
+contains
+
+!------------------------------------------------------------------------------------
+
+ subroutine glide_init_temp(model)
+
+ !> initialise temperature module
+ !> Allocates and precomputes the work arrays and scaled constants used
+ !> by the temperature solver (advection/diffusion factors, sigma-grid
+ !> difference coefficients, basal-flux factors), then sets the initial
+ !> ice temperature (from restart file, input file, or the temp_init
+ !> option) and, if not restarting, the initial Glen flow factor flwa.
+ use glimmer_physcon, only : rhoi, shci, coni, scyr, grav, gn, lhci, rhow, trpt
+ use glimmer_paramets, only : tim0, thk0, acc0, len0, vis0, vel0
+ use glimmer_log
+ use parallel, only: lhalo, uhalo
+
+ type(glide_global_type), intent(inout) :: model ! model instance
+
+ integer, parameter :: p1 = gn + 1
+ integer :: up, ns, ew
+
+ !TODO - Change VERT_DIFF, etc. to integers?
+ if (VERT_DIFF==0.) call write_log('Vertical diffusion is switched off')
+ if (HORIZ_ADV==0.) call write_log('Horizontal advection is switched off')
+ if (VERT_ADV==0.) call write_log('Vertical advection is switched off')
+ if (STRAIN_HEAT==0.) call write_log('Strain heating is switched off')
+
+ !TODO - Should these tempwk allocations be done in glide_allocarr, called from glide_types?
+ ! Should the arrays be deallocated here at the end of the run?
+
+ ! horizontal advection stuff
+ allocate(model%tempwk%hadv_u(model%general%upn,model%general%ewn,model%general%nsn))
+ allocate(model%tempwk%hadv_v(model%general%upn,model%general%ewn,model%general%nsn))
+ allocate(model%tempwk%initadvt(model%general%upn,model%general%ewn,model%general%nsn))
+
+ allocate(model%tempwk%inittemp(model%general%upn,model%general%ewn,model%general%nsn))
+ !WHL - Moved dissip to model%temper and allocated in glide_types.
+!! allocate(model%tempwk%dissip(model%general%upn,model%general%ewn,model%general%nsn))
+ allocate(model%tempwk%compheat(model%general%upn,model%general%ewn,model%general%nsn))
+ model%tempwk%compheat = 0.0d0
+ allocate(model%tempwk%dups(model%general%upn,3))
+
+ allocate(model%tempwk%c1(model%general%upn))
+
+ allocate(model%tempwk%dupa(model%general%upn),model%tempwk%dupb(model%general%upn))
+ allocate(model%tempwk%dupc(model%general%upn))
+
+ ! advconst: dttem/(16*dx) factors applied to 4-point-summed velocities
+ model%tempwk%advconst(1) = HORIZ_ADV*model%numerics%dttem / (16.0d0 * model%numerics%dew)
+ model%tempwk%advconst(2) = HORIZ_ADV*model%numerics%dttem / (16.0d0 * model%numerics%dns)
+
+ model%tempwk%dups = 0.0d0
+
+ ! dups: second-difference coefficients on the (possibly nonuniform)
+ ! sigma grid; interior levels only (boundaries handled separately)
+ do up = 2, model%general%upn-1
+ model%tempwk%dups(up,1) = 1.d0/((model%numerics%sigma(up+1) - model%numerics%sigma(up-1)) * &
+ (model%numerics%sigma(up) - model%numerics%sigma(up-1)))
+ model%tempwk%dups(up,2) = 1.d0/((model%numerics%sigma(up+1) - model%numerics%sigma(up-1)) * &
+ (model%numerics%sigma(up+1) - model%numerics%sigma(up)))
+ model%tempwk%dups(up,3) = 1.d0/(model%numerics%sigma(up+1) - model%numerics%sigma(up-1))
+ end do
+
+ model%tempwk%zbed = 1.0d0 / thk0
+ model%tempwk%dupn = model%numerics%sigma(model%general%upn) - model%numerics%sigma(model%general%upn-1)
+
+! In dimensional units, wmax = thk0 / (tim0/scyr) = 2000 m / 400 yr = 5 m/yr
+! In nondimensional units, wmax = 5 m/yr / (thk0*scyr/tim0) = 1.0
+! If we remove scaling, then tim0 = thk0 = 1, and wmax = 5 m/yr / scyr.
+! The following expression is correct if scaling is removed.
+
+ model%tempwk%wmax = 5.0d0 * tim0 / (scyr * thk0)
+
+ model%tempwk%cons = (/ 2.0d0 * tim0 * model%numerics%dttem * coni / (2.0d0 * rhoi * shci * thk0**2), &
+ model%numerics%dttem / 2.0d0, &
+ VERT_DIFF*2.0d0 * tim0 * model%numerics%dttem / (thk0 * rhoi * shci), &
+ VERT_ADV*tim0 * acc0 * model%numerics%dttem / coni, &
+ 0.d0 /) !WHL - last term no longer needed
+ !*sfp* added last term to vector above for use in HO & SSA dissip. cacl
+
+ model%tempwk%c1 = STRAIN_HEAT *(model%numerics%sigma * rhoi * grav * thk0**2 / len0)**p1 * &
+ 2.0d0 * vis0 * model%numerics%dttem * tim0 / (16.0d0 * rhoi * shci)
+
+ model%tempwk%dupc = (/ (model%numerics%sigma(2) - model%numerics%sigma(1)) / 2.0d0, &
+ ((model%numerics%sigma(up+1) - model%numerics%sigma(up-1)) / 2.0d0, &
+ up=2,model%general%upn-1), (model%numerics%sigma(model%general%upn) - &
+ model%numerics%sigma(model%general%upn-1)) / 2.0d0 /)
+
+ model%tempwk%dupa = (/ 0.0d0, 0.0d0, &
+ ((model%numerics%sigma(up) - model%numerics%sigma(up-1)) / &
+ ((model%numerics%sigma(up-2) - model%numerics%sigma(up-1)) * &
+ (model%numerics%sigma(up-2) - model%numerics%sigma(up))), &
+ up=3,model%general%upn) /)
+
+ model%tempwk%dupb = (/ 0.0d0, 0.0d0, &
+ ((model%numerics%sigma(up) - model%numerics%sigma(up-2)) / &
+ ((model%numerics%sigma(up-1) - model%numerics%sigma(up-2)) * &
+ (model%numerics%sigma(up-1) - model%numerics%sigma(up))), &
+ up=3,model%general%upn) /)
+
+ model%tempwk%f = (/ tim0 * coni / (thk0**2 * lhci * rhoi), &
+ tim0 / (thk0 * lhci * rhoi), &
+ tim0 * thk0 * rhoi * shci / (thk0 * tim0 * model%numerics%dttem * lhci * rhoi), &
+ tim0 * thk0**2 * vel0 * grav * rhoi / (4.0d0 * thk0 * len0 * rhoi * lhci), &
+ 0.d0 /) !WHL - last term no longer needed
+ !*sfp* added the last term in the vect above for HO and SSA dissip. calc.
+
+ ! setting up some factors for sliding contrib to basal heat flux
+ model%tempwk%slide_f = (/ VERT_DIFF * grav * thk0 * model%numerics%dttem/ shci, & ! vert diffusion
+ VERT_ADV * rhoi*grav*acc0*thk0*thk0*model%numerics%dttem/coni /) ! vert advection
+
+
+
+ !==== Initialize ice temperature.============
+
+ ! Five possibilities:
+ ! (1) Set ice temperature to 0 C everywhere in column (TEMP_INIT_ZERO)
+ ! (2) Set ice temperature to surface air temperature everywhere in column (TEMP_INIT_ARTM)
+ ! (3) Set up a linear temperature profile, with T = artm at the surface and T <= Tpmp
+ ! at the bed (TEMP_INIT_LINEAR).
+ ! A parameter (pmpt_offset) controls how far below Tpmp the initial bed temp is set.
+ ! (4) Read ice temperature from an initial input file.
+ ! (5) Read ice temperature from a restart file.
+ !
+ ! If restarting, we always do (5).
+ ! If not restarting and the temperature field is present in the input file, we do (4).
+ ! If (4) or (5), then the temperature field should already have been read from a file,
+ ! and the rest of this subroutine will do nothing.
+ ! Otherwise, the initial temperature is controlled by model%options%temp_init,
+ ! which can be read from the config file.
+ !
+ !TODO - Remove halo parameters below, since uhalo = lhalo = 0 for Glide.
+ !TODO - Make sure cells in the Glide temperature halo are initialized to reasonable values
+ ! (not unphys_val), e.g. if reading temps from input or restart file.
+
+ if (model%options%is_restart == RESTART_TRUE) then
+
+ ! Temperature has already been initialized from a restart file.
+ ! (Temperature is always a restart variable.)
+
+ call write_log('Initializing ice temperature from the restart file')
+
+ ! NOTE(review): lhalo bounds both ends of the ew range and uhalo both
+ ! ends of the ns range below; presumably harmless since both are 0
+ ! for Glide (see TODO above) -- confirm if halos ever become nonzero.
+ elseif ( minval(model%temper%temp(1:model%general%upn, &
+ 1+lhalo:model%general%ewn-lhalo, 1+uhalo:model%general%nsn-uhalo)) > &
+ (-1.0d0 * trpt) ) then ! trpt = 273.15 K
+ ! Default initial temps in glide_types are unphys_val = -999
+
+ ! Temperature has already been initialized from an input file.
+ ! (We know this because the unphysical initial values have been overwritten.)
+
+ call write_log('Initializing ice temperature from an input file')
+
+ else ! not reading temperature from restart or input file, so initialize it here
+
+ ! First set T = 0 C everywhere (including Glide temperature halo: ew = 0, ewn+1, ns = 0, nsn+1).
+
+ model%temper%temp(:,:,:) = 0.0d0
+
+ if (model%options%temp_init == TEMP_INIT_ZERO) then
+
+ ! Nothing else to do; just write to log
+ call write_log('Initializing ice temperature to 0 deg C')
+
+ elseif (model%options%temp_init == TEMP_INIT_ARTM) then
+
+ ! Initialize ice column temperature to surface air temperature
+ ! If artm > 0 C, then set T = 0 C.
+ ! Loop over physical cells where artm is defined (not temperature halo cells).
+
+ !Note: Old glide sets temp = artm everywhere without regard to whether ice exists in a column.
+
+ call write_log('Initializing ice temperature to the surface air temperature')
+
+ do ns = 1, model%general%nsn
+ do ew = 1, model%general%ewn
+
+ call glide_init_temp_column(model%options%temp_init, &
+ model%numerics%sigma(:), &
+ dble(model%climate%artm(ew,ns)), & !TODO - Remove 'dble' (artm is dp)
+ model%geometry%thck(ew,ns), &
+ model%temper%temp(:,ew,ns) )
+ end do
+ end do
+
+ elseif (model%options%temp_init == TEMP_INIT_LINEAR) then
+
+ ! Initialize ice column temperature with a linear profile:
+ ! T = artm at the surface, and T <= Tpmp at the bed.
+ ! Loop over physical cells where artm is defined (not temperature halo cells)
+
+ call write_log('Initializing ice temperature to a linear profile in each column')
+
+ do ns = 1, model%general%nsn
+ do ew = 1, model%general%ewn
+
+ call glide_init_temp_column(model%options%temp_init, &
+ model%numerics%sigma(:), &
+ dble(model%climate%artm(ew,ns)), &
+ model%geometry%thck(ew,ns), &
+ model%temper%temp(:,ew,ns) )
+ end do
+ end do
+
+ endif ! model%options%temp_init
+
+ endif ! restart file, input file, or other options
+
+
+ ! ====== Calculate initial value of flwa ==================
+
+ if (model%options%is_restart == RESTART_FALSE) then
+ call write_log("Calculating initial flwa from temp and thk fields")
+
+ ! Calculate Glen's A --------------------------------------------------------
+
+ call glide_calcflwa(model%numerics%sigma, &
+ model%numerics%thklim, &
+ model%temper%flwa, &
+ model%temper%temp(:,1:model%general%ewn,1:model%general%nsn), &
+ model%geometry%thck, &
+ model%paramets%flow_enhancement_factor, &
+ model%paramets%default_flwa, &
+ model%options%whichflwa)
+ else
+ call write_log("Using flwa values from restart file for flwa initial condition.")
+ endif
+
+ end subroutine glide_init_temp
+
+!****************************************************
+
+ subroutine glide_init_temp_column(temp_init, &
+ sigma, artm, &
+ thck, temp)
+
+ !> Set the initial temperature profile of a single ice column.
+ !> temp_init selects one of three profiles:
+ !> TEMP_INIT_ZERO - 0 deg C throughout
+ !> TEMP_INIT_ARTM - min(artm, 0 C) where ice exists, else 0 C
+ !> TEMP_INIT_LINEAR - linear from artm at the surface to a bed value
+ !> held pmpt_offset below the pressure melting point
+
+ ! In/out arguments
+
+ integer, intent(in) :: temp_init ! option for temperature initialization
+
+ real(dp), dimension(:), intent(in) :: sigma ! vertical coordinate
+ real(dp), intent(in) :: artm ! surface air temperature (deg C); pass as dp
+ real(dp), intent(in) :: thck ! ice thickness
+ real(dp), dimension(:), intent(inout) :: temp ! ice column temperature (deg C)
+
+ ! Local variables and parameters
+
+ real(dp) :: tbed ! initial temperature at the bed
+ real(dp) :: pmptb ! pressure melting point temp at the bed
+ real(dp), dimension(size(sigma)) :: pmpt ! pressure melting point temp thru the column
+
+ ! How far (deg C) the initial bed temperature sits below the pressure
+ ! melting point; positive means colder than Tpmp.
+ real(dp), parameter :: pmpt_offset = 2.d0
+
+ select case(temp_init)
+
+ case(TEMP_INIT_ZERO)
+
+ temp(:) = 0.d0
+
+ case(TEMP_INIT_ARTM)
+
+ ! Ice-free columns get 0 C; ice-covered columns min(artm, 0 C)
+ if (thck > 0.0d0) then
+ temp(:) = min(0.0d0, artm)
+ else
+ temp(:) = 0.d0
+ endif
+
+ case(TEMP_INIT_LINEAR)
+
+ ! Linear profile: artm at the surface, Tpmp - pmpt_offset at the bed ...
+ call calcpmptb (pmptb, thck)
+ tbed = pmptb - pmpt_offset
+
+ temp(:) = artm + (tbed - artm)*sigma(:)
+
+ ! ... then clip so the whole column stays at least pmpt_offset below Tpmp
+ call calcpmpt(pmpt(:), thck, sigma(:))
+ temp(:) = min(temp(:), pmpt(:) - pmpt_offset)
+
+ end select
+
+ end subroutine glide_init_temp_column
+
+
+ subroutine glide_temp_driver(model,whichtemp)
+
+ !> Calculates the ice temperature, according to one
+ !> of several alternative methods.
+ !> TEMP_SURFACE_AIR_TEMP: set each column to min(artm, 0 C).
+ !> TEMP_PROGNOSTIC: full Glide solution -- dissipation, horizontal
+ !> advection, then a vertical tri-diagonal solve per column,
+ !> iterated until the max temperature change is below tempthres;
+ !> afterwards computes basal melt, staggered basal fields, and flwa.
+ !> TEMP_STEADY: hold temperature at its current values.
+
+ use glimmer_utils, only: tridiag
+ use glimmer_paramets, only : thk0, tim0, GLC_DEBUG
+ use glide_grid_operators, only: stagvarb
+
+ !------------------------------------------------------------------------------------
+ ! Subroutine arguments
+ !------------------------------------------------------------------------------------
+
+ type(glide_global_type),intent(inout) :: model ! model instance
+ integer, intent(in) :: whichtemp ! flag to choose method.
+
+ !------------------------------------------------------------------------------------
+ ! Internal variables
+ !------------------------------------------------------------------------------------
+
+ real(dp),dimension(size(model%numerics%sigma)) :: subd, diag, supd, rhsd
+ real(dp),dimension(size(model%numerics%sigma)) :: prevtemp, iteradvt, diagadvt
+ real(dp) :: tempresid
+ real(dp) :: dTtop, dthck
+
+ integer :: iter
+ integer :: ew,ns
+
+ real(dp),parameter :: tempthres = 0.001d0, floatlim = 10.0d0 / thk0
+ integer, parameter :: mxit = 100
+ integer, parameter :: ewbc = 1, nsbc = 1
+
+ ! weff: effective vertical velocity (wvel - wgrd) in each column
+ real(dp), dimension(size(model%numerics%sigma)) :: weff
+
+ !------------------------------------------------------------------------------------
+ ! ewbc/nsbc set the type of boundary condition aplied at the end of
+ ! the domain. a value of 0 implies zero gradient.
+ !------------------------------------------------------------------------------------
+
+ select case(whichtemp)
+
+ case(TEMP_SURFACE_AIR_TEMP) ! Set column to surface air temperature ------------------
+
+ do ns = 1,model%general%nsn
+ do ew = 1,model%general%ewn
+ model%temper%temp(:,ew,ns) = dmin1(0.0d0,dble(model%climate%artm(ew,ns)))
+ end do
+ end do
+
+ case(TEMP_PROGNOSTIC) ! Do full temperature solution as in standard Glide-------------
+
+ ! Note: In older versions of Glimmer, the vertical velocity was computed here.
+ ! It is now computed in glide_tstep_p3 to support exact restart.
+
+ model%tempwk%inittemp = 0.0d0
+ model%tempwk%initadvt = 0.0d0
+ !*MH model%temper%dissip = 0.0d0 is also set to zero in finddisp
+
+ ! ----------------------------------------------------------------------------------
+
+ call glide_finddisp(model, &
+ model%geometry%thck, &
+ model%geomderv%stagthck, &
+ model%geomderv%dusrfdew, &
+ model%geomderv%dusrfdns, &
+ model%temper%flwa)
+
+ ! Loop over all scalar points except outer row
+ ! Outer row of cells is omitted because velo points are not available at boundaries
+
+ ! translate velo field
+ ! (4-point average of the staggered velocities, pre-scaled by advconst)
+ do ns = 2,model%general%nsn-1
+ do ew = 2,model%general%ewn-1
+ model%tempwk%hadv_u(:,ew,ns) = model%tempwk%advconst(1) * ( model%velocity%uvel(:,ew-1,ns-1) &
+ + model%velocity%uvel(:,ew-1,ns) + model%velocity%uvel(:,ew,ns-1) + model%velocity%uvel(:,ew,ns) )
+ model%tempwk%hadv_v(:,ew,ns) = model%tempwk%advconst(2) * ( model%velocity%vvel(:,ew-1,ns-1) &
+ + model%velocity%vvel(:,ew-1,ns) + model%velocity%vvel(:,ew,ns-1) + model%velocity%vvel(:,ew,ns) )
+ end do
+ end do
+
+ call hadvall(model, &
+ model%temper%temp, &
+ model%geometry%thck)
+
+ ! zeroth iteration
+ iter = 0
+ tempresid = 0.0d0
+
+ ! Loop over all scalar points except outer row
+ ! Note: temperature array has dimensions (upn, 0:ewn+1, 0:nsn+1)
+
+ do ns = 2,model%general%nsn-1
+ do ew = 2,model%general%ewn-1
+ if (model%geometry%thck(ew,ns) > model%numerics%thklim) then
+
+ weff = model%velocity%wvel(:,ew,ns) - model%velocity%wgrd(:,ew,ns)
+
+ !TODO - It seems odd to zero out weff when it's big. Why not set to wmax?
+ if (maxval(abs(weff)) > model%tempwk%wmax) then
+ weff = 0.0d0
+ end if
+
+ call hadvpnt(iteradvt, &
+ diagadvt, &
+ model%temper%temp(:,ew-2:ew+2,ns), &
+ model%temper%temp(:,ew,ns-2:ns+2), &
+ model%tempwk%hadv_u(:,ew,ns), &
+ model%tempwk%hadv_v(:,ew,ns))
+
+ call findvtri(model,ew,ns,subd,diag,supd,diagadvt, &
+ weff, &
+ GLIDE_IS_FLOAT(model%geometry%thkmask(ew,ns)))
+
+ ! findvtri_init is called on the zeroth iteration only: it stores
+ ! the explicit part of the system (inittemp) reused by later iterations
+ call findvtri_init(model,ew,ns,subd,diag,supd,weff,model%temper%temp(:,ew,ns), &
+ model%geometry%thck(ew,ns),GLIDE_IS_FLOAT(model%geometry%thkmask(ew,ns)))
+
+ call findvtri_rhs(model,ew,ns,model%climate%artm(ew,ns),iteradvt,rhsd, &
+ GLIDE_IS_FLOAT(model%geometry%thkmask(ew,ns)))
+
+ prevtemp(:) = model%temper%temp(:,ew,ns)
+
+ call tridiag(subd(1:model%general%upn), &
+ diag(1:model%general%upn), &
+ supd(1:model%general%upn), &
+ model%temper%temp(1:model%general%upn,ew,ns), &
+ rhsd(1:model%general%upn))
+
+ call corrpmpt(model%temper%temp(:,ew,ns), &
+ model%geometry%thck(ew,ns), &
+ model%temper%bwat(ew,ns), &
+ model%numerics%sigma, &
+ model%general%upn)
+
+ tempresid = max(tempresid,maxval(abs(model%temper%temp(:,ew,ns)-prevtemp(:))))
+
+ endif ! thk > thklim
+ end do ! ew
+ end do ! ns
+
+ ! Iterate the column solves (horizontal advection couples columns,
+ ! so the tri-diagonal solves must be repeated until convergence)
+ do while (tempresid > tempthres .and. iter <= mxit)
+
+ tempresid = 0.0d0
+
+ do ns = 2,model%general%nsn-1
+ do ew = 2,model%general%ewn-1
+
+ if(model%geometry%thck(ew,ns) > model%numerics%thklim) then
+
+ weff = model%velocity%wvel(:,ew,ns) - model%velocity%wgrd(:,ew,ns)
+ if (maxval(abs(weff)) > model%tempwk%wmax) then
+ weff = 0.0d0
+ end if
+
+ call hadvpnt(iteradvt, &
+ diagadvt, &
+ model%temper%temp(:,ew-2:ew+2,ns), &
+ model%temper%temp(:,ew,ns-2:ns+2), &
+ model%tempwk%hadv_u(:,ew,ns), &
+ model%tempwk%hadv_v(:,ew,ns))
+
+ call findvtri(model,ew,ns,subd,diag,supd,diagadvt, &
+ weff, &
+ GLIDE_IS_FLOAT(model%geometry%thkmask(ew,ns)))
+
+ call findvtri_rhs(model,ew,ns,model%climate%artm(ew,ns),iteradvt,rhsd, &
+ GLIDE_IS_FLOAT(model%geometry%thkmask(ew,ns)))
+
+ prevtemp(:) = model%temper%temp(:,ew,ns)
+
+ call tridiag(subd(1:model%general%upn), &
+ diag(1:model%general%upn), &
+ supd(1:model%general%upn), &
+ model%temper%temp(1:model%general%upn,ew,ns), &
+ rhsd(1:model%general%upn))
+
+ call corrpmpt(model%temper%temp(:,ew,ns), &
+ model%geometry%thck(ew,ns), &
+ model%temper%bwat(ew,ns), &
+ model%numerics%sigma, &
+ model%general%upn)
+
+ ! Compute conductive flux = (k/H * dT/dsigma) at upper surface; positive down
+ ! This is computed in case it needs to be upscaled and passed back to a GCM.
+
+ ! NOTE(review): coni is not named in this subroutine's or the module's
+ ! use lists; presumably it reaches here via one of the use'd modules
+ ! (e.g. glide_types) -- confirm.
+ dTtop = model%temper%temp(2,ew,ns) - model%temper%temp(1,ew,ns)
+ dthck = model%geometry%thck(ew,ns)*thk0 * (model%numerics%sigma(2) - model%numerics%sigma(1))
+ model%temper%ucondflx(ew,ns) = -coni * dTtop / dthck
+
+ ! Check whether the temperature has converged everywhere
+ tempresid = max(tempresid, maxval(abs(model%temper%temp(:,ew,ns)-prevtemp(:))))
+
+ else ! thck <= thklim
+ ! Still need to set ucondflx, even for thin ice, so that something is
+ ! passed to the coupler. Arbitrarily setting the flux to 0 in this case.
+ model%temper%ucondflx(ew,ns) = 0.0d0
+ endif ! thck > thklim
+ end do ! ew
+ end do ! ns
+
+ iter = iter + 1
+
+ end do ! tempresid > tempthres .and. iter <= mxit
+
+ model%temper%niter = max(model%temper%niter, iter)
+
+ ! Set temperature of thin ice based on model%options%temp_init
+ ! T = 0 for TEMP_INIT_ZERO
+ ! T = artm for TEMP_INIT_ARTM
+ ! Linear vertical profile for TEMP_INIT_LINEAR
+ ! Set T = 0 for ice-free cells
+ !
+ ! NOTE: Calling this subroutine will maintain a sensible temperature profile
+ ! for thin ice, but in general does *not* conserve energy.
+ ! To conserve energy, we need either thklim = 0, or some additional
+ ! energy accounting and correction.
+
+ do ns = 1, model%general%nsn
+ do ew = 1, model%general%ewn
+
+ if (GLIDE_IS_THIN(model%geometry%thkmask(ew,ns))) then
+
+ !TODO - Remove 'oldglide' logic when comparisons are complete
+ if (oldglide) then
+ model%temper%temp(:,ew,ns) = min(0.0d0,dble(model%climate%artm(ew,ns)))
+ else
+ call glide_init_temp_column(model%options%temp_init, &
+ model%numerics%sigma(:), &
+ dble(model%climate%artm(ew,ns)), &
+ model%geometry%thck(ew,ns), &
+ model%temper%temp(:,ew,ns) )
+ endif
+
+ else if (GLIDE_NO_ICE(model%geometry%thkmask(ew,ns))) then
+
+ model%temper%temp(:,ew,ns) = 0.0d0
+
+ end if
+ end do
+ end do
+
+ ! apply periodic ew BC
+ if (model%options%periodic_ew) then
+ model%temper%temp(:,0,:) = model%temper%temp(:,model%general%ewn-2,:)
+ model%temper%temp(:,1,:) = model%temper%temp(:,model%general%ewn-1,:)
+ model%temper%temp(:,model%general%ewn,:) = model%temper%temp(:,2,:)
+ model%temper%temp(:,model%general%ewn+1,:) = model%temper%temp(:,3,:)
+ end if
+
+ ! Calculate basal melt rate --------------------------------------------------
+
+ call glide_calcbmlt(model, &
+ model%temper%temp, &
+ model%geometry%thck, &
+ model%geomderv%stagthck, &
+ model%geomderv%dusrfdew, &
+ model%geomderv%dusrfdns, &
+ model%velocity%ubas, &
+ model%velocity%vbas, &
+ model%temper%bmlt, &
+ GLIDE_IS_FLOAT(model%geometry%thkmask))
+
+ ! Transform basal temperature and pressure melting point onto velocity grid
+ ! We need stagbpmp for one of the basal traction cases.
+
+ call stagvarb(model%temper%temp(model%general%upn,1:model%general%ewn,1:model%general%nsn), &
+ model%temper%stagbtemp ,&
+ model%general% ewn, &
+ model%general% nsn)
+
+ call glide_calcbpmp(model,model%geometry%thck,model%temper%bpmp)
+
+ call stagvarb(model%temper%bpmp, &
+ model%temper%stagbpmp ,&
+ model%general% ewn, &
+ model%general% nsn)
+
+ case(TEMP_STEADY) ! *sfp* stealing this un-used option ...
+
+ ! DO NOTHING. That is, hold T const. at initially assigned value
+
+ end select ! whichtemp
+
+ ! Rescale dissipation term to deg C/s (instead of deg C)
+ !WHL - Treat dissip above as a rate (deg C/s) instead of deg
+ model%temper%dissip(:,:,:) = model%temper%dissip(:,:,:) / (model%numerics%dttem*tim0)
+
+ ! Calculate Glen's A --------------------------------------------------------
+
+ call glide_calcflwa(model%numerics%sigma, &
+ model%numerics%thklim, &
+ model%temper%flwa, &
+ model%temper%temp(:,1:model%general%ewn,1:model%general%nsn), &
+ model%geometry%thck, &
+ model%paramets%flow_enhancement_factor, &
+ model%paramets%default_flwa, &
+ model%options%whichflwa)
+
+ ! Output some information ----------------------------------------------------
+
+ if (GLC_DEBUG) then
+ print *, "* temp ", model%numerics%time, iter, model%temper%niter, &
+ real(model%temper%temp(model%general%upn,model%general%ewn/2+1,model%general%nsn/2+1))
+ end if
+
+ end subroutine glide_temp_driver
+
+ !-------------------------------------------------------------------------
+
+ subroutine hadvpnt(iteradvt,diagadvt,tempx,tempy,u,v)
+
+ ! Upwinded horizontal advection terms at one point.
+ ! tempx/tempy hold a 5-point stencil (:,1:5) centred on the point in
+ ! the x and y directions; u,v are the pre-scaled advection velocities.
+ ! On return, iteradvt holds the explicit (off-diagonal) contribution
+ ! and diagadvt the contribution to the tri-diagonal system's diagonal.
+
+ real(dp), dimension(:), intent(in) :: u,v
+ real(dp), dimension(:,:), intent(in) :: tempx, tempy
+ real(dp), dimension(:), intent(out) :: iteradvt, diagadvt
+
+ iteradvt = 0.0d0
+ diagadvt = 0.0d0
+
+ ! x-direction: upwind on the sign of u at the surface level
+ if (u(1) > 0.0d0) then
+ iteradvt = u * (tempx(:,1) - 4.0d0*tempx(:,2))
+ diagadvt = 3.0d0 * u
+ else if (u(1) < 0.0d0) then
+ iteradvt = u * (4.0d0*tempx(:,4) - tempx(:,5))
+ diagadvt = -3.0d0 * u
+ end if
+
+ ! y-direction: upwind on the sign of v at the surface level
+ if (v(1) > 0.0d0) then
+ iteradvt = iteradvt + v * (tempy(:,1) - 4.0d0*tempy(:,2))
+ diagadvt = diagadvt + 3.0d0 * v
+ else if (v(1) < 0.0d0) then
+ iteradvt = iteradvt + v * (4.0d0*tempy(:,4) - tempy(:,5))
+ diagadvt = diagadvt - 3.0d0 * v
+ end if
+
+ end subroutine hadvpnt
+
+ !-------------------------------------------------------------------------
+
+ subroutine fohadvpnt(tempwk,iteradvt,diagadvt,tempx,tempy,uvel,vvel)
+
+ ! First-order upwinded horizontal advection terms at one point.
+ ! Velocities are formed by horizontally summing uvel/vvel and scaling
+ ! by the precomputed advection constants; tempx/tempy hold a 3-point
+ ! stencil (:,1:3) centred on the point.
+
+ use glimmer_utils, only: hsum
+
+ type(glide_tempwk) :: tempwk
+ real(dp), dimension(:,:,:), intent(in) :: uvel, vvel
+ real(dp), dimension(:,:), intent(in) :: tempx, tempy
+ real(dp), dimension(:), intent(out) :: iteradvt, diagadvt
+
+ real(dp), dimension(size(iteradvt)) :: u, v
+
+ iteradvt = 0.0d0
+ diagadvt = 0.0d0
+
+ u = tempwk%advconst(1) * hsum(uvel(:,:,:))
+ v = tempwk%advconst(2) * hsum(vvel(:,:,:))
+
+ ! x-direction: upwind on the sign of u at the surface level
+ if (u(1) > 0.0d0) then
+ iteradvt = - u * 2.0d0 * tempx(:,1)
+ diagadvt = 2.0d0 * u
+ else if (u(1) < 0.0d0) then
+ iteradvt = u * 2.0d0 * tempx(:,3)
+ diagadvt = - 2.0d0 * u
+ end if
+
+ ! y-direction: upwind on the sign of v at the surface level
+ if (v(1) > 0.0d0) then
+ iteradvt = iteradvt - v * 2.0d0 * tempy(:,1)
+ diagadvt = diagadvt + 2.0d0 * v
+ else if (v(1) < 0.0d0) then
+ iteradvt = iteradvt + v * 2.0d0 * tempy(:,3)
+ diagadvt = diagadvt - 2.0d0 * v
+ end if
+
+ end subroutine fohadvpnt
+
+ !-------------------------------------------------------------------------
+
+ subroutine hadvall(model,temp,thck)
+
+ ! Precompute the explicit horizontal-advection contribution
+ ! (tempwk%initadvt) for every interior column thicker than thklim.
+
+ type(glide_global_type) :: model
+ real(dp), dimension(:,0:,0:), intent(in) :: temp
+ real(dp), dimension(:,:), intent(in) :: thck
+
+ ! scratch diagonal term required by hadvpnt but not kept here
+ real(dp), dimension(size(temp,dim=1)) :: diagadvt
+
+ integer :: ew,ns
+
+ model%tempwk%initadvt = 0.0d0
+
+ ! interior scalar points only; the outer row lacks velocity neighbours
+ do ns = 2,model%general%nsn-1
+ do ew = 2,model%general%ewn-1
+ if (thck(ew,ns) <= model%numerics%thklim) cycle
+
+ call hadvpnt(model%tempwk%initadvt(:,ew,ns), &
+ diagadvt, &
+ temp(:,ew-2:ew+2,ns), &
+ temp(:,ew,ns-2:ns+2), &
+ model%tempwk%hadv_u(:,ew,ns), &
+ model%tempwk%hadv_v(:,ew,ns))
+ end do
+ end do
+
+ end subroutine hadvall
+
+ !-------------------------------------------------------------------------
+
+ subroutine findvtri(model,ew,ns,subd,diag,supd,diagadvt,weff,float)
+
+ !> Assemble the sub-, super- and main diagonals of the vertical
+ !> temperature tri-diagonal system for column (ew,ns).
+ !> weff is the effective vertical velocity; diagadvt is the horizontal
+ !> advection contribution to the diagonal from hadvpnt.
+
+ type(glide_global_type) :: model
+ integer, intent(in) :: ew, ns
+ real(dp), dimension(:), intent(in) :: weff, diagadvt
+ real(dp), dimension(:), intent(out) :: subd, diag, supd
+ logical, intent(in) :: float
+
+ real(dp) :: fact(2)
+
+! These constants are precomputed:
+! model%tempwk%cons(1) = 2.0d0 * tim0 * model%numerics%dttem * coni / (2.0d0 * rhoi * shci * thk0**2)
+! model%tempwk%cons(2) = model%numerics%dttem / 2.0d0
+
+ ! fact(1): vertical-diffusion factor; fact(2): vertical-advection factor
+ fact(1) = VERT_DIFF*model%tempwk%cons(1) / model%geometry%thck(ew,ns)**2
+ fact(2) = VERT_ADV*model%tempwk%cons(2) / model%geometry%thck(ew,ns)
+
+ ! NOTE: the ordering below matters -- supd is built from the purely
+ ! advective part of subd BEFORE the diffusive part is folded into subd.
+ subd(2:model%general%upn-1) = fact(2) * weff(2:model%general%upn-1) * &
+ model%tempwk%dups(2:model%general%upn-1,3)
+
+ supd(2:model%general%upn-1) = - subd(2:model%general%upn-1) - fact(1) * &
+ model%tempwk%dups(2:model%general%upn-1,2)
+
+ subd(2:model%general%upn-1) = subd(2:model%general%upn-1) - fact(1) * &
+ model%tempwk%dups(2:model%general%upn-1,1)
+
+ diag(2:model%general%upn-1) = 1.0d0 - subd(2:model%general%upn-1) &
+ - supd(2:model%general%upn-1) &
+ + diagadvt(2:model%general%upn-1)
+
+ ! upper surface: Dirichlet row (T fixed to artm via the RHS)
+ supd(1) = 0.0d0
+ subd(1) = 0.0d0
+ diag(1) = 1.0d0
+
+ ! now do the basal boundary
+ ! for grounded ice, a heat flux is applied
+ ! for floating ice, temperature held constant
+
+ if (float) then
+
+ supd(model%general%upn) = 0.0d0
+ subd(model%general%upn) = 0.0d0
+ diag(model%general%upn) = 1.0d0
+
+ else
+
+ ! one-sided diffusive closure over the bottom half-layer dupn
+ supd(model%general%upn) = 0.0d0
+ subd(model%general%upn) = -0.5*fact(1)/(model%tempwk%dupn**2)
+ diag(model%general%upn) = 1.0d0 - subd(model%general%upn) + diagadvt(model%general%upn)
+
+ end if
+
+ end subroutine findvtri
+
+ !-------------------------------------------------------------------------
+
+ subroutine findvtri_init(model,ew,ns,subd,diag,supd,weff,temp,thck,float)
+ !> called during first iteration to set inittemp
+ !> Stores the explicit (old-time) part of the tri-diagonal system,
+ !> including -- for grounded ice -- the geothermal and basal-sliding
+ !> heat-flux terms at the bed; reused unchanged by later iterations.
+
+ use glimmer_paramets, only: vel0, vel_scale
+
+ type(glide_global_type) :: model
+ integer, intent(in) :: ew, ns
+ real(dp), dimension(:), intent(in) :: temp,diag,subd,supd,weff
+ real(dp), intent(in) :: thck
+ logical, intent(in) :: float
+
+ ! local variables
+ real(dp) :: slterm ! accumulated sliding (taub*ubas) heating term
+ integer ewp,nsp
+ integer slide_count ! number of surrounding velo nodes that are sliding
+
+ ! interior levels: Crank-Nicolson explicit half-step plus stored
+ ! horizontal advection and strain dissipation
+ model%tempwk%inittemp(2:model%general%upn-1,ew,ns) = temp(2:model%general%upn-1) * &
+ (2.0d0 - diag(2:model%general%upn-1)) &
+ - temp(1:model%general%upn-2) * subd(2:model%general%upn-1) &
+ - temp(3:model%general%upn) * supd(2:model%general%upn-1) &
+ - model%tempwk%initadvt(2:model%general%upn-1,ew,ns) &
+ + model%temper%dissip(2:model%general%upn-1,ew,ns)
+
+ if (float) then
+ model%tempwk%inittemp(model%general%upn,ew,ns) = temp(model%general%upn)
+ !EIB old!model%tempwk%inittemp(model%general%upn,ew,ns) = pmpt(thck)
+ else
+ ! sliding contribution to basal heat flux
+ slterm = 0.
+ slide_count = 0
+
+ !whl - BUG! - The following expression for taub*ubas is valid only for the SIA
+ ! Need a different expression for HO dynamics
+
+ ! only include sliding contrib if temperature node is surrounded by sliding velo nodes
+ do nsp = ns-1,ns
+ do ewp = ew-1,ew
+
+!SCALING - WHL: Multiply ubas by vel0/vel_scale so we get the same result in these two cases:
+! (1) Old Glimmer with scaling: vel0 = vel_scale = 500/scyr, and ubas is non-dimensional.
+! (2) New CISM without scaling: vel0 = 1/scyr, vel_scale = 500/scyr, and ubas is in m/yr.
+
+!!! if ( abs(model%velocity%ubas(ewp,nsp)) > 0.000001 .or. &
+!!! abs(model%velocity%vbas(ewp,nsp)) > 0.000001 ) then
+ if ( abs(model%velocity%ubas(ewp,nsp))*(vel0/vel_scale) > 1.d-6 .or. &
+ abs(model%velocity%vbas(ewp,nsp))*(vel0/vel_scale) > 1.d-6 ) then
+
+ slide_count = slide_count + 1
+ slterm = slterm + (&
+ model%geomderv%dusrfdew(ewp,nsp) * model%velocity%ubas(ewp,nsp) + &
+ model%geomderv%dusrfdns(ewp,nsp) * model%velocity%vbas(ewp,nsp))
+ end if
+ end do
+ end do
+ ! average over the four velo nodes, or drop the term entirely if any
+ ! of the four is not sliding
+ if (slide_count >= 4) then
+ slterm = 0.25*slterm
+ else
+ slterm = 0.
+ end if
+ model%tempwk%inittemp(model%general%upn,ew,ns) = temp(model%general%upn) * &
+ (2.0d0 - diag(model%general%upn)) &
+ - temp(model%general%upn-1) * subd(model%general%upn) &
+ - 0.5*model%tempwk%cons(3) * model%temper%bheatflx(ew,ns) / (thck * model%tempwk%dupn) & ! geothermal heat flux (diff)
+ - model%tempwk%slide_f(1)*slterm/ model%tempwk%dupn & ! sliding heat flux (diff)
+ - model%tempwk%cons(4) * model%temper%bheatflx(ew,ns) * weff(model%general%upn) & ! geothermal heat flux (adv)
+ - model%tempwk%slide_f(2)*thck*slterm* weff(model%general%upn) & ! sliding heat flux (adv)
+ - model%tempwk%initadvt(model%general%upn,ew,ns) &
+ + model%temper%dissip(model%general%upn,ew,ns)
+ end if
+
+ end subroutine findvtri_init
+
+ !-----------------------------------------------------------------------
+
+ subroutine findvtri_rhs(model,ew,ns,artm,iteradvt,rhsd,float)
+
+ !> Assemble the right-hand side of the vertical temperature
+ !> tri-diagonal system for column (ew,ns).
+
+ type(glide_global_type) :: model
+ integer, intent(in) :: ew, ns
+ real(dp), intent(in) :: artm ! surface air temperature (deg C)
+ real(dp), dimension(:), intent(in) :: iteradvt ! explicit horizontal-advection terms
+ real(dp), dimension(:), intent(out) :: rhsd
+ logical, intent(in) :: float
+
+ integer :: kbed
+
+ kbed = model%general%upn
+
+ ! surface: Dirichlet condition, T = artm
+ rhsd(1) = artm
+
+ ! interior levels: stored explicit state minus the advection update
+ rhsd(2:kbed-1) = model%tempwk%inittemp(2:kbed-1,ew,ns) - iteradvt(2:kbed-1)
+
+ ! bed: floating ice holds the stored value (temperature fixed);
+ ! grounded ice also subtracts the advective term
+ rhsd(kbed) = model%tempwk%inittemp(kbed,ew,ns)
+ if (.not. float) rhsd(kbed) = rhsd(kbed) - iteradvt(kbed)
+
+ end subroutine findvtri_rhs
+
+!-------------------------------------------------------------------
+
+  subroutine glide_calcbmlt(model, temp, &
+                            thck, stagthck, &
+                            dusrfdew, dusrfdns, &
+                            ubas, vbas, &
+                            bmlt, floater)
+
+    !> Compute the basal melt rate for grounded ice whose bed is at the
+    !> pressure melting point, combining geothermal heating, sliding
+    !> (frictional) heating and strain-heating dissipation.
+
+    type(glide_global_type) :: model
+    real(dp), dimension(:,0:,0:), intent(in) :: temp
+    real(dp), dimension(:,:), intent(in) :: thck, stagthck, dusrfdew, dusrfdns, ubas, vbas
+    real(dp), dimension(:,:), intent(inout) :: bmlt ! scaled basal melting, m/s * tim0/thk0
+                                                    ! > 0 for melting, < 0 for freeze-on
+    logical, dimension(:,:), intent(in) :: floater
+
+    ! pmptemp = pressure melting point profile for the local column
+    real(dp), dimension(size(model%numerics%sigma)) :: pmptemp
+    real(dp) :: slterm, newmlt
+
+    integer :: ewp, nsp, up, ew, ns
+
+    !LOOP: all scalar points except outer row
+
+    do ns = 2, model%general%nsn-1
+       do ew = 2, model%general%ewn-1
+          if (thck(ew,ns) > model%numerics%thklim .and. .not. floater(ew,ns)) then
+
+             call calcpmpt(pmptemp,thck(ew,ns),model%numerics%sigma)
+
+             ! is the bed within 1 mK of the pressure melting point?
+             if (abs(temp(model%general%upn,ew,ns)-pmptemp(model%general%upn)) < 0.001) then
+
+                slterm = 0.0d0
+
+                ! 0-order SIA approx. --> Tau_d = Tau_b
+
+                ! accumulate sliding heating from the four surrounding velocity points
+                do nsp = ns-1,ns
+                   do ewp = ew-1,ew
+                      slterm = slterm - stagthck(ewp,nsp) * &
+                           (dusrfdew(ewp,nsp) * ubas(ewp,nsp) + dusrfdns(ewp,nsp) * vbas(ewp,nsp))
+                   end do
+                end do
+
+                !*sfp* NOTE that multiplication by this term has been moved up from below
+                slterm = model%tempwk%f(4) * slterm
+
+                bmlt(ew,ns) = 0.0d0
+
+                !*sfp* changed this so that 'slterm' is multiplied by f(4) const. above ONLY for the 0-order SIA case,
+                ! since for the HO and SSA cases a diff. const. needs to be used
+
+                ! OLD version
+!                newmlt = model%tempwk%f(4) * slterm - model%tempwk%f(2)*model%temper%bheatflx(ew,ns) + model%tempwk%f(3) * &
+!                         model%tempwk%dupc(model%general%upn) * &
+!                         thck(ew,ns) * model%temper%dissip(model%general%upn,ew,ns)
+
+                ! NEW version (sfp)
+                ! melt at the bed: sliding + geothermal + dissipation in the lowest layer
+                newmlt = slterm - model%tempwk%f(2)*model%temper%bheatflx(ew,ns) &
+                       + model%tempwk%f(3) * model%tempwk%dupc(model%general%upn) * &
+                         thck(ew,ns) * model%temper%dissip(model%general%upn,ew,ns)
+
+                up = model%general%upn - 1
+
+                ! walk upward through any temperate layers (within 1 mK of the
+                ! melting point), adding each layer's contribution to the total
+                do while (abs(temp(up,ew,ns)-pmptemp(up)) < 1.d-3 .and. up >= 3)
+                   bmlt(ew,ns) = bmlt(ew,ns) + newmlt
+                   newmlt = model%tempwk%f(3) * model%tempwk%dupc(up) * thck(ew,ns) * model%temper%dissip(up,ew,ns)
+                   up = up - 1
+                end do
+
+                up = up + 1
+
+                ! subtract the conductive term, a one-sided vertical difference of
+                ! (temp - pmptemp) near the bed scaled by f(1)
+                if (up == model%general%upn) then
+                   bmlt(ew,ns) = newmlt - &
+                        model%tempwk%f(1) * ( (temp(up-2,ew,ns) - pmptemp(up-2)) * model%tempwk%dupa(up) &
+                        + (temp(up-1,ew,ns) - pmptemp(up-1)) *  model%tempwk%dupb(up) ) / thck(ew,ns)
+                else
+                   ! clamp at zero so conduction cannot produce spurious freeze-on here
+                   bmlt(ew,ns) = bmlt(ew,ns) + max(0.d0, newmlt - &
+                        model%tempwk%f(1) * ( (temp(up-2,ew,ns) - pmptemp(up-2)) * model%tempwk%dupa(up) &
+                        + (temp(up-1,ew,ns) - pmptemp(up-1)) *  model%tempwk%dupb(up) ) / thck(ew,ns))
+                end if
+
+             else
+
+                ! bed below the pressure melting point: no melt
+                bmlt(ew,ns) = 0.d0
+
+             end if
+
+          !EIB! else if (model%options%use_plume == 1) then
+
+             ! do nothing because the plume model will have written the bmlt field
+          else
+
+             ! thin or floating ice: no grounded basal melt computed here
+             bmlt(ew,ns) = 0.d0
+
+          end if
+       end do
+    end do
+
+    ! apply periodic BC
+
+    if (model%options%periodic_ew) then
+       do ns = 2,model%general%nsn-1
+          bmlt(1,ns) = bmlt(model%general%ewn-1,ns)
+          bmlt(model%general%ewn,ns) = bmlt(2,ns)
+       end do
+    end if
+
+  end subroutine glide_calcbmlt
+
+!-------------------------------------------------------------------
+
+  subroutine glide_finddisp(model, &
+                            thck , stagthck, &
+                            dusrfdew, dusrfdns, &
+                            flwa)
+
+    ! Compute the dissipation source term associated with strain heating.
+    ! Note also that dissip and flwa must have the same vertical dimension
+    ! (1:upn on an unstaggered vertical grid, or 1:upn-1 on a staggered vertical grid).
+
+    use glimmer_physcon, only : gn
+
+    type(glide_global_type) :: model
+    real(dp), dimension(:,:), intent(in) :: thck, stagthck, dusrfdew, dusrfdns
+    real(dp), dimension(:,:), intent(in) :: flwa
+
+    ! exponent n+1 from Glen's flow law
+    integer, parameter :: p1 = gn + 1
+    integer :: ew, ns
+
+    real(dp) :: c2
+
+    !WHL - Previously, this subroutine computed dissipation using either an SIA
+    !      or 1st-order expression, based on the value of which_disp.
+    !      Now only the SIA expression is used for Glide.
+    !      (Glissade can use either one, depending on which_ho_disp.)
+
+    !*sfp* 0-order SIA case only
+    ! two methods of doing this.
+    ! 1. find dissipation at u-pts and then average
+    ! 2. find dissipation at H-pts by averaging quantities from u-pts
+    ! 2. works best for eismint divide (symmetry) but 1 likely to be better for full expts
+
+    model%temper%dissip(:,:,:) = 0.0d0
+
+    do ns = 2, model%general%nsn-1
+       do ew = 2, model%general%ewn-1
+          if (thck(ew,ns) > model%numerics%thklim) then
+
+             ! c2 = (H * |grad(usrf)|)**(n+1), with thickness and slopes averaged
+             ! from the four surrounding staggered (u) points onto the H point
+             c2 = (0.25*sum(stagthck(ew-1:ew,ns-1:ns)) * dsqrt((0.25*sum(dusrfdew(ew-1:ew,ns-1:ns)))**2 &
+                  + (0.25*sum(dusrfdns(ew-1:ew,ns-1:ns)))**2))**p1
+
+             ! weighted 3x3 average of flwa (corner weight 1, edge 2, centre 4),
+             ! applied level-by-level with the precomputed c1 coefficients
+             model%temper%dissip(:,ew,ns) = c2 * model%tempwk%c1(:) * ( &
+                  flwa(:,ew-1,ns-1) + flwa(:,ew-1,ns+1) + flwa(:,ew+1,ns+1) + flwa(:,ew+1,ns-1) + &
+                  2*(flwa(:,ew-1,ns)+flwa(:,ew+1,ns)+flwa(:,ew,ns-1)+flwa(:,ew,ns+1)) + &
+                  4*flwa(:,ew,ns))
+
+          end if
+       end do
+    end do
+
+  end subroutine glide_finddisp
+
+!-----------------------------------------------------------------------------------
+
+  subroutine corrpmpt(temp,thck,bwat,sigma,upn)
+
+    !> Clip a temperature column at the pressure melting point and, where
+    !> basal water is present, pin the basal temperature to the melting point.
+
+    real(dp), dimension(:), intent(inout) :: temp   ! temperature column
+    real(dp), intent(in) :: thck, bwat              ! ice thickness, basal water depth
+    integer, intent(in) :: upn                      ! index of the basal level
+    real(dp), dimension(:), intent(in) :: sigma     ! vertical sigma coordinate
+
+    real(dp) :: pmptemp(size(temp))                 ! melting point at each level
+
+    ! pressure melting point profile for this column
+    call calcpmpt(pmptemp,thck,sigma)
+
+    ! 1. no level may exceed the pressure melting point
+    temp = min(temp,pmptemp)
+
+    ! 2. a wet bed sits exactly at the pressure melting point
+    if (bwat > 0.0d0) temp(upn) = pmptemp(upn)
+
+  end subroutine corrpmpt
+
+!-------------------------------------------------------------------
+
+  subroutine calcpmpt(pmptemp,thck,sigma)
+
+    !> Pressure melting point (relative to 0 deg C) at every sigma level
+    !> of an ice column of (scaled) thickness thck.
+
+    use glimmer_physcon, only : rhoi, grav, pmlt
+    use glimmer_paramets, only : thk0
+
+    real(dp), dimension(:), intent(out) :: pmptemp  ! melting point at each level
+    real(dp), intent(in) :: thck                    ! scaled ice thickness
+    real(dp), intent(in), dimension(:) :: sigma     ! vertical sigma coordinate
+
+    ! melting-point depression per unit of scaled ice depth
+    real(dp), parameter :: pmp_fact = - grav * rhoi * pmlt * thk0
+
+    integer :: up
+
+    do up = 1, size(pmptemp)
+       pmptemp(up) = pmp_fact * thck * sigma(up)
+    end do
+
+  end subroutine calcpmpt
+
+ !-----------------------------------------------------------------------
+
+  subroutine glide_calcbpmp(model,thck,bpmp)
+
+    !> Basal pressure melting point over the interior of the domain;
+    !> the outermost row and column are left at zero.
+
+    type(glide_global_type) :: model
+    real(dp), dimension(:,:), intent(in) :: thck    ! ice thickness
+    real(dp), dimension(:,:), intent(out) :: bpmp   ! basal pressure melting point
+
+    integer :: ew, ns, ewn, nsn
+
+    ewn = model%general%ewn
+    nsn = model%general%nsn
+
+    bpmp(:,:) = 0.d0
+
+    do ns = 2, nsn-1
+       do ew = 2, ewn-1
+          call calcpmptb(bpmp(ew,ns), thck(ew,ns))
+       end do
+    end do
+
+  end subroutine glide_calcbpmp
+
+!-------------------------------------------------------------------
+
+  subroutine calcpmptb(pmptemp,thck)
+
+    !> Pressure melting point at the bed of an ice column (sigma = 1).
+
+    use glimmer_physcon, only : rhoi, grav, pmlt
+    use glimmer_paramets, only : thk0
+
+    real(dp), intent(out) :: pmptemp   ! basal pressure melting point
+    real(dp), intent(in) :: thck       ! scaled ice thickness
+
+    ! melting-point depression per unit of scaled ice thickness
+    real(dp), parameter :: depression = - grav * rhoi * pmlt * thk0
+
+    pmptemp = depression * thck
+
+  end subroutine calcpmptb
+
+!-------------------------------------------------------------------
+
+  subroutine glide_calcflwa(sigma, thklim, flwa, temp, thck, flow_enhancement_factor, default_flwa_arg, flag)
+
+    !> Calculates Glen's $A$ over the three-dimensional domain,
+    !> using one of three possible methods.
+
+    ! Fix: flow_enhancement_factor previously had no intent attribute; it is
+    ! only read here, so declare it intent(in) like the other dummies.
+    ! Also tightened the blanket physcon use to an explicit only-list.
+
+    use glimmer_physcon, only : grav, rhoi, pmlt, scyr, arrmlh, arrmll, actenh, actenl, gascon
+    use glimmer_paramets, only : thk0, vis0
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+
+    ! Note: The temperature array is assumed to start with horizontal index 1 (not 0).
+    !       We are not updating flwa in the glide temperature halo.
+
+    ! The flwa, temp, and sigma arrays should have the same vertical dimension, 1:upn.
+    ! These quantities are defined at layer interfaces (not layer midpoints as in the
+    ! glam/glissade dycore).
+
+    real(dp),dimension(:),     intent(in)  :: sigma      !> Vertical coordinate
+    real(dp),                  intent(in)  :: thklim     !> thickness threshold
+    real(dp),dimension(:,:,:), intent(out) :: flwa       !> The calculated values of $A$
+    real(dp),dimension(:,:,:), intent(in)  :: temp       !> The 3D temperature field
+    real(dp),dimension(:,:),   intent(in)  :: thck       !> The ice thickness
+    real(dp),                  intent(in)  :: flow_enhancement_factor !> flow enhancement factor in arrhenius relationship
+    real(dp),                  intent(in)  :: default_flwa_arg !> Glen's A to use in isothermal case
+    integer,                   intent(in)  :: flag       !> Flag to select the method
+                                                         !> of calculation:
+                                                         !> \begin{description}
+                                                         !> \item[0] {\em Paterson and Budd} relationship.
+                                                         !> \item[1] {\em Paterson and Budd} relationship, with temperature set to
+                                                         !> -5$^{\circ}$C.
+                                                         !> \item[2] Set constant, {\em but not sure how this works at the moment\ldots}
+                                                         !> \end{description}
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+
+    ! pressure correction factor: tempcor = T + fact*H*sigma gives T - T_pmp
+    real(dp), parameter :: fact = grav * rhoi * pmlt * thk0
+    real(dp), parameter :: contemp = -5.0d0    ! fixed temperature for flag = 1
+    real(dp) :: default_flwa
+    real(dp),dimension(4) :: arrfact
+    real(dp), dimension(size(flwa,1)) :: tempcor
+
+    integer :: ew, ns, up, ewn, nsn, upn
+
+    !------------------------------------------------------------------------------------
+
+! Some notes:
+!   vis0 = 1.39e-032  Pa-3 s-1 for glam dycore (and here for glide)
+!        = tau0**(-gn) * (vel0/len0) where tau0 = rhoi*grav*thk0
+!   vis0*scyr = 4.39e-025 Pa-2 yr-1
+!   For glam: default_flwa_arg = 1.0d-16 Pa-3 yr-1 by default
+!   Result is default_flwa = 227657117 (unitless) if flow factor = 1
+!   This is the value given to thin ice.
+!
+!   In old glide, default_flwa is just set to the flow factor (called 'fiddle')
+!   vis0 = 3.17E-024 Pa-3 s-1 for old glide dycore = 1d-16 Pa-3 yr-1 / scyr
+!
+
+    default_flwa = flow_enhancement_factor * default_flwa_arg / (vis0*scyr)
+
+    !write(*,*)"Default flwa = ",default_flwa
+
+    upn=size(flwa,1) ; ewn=size(flwa,2) ; nsn=size(flwa,3)
+
+    arrfact = (/ flow_enhancement_factor * arrmlh / vis0, &  ! Value of a when T* is above -263K
+                 flow_enhancement_factor * arrmll / vis0, &  ! Value of a when T* is below -263K
+                -actenh / gascon,                         &  ! Value of -Q/R when T* is above -263K
+                -actenl / gascon/)                           ! Value of -Q/R when T* is below -263K
+
+    select case(flag)
+    case(FLWA_PATERSON_BUDD)
+
+       ! This is the Paterson and Budd relationship
+
+       do ns = 1,nsn
+          do ew = 1,ewn
+             if (thck(ew,ns) > thklim) then
+
+                ! Calculate the corrected temperature T - T_pmp,
+                ! clamped to the range [-50, 0] deg C
+
+                do up = 1, upn
+                   tempcor(up) = min(0.0d0, temp(up,ew,ns) + thck(ew,ns) * fact * sigma(up))
+                   tempcor(up) = max(-50.0d0, tempcor(up))
+                enddo
+
+                ! Calculate Glen's A
+
+                call patebudd(tempcor(:), flwa(:,ew,ns), arrfact)
+
+             else
+                ! thin ice gets the default rate factor
+                flwa(:,ew,ns) = default_flwa
+             end if
+
+          end do
+       end do
+
+    case(FLWA_PATERSON_BUDD_CONST_TEMP)
+
+       ! This is the Paterson and Budd relationship, but with the temperature held constant
+       ! at -5 deg C
+
+       do ns = 1,nsn
+          do ew = 1,ewn
+             if (thck(ew,ns) > thklim) then
+
+                ! Calculate Glen's A with a fixed temperature.
+
+                call patebudd((/(contemp, up=1,upn)/),flwa(:,ew,ns),arrfact)
+
+             else
+                flwa(:,ew,ns) = default_flwa
+             end if
+          end do
+       end do
+
+    case(FLWA_CONST_FLWA)
+
+       ! spatially uniform rate factor
+       flwa(:,:,:) = default_flwa
+
+    end select
+
+  end subroutine glide_calcflwa
+
+!------------------------------------------------------------------------------------------
+
+  subroutine patebudd(tempcor,calcga,arrfact)
+
+    !> Glen's $A$ from the \emph{Paterson and Budd} [1982] Arrhenius relation,
+    !> \[
+    !> A(T^{*})=a \exp \left(\frac{-Q}{RT^{*}}\right)
+    !> \]
+    !> (equation 9 in {\em Payne and Dongelmans}), where $a$ is a constant of
+    !> proportionality, $Q$ the activation energy for ice creep, and $R$ the
+    !> universal gas constant. The pressure-corrected temperature is
+    !> $T^{*}=T-T_{\mathrm{pmp}}+T_0$ with
+    !> $T_{\mathrm{pmp}}=T_0-\sigma \rho g H \Phi$; the input tempcor is
+    !> $T-T_{\mathrm{pmp}}$ and the triple point $T_0$ is added here.
+
+    use glimmer_physcon, only : trpt
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+
+    real(dp),dimension(:), intent(in)  :: tempcor  !> T - T_pmp profile (deg C)
+    real(dp),dimension(4), intent(in)  :: arrfact  !> (/ a_warm, a_cold, -Q_warm/R, -Q_cold/R /)
+                                                   !> set when the velo module is initialised
+    real(dp),dimension(:), intent(out) :: calcga   !> resulting values of Glen's A
+
+    integer :: up
+
+!   arrfact = (/ flow_enhancement_factor * arrmlh / vis0, &  ! Value of a when T* is above -263K
+!                flow_enhancement_factor * arrmll / vis0, &  ! Value of a when T* is below -263K
+!               -actenh / gascon,                         &  ! Value of -Q/R when T* is above -263K
+!               -actenl / gascon/)                           ! Value of -Q/R when T* is below -263K
+!
+!   where arrmlh = 1.733d3 Pa-3 s-1
+!         arrmll = 3.613d-13 Pa-3 s-1
+!   and vis0 has units Pa-3 s-1
+!   The result calcga is a scaled flwa, multiplied by flow_enhancement_factor
+
+    ! Piecewise Arrhenius constants: "warm" branch at or above -10 deg C,
+    ! "cold" branch below, evaluated level by level.
+
+    do up = 1, size(tempcor)
+       if (tempcor(up) >= -10.0d0) then
+          calcga(up) = arrfact(1) * exp(arrfact(3) / (tempcor(up) + trpt))
+       else
+          calcga(up) = arrfact(2) * exp(arrfact(4) / (tempcor(up) + trpt))
+       end if
+    end do
+
+  end subroutine patebudd
+
+!-------------------------------------------------------------------
+
+end module glide_temp
+
+!-------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/glide_thck.F90 b/components/cism/glimmer-cism/libglide/glide_thck.F90
new file mode 100644
index 0000000000..a8f47ecb08
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_thck.F90
@@ -0,0 +1,1137 @@
+! WJS (1-30-12): The following (turning optimization off) is included as a workaround for
+! LONG (infinite???) compile times with xlf, at least in IBM XL Fortran for AIX, V12.1 on bluefire
+#ifdef CPRIBM
+@PROCESS OPT(0)
+#endif
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_thck.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+!   along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_nan.inc"
+
+module glide_thck
+
+ use glimmer_global, only : dp
+ use glide_types
+ use glimmer_sparse
+ use glimmer_sparse_type
+
+ !DEBUG only
+!! use xls
+
+ !TODO - Remove oldglide when code comparisons are complete
+ use glimmer_paramets, only: oldglide
+
+ implicit none
+
+ private
+
+ public :: init_thck, thck_nonlin_evolve, thck_lin_evolve, stagleapthck, &
+ glide_thck_index, glide_calclsrf
+
+ ! debugging Picard iteration
+ integer, private, parameter :: picard_unit=101
+ real(dp),private, parameter :: picard_interval=500.d0
+ integer, private :: picard_max=0
+
+contains
+
+  subroutine init_thck(model)
+
+    !> initialise work data for ice thickness evolution
+    use glimmer_log
+    use glimmer_paramets, only: GLC_DEBUG
+    implicit none
+    type(glide_global_type) :: model
+
+    ! Removed this messy array
+!!    model%solver_data%fc2 = (/ model%numerics%alpha * model%numerics%dt / (2.0d0 * model%numerics%dew * model%numerics%dew), &
+!!                               model%numerics%dt, &
+!!                               (1.0d0 - model%numerics%alpha) / model%numerics%alpha, &
+!!                               1.0d0 / model%numerics%alpha, &
+!!                               model%numerics%alpha * model%numerics%dt / (2.0d0 * model%numerics%dns * model%numerics%dns), &
+!!                               0.0d0 /)
+
+    ! WJS: The following code has syntax errors; simply commenting it out for now
+    !    if (GLC_DEBUG) then
+    !       call write_log('Logging Picard iterations')
+    !       if (main_task) then
+    !          open(picard_unit,name='picard_info.data',status='unknown')
+    !          write(picard_unit,*) '#time    max_iter'
+    !       end if
+    !    end if
+
+!TODO - Make sure the arrays allocated here are deallocated at the end of the run.
+!       Might want to move allocation/deallocation to subroutines in glide_types.
+
+    ! allocate memory for ADI scheme
+
+    ! Work arrays are sized by the larger horizontal dimension — presumably so
+    ! each can serve both the ew and ns sweep directions; confirm in the ADI solver.
+    if (model%options%whichevol == EVOL_ADI) then
+       allocate(model%thckwk%alpha(max(model%general%ewn, model%general%nsn)))
+       allocate(model%thckwk%beta (max(model%general%ewn, model%general%nsn)))
+       allocate(model%thckwk%gamma(max(model%general%ewn, model%general%nsn)))
+       allocate(model%thckwk%delta(max(model%general%ewn, model%general%nsn)))
+    end if
+
+  end subroutine init_thck
+
+!---------------------------------------------------------------------------------
+
+  subroutine thck_lin_evolve(model,newtemps)
+
+    !> this subroutine solves the linearised ice thickness equation by computing the
+    !> diffusivity from quantities of the previous time step
+
+    use glide_velo
+    use glimmer_paramets, only: GLC_DEBUG
+    use glide_grid_operators, only: glide_geometry_derivs
+
+    implicit none
+
+    ! subroutine arguments
+    type(glide_global_type) :: model
+    logical, intent(in) :: newtemps !> true when we should recalculate Glen's A
+
+    if (model%geometry%empty) then
+
+       ! no ice anywhere: thickness evolves by accumulation alone
+       model%geometry%thck = dmax1(0.0d0, model%geometry%thck + model%climate%acab * model%numerics%dt)
+       if (GLC_DEBUG) then
+          print *, "* thck empty - net accumulation added", model%numerics%time
+       end if
+    else
+
+       !Note: glide_geometry_derivs is called at the beginning of glide_tstep_p1,
+       !      and the geometry has not changed, so stagthck and the geometry
+       !      derivatives are still up to date.  A call might be needed here
+       !      if glide_tstep_p2 were called out of order.
+
+!!       call glide_geometry_derivs(model)
+
+       ! calculate basal velos
+       if (newtemps) then
+
+          ! flag = 1: presumably refreshes the basal traction for the new
+          ! temperature field — confirm against slipvelo
+          call slipvelo(model,                &
+                        1,                    &
+                        model%velocity% btrc, &
+                        model%velocity% ubas, &
+                        model%velocity% vbas)
+
+          ! calculate Glen's A if necessary
+          call velo_integrate_flwa(model%velowk,           &
+                                   model%geomderv%stagthck,&
+                                   model%temper%flwa)
+
+       end if
+
+       ! flag = 2: basal contribution to the diffusivity
+       call slipvelo(model,                &
+                     2,                    &
+                     model%velocity% btrc, &
+                     model%velocity% ubas, &
+                     model%velocity% vbas)
+
+       ! calculate diffusivity
+
+       call velo_calc_diffu(model%velowk,            model%geomderv%stagthck, &
+                            model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+                            model%velocity%diffu)
+
+       ! get new thicknesses
+
+       ! linear solve: diffusivity from the old geometry, rhs computed fresh
+       call thck_evolve(model,                                    &
+                        model%velocity%diffu, model%velocity%diffu, &
+                        .true.,                                   &
+                        model%geometry%thck,  model%geometry%thck)
+
+!--- MJH: Since the linear evolution uses a diffusivity based on the old geometry, the
+!    velocity calculated here will also be based on the old geometry.  If it is
+!    desired to calculate a velocity for the new evolved geometry, then the diffusivity
+!    and other things need to be updated before calculating velocity (commented out with !* ).
+!    If using this block starting with !* , delete the call to slipvelo with option 3 below.
+!*    ! Update geometry information for new thickness before calculating velocity
+!*    model%geometry%usrf = model%geometry%lsrf + model%geometry%thck  ! usrf needed for slope calculations in geometry_derivs
+!*    call geometry_derivs(model)  !this updates stagthck and the slopes
+!*    call velo_calc_diffu(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+!*             model%geomderv%dusrfdns,model%velocity%diffu)
+!*    call slipvelo(model,                &
+!*                  0,                    &
+!*                  model%velocity% btrc, &
+!*                  model%velocity% ubas, &
+!*                  model%velocity% vbas)
+!----
+
+       ! calculate horizontal velocity field
+       ! (These calls must appear after thck_evolve, as thck_evolve uses ubas,
+       !  which slipvelo mutates)
+
+       ! flag = 3: basal velocity from the diffusivities
+       call slipvelo(model,               &
+                     3,                   &
+                     model%velocity%btrc, &
+                     model%velocity%ubas, &
+                     model%velocity%vbas)
+
+       call velo_calc_velo(model%velowk,            model%geomderv%stagthck, &
+                           model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+                           model%temper%flwa,       model%velocity%diffu,    &
+                           model%velocity%ubas,     model%velocity%vbas,     &
+                           model%velocity%uvel,     model%velocity%vvel,     &
+                           model%velocity%uflx,     model%velocity%vflx,&
+                           model%velocity%velnorm)
+
+    end if
+
+  end subroutine thck_lin_evolve
+
+!---------------------------------------------------------------------------------
+
+  subroutine thck_nonlin_evolve(model,newtemps)
+
+    !> this subroutine solves the ice thickness equation by doing an outer,
+    !> non-linear iteration to update the diffusivities and in inner, linear
+    !> iteration to calculate the new ice thickness distrib
+
+    use glide_velo
+    use glide_setup
+    use glide_nonlin !For unstable manifold correction
+    use glimmer_paramets, only: thk0, thk_scale, GLC_DEBUG
+    use glide_grid_operators, only: glide_geometry_derivs
+
+    !EIB! use glide_deriv, only : df_field_2d_staggered
+    implicit none
+    ! subroutine arguments
+    type(glide_global_type) :: model
+    logical, intent(in) :: newtemps !> true when we should recalculate Glen's A
+
+    ! local variables
+    integer, parameter :: pmax=50          !> maximum Picard iterations
+
+    real(dp), parameter :: tol=1.0d-6      ! Picard convergence tolerance on thickness
+    real(dp) :: residual
+    integer p
+    logical first_p
+
+#ifdef USE_UNSTABLE_MANIFOLD
+    ! local variables used by unstable manifold correction
+    real(dp), dimension(model%general%ewn*model%general%nsn) :: umc_new_vec
+    real(dp), dimension(model%general%ewn*model%general%nsn) :: umc_old_vec
+    real(dp), dimension(model%general%ewn*model%general%nsn) :: umc_correction_vec
+    logical :: umc_continue_iteration
+    integer :: linearize_start
+
+    umc_correction_vec = 0
+    umc_new_vec = 0
+    umc_old_vec = 0
+#endif
+
+    if (model%geometry%empty) then
+
+       ! no ice anywhere: thickness evolves by accumulation alone
+       model%geometry%thck = dmax1(0.0d0, model%geometry%thck + model%climate%acab * model%numerics%dt)
+       if (GLC_DEBUG) then
+          print *, "* thck empty - net accumulation added", model%numerics%time
+       end if
+    else
+
+       !Note: glide_geometry_derivs is called at the beginning of glide_tstep_p1,
+       !      and the geometry has not changed, so stagthck and the geometry
+       !      derivatives are still up to date.  A call might be needed here
+       !      if glide_tstep_p2 were called out of order.
+       !      This subroutine must be called during each Picard iteration below.
+
+!!       call glide_geometry_derivs(model)
+
+       ! calculate basal velos
+       if (newtemps) then
+
+          ! flag = 1: presumably refreshes the basal traction for the new
+          ! temperature field — confirm against slipvelo
+          call slipvelo(model,                &
+                        1,                    &
+                        model%velocity% btrc, &
+                        model%velocity% ubas, &
+                        model%velocity% vbas)
+
+          ! calculate Glen's A if necessary
+          call velo_integrate_flwa(model%velowk,           &
+                                   model%geomderv%stagthck,&
+                                   model%temper%flwa)
+
+       end if
+
+       first_p = .true.
+       model%thckwk%oldthck = model%geometry%thck
+
+       ! do Picard iteration: alternately update diffusivities from the latest
+       ! geometry and re-solve the linear thickness system until convergence
+
+       model%thckwk%oldthck2 = model%geometry%thck
+
+       do p = 1, pmax
+
+          ! update stagthck, dusrfdew/dns, dthckdew/dns
+
+          call glide_geometry_derivs(model)
+
+          ! flag = 2: compute basal contribution to diffusivity
+          call slipvelo(model,                &
+                        2,                    &
+                        model%velocity% btrc, &
+                        model%velocity% ubas, &
+                        model%velocity% vbas)
+
+          ! calculate diffusivity
+          call velo_calc_diffu(model%velowk,            model%geomderv%stagthck, &
+                               model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+                               model%velocity%diffu)
+
+          ! get new thickness (rhs is assembled only on the first pass)
+          call thck_evolve(model, model%velocity%diffu, model%velocity%diffu, &
+                           first_p, model%geometry%thck, model%geometry%thck)
+
+          first_p = .false.
+
+!TODO - Is this option ever used?  If so, then replace the ifdef with a logical option?
+#ifdef USE_UNSTABLE_MANIFOLD
+          linearize_start = 1
+          call linearize_2d(umc_new_vec, linearize_start, model%geometry%thck)
+          linearize_start = 1
+          call linearize_2d(umc_old_vec, linearize_start, model%thckwk%oldthck2)
+          umc_continue_iteration = unstable_manifold_correction(umc_new_vec, umc_old_vec, &
+                                                                umc_correction_vec, size(umc_correction_vec),&
+                                                                tol)
+          !Only the old thickness might change as a result of this call
+          linearize_start = 1
+          call delinearize_2d(umc_old_vec, linearize_start, model%thckwk%oldthck2)
+
+          if (umc_continue_iteration) then
+             exit
+          end if
+#else
+!SCALING - Multiply thickness residual by thk0/thk_scale so we get the same result in these two cases:
+!          (1) Old Glimmer with scaling: thk0 = thk_scale = 2000 m, and thck is non-dimensional
+!          (2) New CISM without scaling: thk0 = 1, thk_scale = 2000 m, and thck is in true meters.
+
+!!!          residual = maxval(abs(model%geometry%thck-model%thckwk%oldthck2))
+          residual = maxval( abs(model%geometry%thck-model%thckwk%oldthck2) * (thk0/thk_scale) )
+
+          if (residual <= tol) then
+             exit
+          end if
+
+          model%thckwk%oldthck2 = model%geometry%thck
+#endif
+
+       end do   ! Picard iteration
+
+       if (GLC_DEBUG) then
+          ! NOTE(review): picard_unit is never opened (the open in init_thck is
+          ! commented out), so this write relies on runtime preconnection — confirm.
+          picard_max = max(picard_max,p)
+          if (model%numerics%tinc > mod(model%numerics%time,picard_interval)) then
+             write(picard_unit,*) model%numerics%time, p
+             picard_max = 0
+          end if
+       end if
+
+       ! Note: the values for stagthck, slopes, diffu, and ubas (from option 2 call to slipvelo)
+       !       will be outdated from the previous Picard iteration.
+       !       To ensure exact restarts are possible, calculate these one more time so that
+       !       they can be reconstructed with the restart values of thk and flwa
+       !       This will change answers very slightly (to within the Picard convergence tolerance)
+       !       relative to older versions of the code.  --MJH 1/9/13
+
+!WHL - oldglide does not update the diffusivity here
+!      By skipping this update, I get the same velocities as oldglide on the
+!       first timestep of the dome test case (within double-precision roundoff).
+!      Including this update, velocites agree only to ~4 sig digits.
+!
+
+       if (.not. oldglide) then   ! update the diffusivity before computing the final velocity
+
+          call glide_geometry_derivs(model)  ! stagvarb, geomders as in old Glide code
+
+          ! flag = 2: basal contribution to diffusivity
+          call slipvelo(model,               &
+                        2,                   &
+                        model%velocity%btrc, &
+                        model%velocity%ubas, &
+                        model%velocity%vbas)
+
+          ! calculate diffusivity
+          call velo_calc_diffu(model%velowk,            model%geomderv%stagthck, &
+                               model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+                               model%velocity%diffu)
+
+       endif    ! oldglide = F
+
+       ! calculate horizontal velocity field
+
+       ! flag = 3: Calculate the basal velocity from the diffusivities
+       call slipvelo(model,               &
+                     3,                   &
+                     model%velocity%btrc, &
+                     model%velocity%ubas, &
+                     model%velocity%vbas)
+
+       call velo_calc_velo(model%velowk,            model%geomderv%stagthck, &
+                           model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+                           model%temper%flwa,       model%velocity%diffu,    &
+                           model%velocity%ubas,     model%velocity%vbas,     &
+                           model%velocity%uvel,     model%velocity%vvel,     &
+                           model%velocity%uflx,     model%velocity%vflx,&
+                           model%velocity%velnorm)
+
+    end if   ! model%geometry%empty
+
+  end subroutine thck_nonlin_evolve
+
+!---------------------------------------------------------------------------------
+
+ subroutine thck_evolve(model, diffu_x, diffu_y, calc_rhs, old_thck, new_thck)
+
+ !> set up sparse matrix and solve matrix equation to find new ice thickness distribution
+ !> this routine does not override the old thickness distribution
+
+ use glimmer_log
+ use glimmer_paramets, only: vel0, thk0, GLC_DEBUG
+
+ implicit none
+
+ ! subroutine arguments -------------------------------------------------------------
+
+ type(glide_global_type) :: model
+ logical,intent(in) :: calc_rhs !> set to true when rhs should be calculated
+ !> i.e. when doing lin solution or first picard iteration
+ real(dp), intent(in), dimension(:,:) :: diffu_x
+ real(dp), intent(in), dimension(:,:) :: diffu_y
+ real(dp), intent(in), dimension(:,:) :: old_thck !> contains ice thicknesses from previous time step
+ real(dp), intent(inout), dimension(:,:) :: new_thck !> on entry contains first guess for new ice thicknesses
+ !> on exit contains ice thicknesses of new time step
+
+ ! local variables ------------------------------------------------------------------
+
+ real(dp), dimension(5) :: sumd
+ real(dp) :: err
+ integer :: linit
+ integer :: ew,ns
+
+ real(dp) :: alpha_dt_ew, alpha_dt_ns ! factors used repeatedly in matrix elements
+
+ alpha_dt_ew = model%numerics%alpha * model%numerics%dt / (2.0d0 * model%numerics%dew * model%numerics%dew)
+ alpha_dt_ns = model%numerics%alpha * model%numerics%dt / (2.0d0 * model%numerics%dns * model%numerics%dns)
+
+ ! Zero the arrays holding the sparse matrix
+ call sparse_clear(model%solver_data%matrix)
+
+ ! Set the order of the matrix
+ model%solver_data%matrix%order = model%geometry%totpts
+
+ !EIB! old way
+ ! the number of grid points
+ !model%solver_data%pcgsize(1) = model%geometry%totpts
+ ! Zero the arrays holding the sparse matrix
+ !model%solver_data%pcgval = 0.0
+ !model%solver_data%pcgcol = 0
+ !model%solver_data%pcgrow = 0
+ !model%solver_data%ct = 1
+
+ ! Boundary Conditions ---------------------------------------------------------------
+
+ ! BCs are for scalar points in outer layer of cells
+
+ ! north and south BC
+
+ do ew = 1,model%general%ewn
+
+ ns=1
+ if (model%geometry%thck_index(ew,ns) /= 0) then
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns), 1d0)
+ !EIB! old way
+ !call putpcgc(model%solver_data,1.0d0, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns))
+ if (calc_rhs) then
+ model%solver_data%rhsd(model%geometry%thck_index(ew,ns)) = old_thck(ew,ns)
+ end if
+ model%solver_data%answ(model%geometry%thck_index(ew,ns)) = new_thck(ew,ns)
+ end if
+
+ ns=model%general%nsn
+ if (model%geometry%thck_index(ew,ns) /= 0) then
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns), 1d0)
+ !EIB! old way
+ !call putpcgc(model%solver_data,1.0d0, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns))
+ if (calc_rhs) then
+ model%solver_data%rhsd(model%geometry%thck_index(ew,ns)) = old_thck(ew,ns)
+ end if
+ model%solver_data%answ(model%geometry%thck_index(ew,ns)) = new_thck(ew,ns)
+ end if
+
+ end do
+
+ ! east and west BC
+
+ if (model%options%periodic_ew) then
+
+ do ns=2,model%general%nsn-1
+ ew = 1
+ if (model%geometry%thck_index(ew,ns) /= 0) then
+ call findsums(model%general%ewn-2,model%general%ewn-1,ns-1,ns)
+ call generate_row(model%general%ewn-2,ew,ew+1,ns-1,ns,ns+1)
+ end if
+
+ ew=model%general%ewn
+ if (model%geometry%thck_index(ew,ns) /= 0) then
+ call findsums(1,2,ns-1,ns)
+ call generate_row(ew-1,ew,3,ns-1,ns,ns+1)
+ end if
+ end do
+
+ else
+
+ do ns=2,model%general%nsn-1
+
+ ew=1
+ if (model%geometry%thck_index(ew,ns) /= 0) then
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns), 1d0)
+ !EIB! old way
+ !call putpcgc(model%solver_data,1.0d0, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns))
+ if (calc_rhs) then
+ model%solver_data%rhsd(model%geometry%thck_index(ew,ns)) = old_thck(ew,ns)
+ end if
+ model%solver_data%answ(model%geometry%thck_index(ew,ns)) = new_thck(ew,ns)
+ end if
+
+ ew=model%general%ewn
+ if (model%geometry%thck_index(ew,ns) /= 0) then
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns), 1d0)
+ !EIB! old way
+ !call putpcgc(model%solver_data,1.0d0, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns))
+ if (calc_rhs) then
+ model%solver_data%rhsd(model%geometry%thck_index(ew,ns)) = old_thck(ew,ns)
+ end if
+ model%solver_data%answ(model%geometry%thck_index(ew,ns)) = new_thck(ew,ns)
+ end if
+
+ end do
+
+ end if ! periodic_ew
+
+ ! ice interior -------------------------------------------------------------------------
+
+ do ns = 2,model%general%nsn-1
+ do ew = 2,model%general%ewn-1
+
+ if (model%geometry%thck_index(ew,ns) /= 0) then
+
+ call findsums(ew-1, ew, ns-1, ns)
+ call generate_row(ew-1, ew, ew+1, ns-1, ns, ns+1)
+
+ end if
+ end do
+ end do
+
+ !TODO - EIB - not needed?
+ ! Calculate the total number of points
+ !model%solver_data%pcgsize(2) = model%solver_data%ct - 1
+
+ ! Solve the system using SLAP
+ !EIB! call slapsolv(model,linit,err)
+
+ call sparse_easy_solve(model%solver_data%matrix, &
+ model%solver_data%rhsd, model%solver_data%answ, &
+ err, linit)
+
+ ! Rejig the solution onto a 2D array
+
+ do ns = 1,model%general%nsn
+ do ew = 1,model%general%ewn
+ if (model%geometry%thck_index(ew,ns) /= 0) then
+ new_thck(ew,ns) = model%solver_data%answ(model%geometry%thck_index(ew,ns))
+ end if
+ end do
+ end do
+
+ new_thck = max(0.0d0, new_thck)
+
+ if (GLC_DEBUG) then
+ print *, "* thck ", model%numerics%time, linit, model%geometry%totpts, &
+ real(thk0 * new_thck(model%general%ewn/2+1,model%general%nsn/2+1)), &
+ real(vel0 * maxval(abs(model%velocity%ubas))), real(vel0*maxval(abs(model%velocity%vbas)))
+ end if
+
+ !TODO Why are lsrf and usrf calculated here? This is confusing because model%geometry%thck has only been updated
+ ! because new_thck points to it, but that was only the case because of the way this subroutine is called, and would
+ ! not generally be true. This calculation should be made with new_thck, if it's going to be made here at all!
+
+ ! calculate upper and lower surface
+
+ call glide_calclsrf(model%geometry%thck, model%geometry%topg, model%climate%eus, model%geometry%lsrf)
+ model%geometry%usrf = max(0.d0,model%geometry%thck + model%geometry%lsrf)
+
+ contains
+
+ subroutine generate_row(ewm, ew, ewp, &
+ nsm, ns, nsp)
+
+ ! calculate row of sparse matrix equation
+
+ ! Fill one row of the implicit thickness system for the central node (ew,ns),
+ ! coupling it to its four neighbours with weights sumd(1:4) (previously set by
+ ! findsums) and diagonal entry 1 + sumd(5). Optionally fills the matching
+ ! right-hand-side entry, and always seeds the solver's initial guess.
+ ! Host-associated state used: model, sumd, calc_rhs, old_thck, new_thck.
+
+ implicit none
+
+ integer, intent(in) :: ewm,ew,ewp ! ew index to left, central, right node
+ integer, intent(in) :: nsm,ns,nsp ! ns index to lower, central, upper node
+
+ !fill matrix using the new API
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ewm,ns), sumd(1)) ! point (ew-1,ns)
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ewp,ns), sumd(2)) ! point (ew+1,ns)
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,nsm), sumd(3)) ! point (ew,ns-1)
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,nsp), sumd(4)) ! point (ew,ns+1)
+ call sparse_insert_val(model%solver_data%matrix, model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns), 1d0 + sumd(5))! point (ew,ns)
+
+ !EIB! old way
+ ! fill sparse matrix
+ ! call putpcgc(model%solver_data,sumd(1), model%geometry%thck_index(ewm,ns), model%geometry%thck_index(ew,ns)) ! point (ew-1,ns)
+ ! call putpcgc(model%solver_data,sumd(2), model%geometry%thck_index(ewp,ns), model%geometry%thck_index(ew,ns)) ! point (ew+1,ns)
+ ! call putpcgc(model%solver_data,sumd(3), model%geometry%thck_index(ew,nsm), model%geometry%thck_index(ew,ns)) ! point (ew,ns-1)
+ ! call putpcgc(model%solver_data,sumd(4), model%geometry%thck_index(ew,nsp), model%geometry%thck_index(ew,ns)) ! point (ew,ns+1)
+ ! call putpcgc(model%solver_data,1.0d0 + sumd(5), model%geometry%thck_index(ew,ns), model%geometry%thck_index(ew,ns))! point (ew,ns)
+
+ ! calculate RHS
+ ! Semi-implicit (alpha-weighted) combination of: the old thickness, the
+ ! explicit part of the diffusion operator applied to the old thickness,
+ ! the lower-surface (bedrock/flotation) contribution, and the surface
+ ! mass balance accumulated over the time step.
+ if (calc_rhs) then
+
+ model%solver_data%rhsd(model%geometry%thck_index(ew,ns)) = &
+ old_thck(ew,ns) * (1.0d0 - ((1.0d0-model%numerics%alpha)/model%numerics%alpha) * sumd(5)) &
+ - ((1.0d0 - model%numerics%alpha) / model%numerics%alpha) * &
+ (old_thck(ewm,ns) * sumd(1) &
+ + old_thck(ewp,ns) * sumd(2) &
+ + old_thck(ew,nsm) * sumd(3) &
+ + old_thck(ew,nsp) * sumd(4)) &
+ - (1.d0 / model%numerics%alpha) * (model%geometry%lsrf(ew,ns) * sumd(5) &
+ + model%geometry%lsrf(ewm,ns) * sumd(1) &
+ + model%geometry%lsrf(ewp,ns) * sumd(2) &
+ + model%geometry%lsrf(ew,nsm) * sumd(3) &
+ + model%geometry%lsrf(ew,nsp) * sumd(4)) &
+ + model%climate%acab(ew,ns) * model%numerics%dt
+
+ if (model%options%basal_mbal==1) then ! basal melt rate included in continuity equation
+ model%solver_data%rhsd(model%geometry%thck_index(ew,ns)) = &
+ model%solver_data%rhsd(model%geometry%thck_index(ew,ns)) &
+ - model%temper%bmlt(ew,ns) * model%numerics%dt ! basal melt is positive for mass loss
+ end if
+
+ end if ! calc_rhs
+
+ ! initial guess for the iterative solver
+ model%solver_data%answ(model%geometry%thck_index(ew,ns)) = new_thck(ew,ns)
+
+ end subroutine generate_row
+
+!---------------------------------------------------------------
+
+ subroutine findsums(ewm, ew, nsm, ns)
+
+ ! calculate diffusivities
+
+ ! Compute the five finite-volume coefficients sumd(1:5) for the matrix row of
+ ! node (ew,ns): sumd(1)/sumd(2) couple the west/east neighbours, sumd(3)/sumd(4)
+ ! the south/north neighbours, and sumd(5) is minus their sum (diagonal part).
+ ! Each off-diagonal coefficient averages the staggered diffusivity plus the
+ ! basal-sliding contribution over the two staggered points on that cell face.
+ ! Host-associated state used: model, sumd, diffu_x, diffu_y, alpha_dt_ew, alpha_dt_ns.
+ !
+ ! NOTE(review): ubas is used for the ns-direction terms (sumd(3), sumd(4)) as
+ ! well as the ew-direction ones; the commented-out "old way" below does the
+ ! same, but confirm that vbas is not intended for the ns direction.
+
+ implicit none
+ integer, intent(in) :: ewm,ew ! ew index to left, right
+ integer, intent(in) :: nsm,ns ! ns index to lower, upper
+
+ ! calculate sparse matrix elements
+ sumd(1) = alpha_dt_ew * (&
+ (diffu_x(ewm,nsm) + diffu_x(ewm,ns)) + &
+ (model%velocity%ubas (ewm,nsm) + model%velocity%ubas (ewm,ns)))
+ sumd(2) = alpha_dt_ew * (&
+ (diffu_x(ew,nsm) + diffu_x(ew,ns)) + &
+ (model%velocity%ubas (ew,nsm) + model%velocity%ubas (ew,ns)))
+ sumd(3) = alpha_dt_ns * (&
+ (diffu_y(ewm,nsm) + diffu_y(ew,nsm)) + &
+ (model%velocity%ubas (ewm,nsm) + model%velocity%ubas (ew,nsm)))
+ sumd(4) = alpha_dt_ns * (&
+ (diffu_y(ewm,ns) + diffu_y(ew,ns)) + &
+ (model%velocity%ubas (ewm,ns) + model%velocity%ubas (ew,ns)))
+ sumd(5) = - (sumd(1) + sumd(2) + sumd(3) + sumd(4))
+
+ !EIB! old way
+ !sumd(1) = alpha_dt_ew * (&
+ ! (model%velocity%diffu(ewm,nsm) + model%velocity%diffu(ewm,ns)) + &
+ ! (model%velocity%ubas (ewm,nsm) + model%velocity%ubas (ewm,ns)))
+ !sumd(2) = alpha_dt_ew * (&
+ ! (model%velocity%diffu(ew,nsm) + model%velocity%diffu(ew,ns)) + &
+ ! (model%velocity%ubas (ew,nsm) + model%velocity%ubas (ew,ns)))
+ !sumd(3) = alpha_dt_ns * (&
+ ! (model%velocity%diffu(ewm,nsm) + model%velocity%diffu(ew,nsm)) + &
+ ! (model%velocity%ubas (ewm,nsm) + model%velocity%ubas (ew,nsm)))
+ !sumd(4) = alpha_dt_ns * (&
+ ! (model%velocity%diffu(ewm,ns) + model%velocity%diffu(ew,ns)) + &
+ ! (model%velocity%ubas (ewm,ns) + model%velocity%ubas (ew,ns)))
+ !sumd(5) = - (sumd(1) + sumd(2) + sumd(3) + sumd(4))
+
+ end subroutine findsums
+
+ end subroutine thck_evolve
+
+!---------------------------------------------------------------
+
+!WHL - This subroutine used to be called glide_maskthck and located in its own module,
+! but I put it in glide_thck.F90 since it is used only for the glide thickness calculation.
+
+  subroutine glide_thck_index(thck,  acab,   &
+                              thck_index, totpts, &
+                              include_adjacent, empty)
+
+    ! Compute an integer mask for the glide thickness calculation.
+    ! Cells satisfying the criterion (see thckcrit) are numbered 1, 2, ..., totpts
+    ! in (ns-outer, ew-inner) order; all other cells get index 0.
+    ! The mask generally includes ice-covered cells (thck > 0) and cells with a
+    ! positive mass balance (acab > 0); if include_adjacent is true, ice-free
+    ! cells adjacent to ice-covered cells are included as well.
+
+    !-------------------------------------------------------------------------
+    ! Subroutine arguments
+    !-------------------------------------------------------------------------
+
+    real(dp),dimension(:,:),intent(in)  :: thck       !> Ice thickness
+    real(dp),dimension(:,:),intent(in)  :: acab       !> Mass balance
+    integer, dimension(:,:),intent(out) :: thck_index !> integer index (1, 2, 3, ..., totpts)
+    integer,                intent(out) :: totpts     !> Total number of points in mask
+    logical,                intent(in)  :: include_adjacent ! If true, points with no ice but that are adjacent
+                                                            ! to points with ice are included in the mask
+    logical,                intent(out) :: empty      !> true if no points in mask
+
+    !-------------------------------------------------------------------------
+    ! Internal variables
+    !-------------------------------------------------------------------------
+
+    logical,dimension(size(thck,2)) :: full   ! true if row ns contains at least one masked point
+    integer :: covtot                         ! running count of masked points
+    integer :: ew,ns,ewn,nsn
+
+    !-------------------------------------------------------------------------
+
+    ewn = size(thck,1)
+    nsn = size(thck,2)
+
+    thck_index = 0
+    covtot = 0
+
+    do ns = 1,nsn
+       full(ns) = .false.
+       do ew = 1,ewn
+
+          ! Pass the (edge-clipped) neighbourhood slice and the true centre
+          ! value separately. Bug fix: the centre was previously taken as
+          ! ca(2,2), which is wrong on the domain edges, where the max/min
+          ! clipping shifts the slice so that ca(2,2) is not cell (ew,ns).
+          if ( thckcrit(thck(max(1,ew-1):min(ewn,ew+1), max(1,ns-1):min(nsn,ns+1)), &
+                        thck(ew,ns), acab(ew,ns)) ) then
+             covtot = covtot + 1
+             thck_index(ew,ns) = covtot
+             full(ns) = .true.
+          end if
+
+       end do
+    end do
+
+    totpts = covtot
+
+    ! empty is true iff no row contains a masked point
+    empty = .not. any(full)
+
+  contains
+
+    logical function thckcrit(ca, c_centre, cb)
+
+      ! Mask criterion for one cell. Uses host-associated include_adjacent.
+
+      implicit none
+
+      real(dp),dimension(:,:),intent(in) :: ca       ! neighbourhood of the current cell (clipped at edges)
+      real(dp),               intent(in) :: c_centre ! ice thickness at the current (ew,ns) cell
+      real(dp),               intent(in) :: cb       ! mass balance at the current cell
+
+      if (include_adjacent) then
+
+         ! If the thickness in the region under consideration
+         ! or the mass balance is positive, thckcrit is .true.
+         ! This means that the mask includes points that have no
+         ! ice but are adjacent to points that do have ice.
+         thckcrit = ( any(ca(:,:) > 0.d0) .or. cb > 0.d0 )
+
+      else
+
+         ! Include only points that themselves have ice or positive mass balance
+         thckcrit = ( c_centre > 0.d0 .or. cb > 0.d0 )
+
+      end if
+
+    end function thckcrit
+
+  end subroutine glide_thck_index
+
+ !-----------------------------------------------------------------------------
+ ! ADI routines
+ !-----------------------------------------------------------------------------
+
+ subroutine stagleapthck(model,newtemps)
+
+ !> this subroutine solves the ice sheet thickness equation using the ADI scheme
+ !> diffusivities are updated for each half time step
+
+ ! Structure: if the domain has no ice, just add the net mass balance.
+ ! Otherwise: (1) update basal velocities (and Glen's A when newtemps),
+ ! (2) recompute the diffusivity, (3) first ADI half-step solving tridiagonal
+ ! systems along each ew row (result stored in model%thckwk%oldthck),
+ ! (4) second ADI half-step along each ns column (result written back to
+ ! model%geometry%thck), (5) apply zero-thickness boundary conditions and
+ ! recompute velocities and the upper/lower surfaces.
+
+ !TODO The ADI scheme has not been checked for consistency with the new time-stepping convention.
+
+ use glide_velo
+ use glimmer_utils, only: tridiag
+ use glimmer_paramets, only: GLC_DEBUG
+ use glide_grid_operators, only: glide_geometry_derivs
+ implicit none
+
+ ! subroutine arguments
+ type(glide_global_type) :: model
+ logical, intent(in) :: newtemps !> true when we should recalculate Glen's A
+
+ ! local variables
+ integer ew,ns, n
+
+ if (model%geometry%empty) then
+
+ model%geometry%thck = dmax1(0.0d0, model%geometry%thck + model%climate%acab * model%numerics%dt)
+ if (GLC_DEBUG) then
+ print *, "* thck empty - net accumulation added", model%numerics%time
+ end if
+
+ else
+
+ !Note: glide_geometry_derivs is called at the beginning of glide_tstep_p1,
+ ! and the geometry has not changed, so stagthck and the geometry
+ ! derivatives are still up to date. A call might be needed here
+ ! if glide_tstep_p2 were called out of order.
+
+!! call glide_geometry_derivs(model)
+
+ ! calculate basal velos
+
+ if (newtemps) then
+ call slipvelo(model, &
+ 1, &
+ model%velocity% btrc, &
+ model%velocity% ubas, &
+ model%velocity% vbas)
+
+ ! calculate Glen's A if necessary
+ call velo_integrate_flwa(model%velowk,model%geomderv%stagthck,model%temper%flwa)
+ end if
+
+ call slipvelo(model, &
+ 2, &
+ model%velocity% btrc, &
+ model%velocity% ubas, &
+ model%velocity% vbas)
+
+ ! calculate diffusivity
+
+ call velo_calc_diffu(model%velowk, model%geomderv%stagthck, &
+ model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+ model%velocity%diffu)
+
+ ! combined diffusivity: internal deformation plus basal sliding term
+ model%velocity%total_diffu(:,:) = model%velocity%diffu(:,:) + model%velocity%ubas(:,:)
+
+ ! first ADI step, solve thickness equation along rows j
+
+ n = model%general%ewn
+ do ns=2,model%general%nsn-1
+
+ call adi_tri ( model%thckwk%alpha, &
+ model%thckwk%beta, &
+ model%thckwk%gamma, &
+ model%thckwk%delta, &
+ model%geometry%thck(:,ns), &
+ model%geometry%lsrf(:,ns), &
+ model%climate%acab(:,ns), &
+ model%velocity%vflx(:,ns), &
+ model%velocity%vflx(:,ns-1), &
+ model%velocity%total_diffu(:,ns), &
+ model%velocity%total_diffu(:,ns-1), &
+ model%numerics%dt, &
+ model%numerics%dew, &
+ model%numerics%dns )
+ !EIB! gc2 acab input, not sure why the difference
+ !model%climate%acab(:,ns)-real(model%options%basal_mbal)*real(model%temper%bmlt(:,ns),sp), &
+
+ ! half-step solution for this row is stored in thckwk%oldthck
+ call tridiag(model%thckwk%alpha(1:n), &
+ model%thckwk%beta(1:n), &
+ model%thckwk%gamma(1:n), &
+ model%thckwk%oldthck(:,ns), &
+ model%thckwk%delta(1:n))
+ end do
+
+ ! clip intermediate (half-step) thickness to non-negative values
+ model%thckwk%oldthck(:,:) = max(model%thckwk%oldthck(:,:), 0.d0)
+
+ ! second ADI step, solve thickness equation along columns i
+ n = model%general%nsn
+ do ew=2,model%general%ewn-1
+ call adi_tri ( model%thckwk%alpha, &
+ model%thckwk%beta, &
+ model%thckwk%gamma, &
+ model%thckwk%delta, &
+ model%thckwk%oldthck(ew,:), &
+ model%geometry%lsrf(ew, :), &
+ model%climate%acab(ew, :), &
+ model%velocity%uflx(ew,:), &
+ model%velocity%uflx(ew-1,:), &
+ model%velocity%total_diffu(ew,:), &
+ model%velocity%total_diffu(ew-1,:), &
+ model%numerics%dt, &
+ model%numerics%dns, &
+ model%numerics%dew )
+ !EIB! again, input difference
+ !model%climate%acab(ew, :)-real(model%options%basal_mbal)*real(model%temper%bmlt(ew, :),sp), &
+
+ call tridiag(model%thckwk%alpha(1:n), &
+ model%thckwk%beta(1:n), &
+ model%thckwk%gamma(1:n), &
+ model%geometry%thck(ew, :), &
+ model%thckwk%delta(1:n))
+ end do
+
+ model%geometry%thck(:,:) = max(model%geometry%thck(:,:), 0.d0)
+
+ ! Apply boundary conditions: zero thickness on the domain edges
+ model%geometry%thck(1,:) = 0.d0
+ model%geometry%thck(model%general%ewn,:) = 0.d0
+ model%geometry%thck(:,1) = 0.d0
+ model%geometry%thck(:,model%general%nsn) = 0.d0
+
+ ! calculate horizontal velocity field
+
+ call slipvelo(model, &
+ 3, &
+ model%velocity%btrc, &
+ model%velocity%ubas, &
+ model%velocity%vbas)
+
+ call velo_calc_velo(model%velowk, model%geomderv%stagthck, &
+ model%geomderv%dusrfdew, model%geomderv%dusrfdns, &
+ model%temper%flwa, model%velocity%diffu, &
+ model%velocity%ubas, model%velocity%vbas, &
+ model%velocity%uvel, model%velocity%vvel, &
+ model%velocity%uflx, model%velocity%vflx, &
+ model%velocity%velnorm)
+
+ end if ! empty
+
+ !------------------------------------------------------------
+ ! calculate upper and lower surface
+ !------------------------------------------------------------
+
+ call glide_calclsrf(model%geometry%thck, model%geometry%topg, model%climate%eus, model%geometry%lsrf)
+ model%geometry%usrf = max(0.d0,model%geometry%thck + model%geometry%lsrf)
+
+ end subroutine stagleapthck
+
+!---------------------------------------------------------------------------------
+
+  subroutine adi_tri(a,b,c,d,thk,tpg,mb,flx_p,flx_m,dif_p,dif_m,dt,ds1, ds2)
+
+    !> construct tri-diagonal matrix system for a column/row
+    ! Builds the sub-, super- and main diagonals plus the right-hand side of
+    ! the implicit thickness update along one grid line for the ADI scheme.
+
+    implicit none
+
+    real(dp), dimension(:), intent(out) :: a !> alpha (subdiagonal)
+    real(dp), dimension(:), intent(out) :: b !> alpha (diagonal)
+    real(dp), dimension(:), intent(out) :: c !> alpha (superdiagonal)
+    real(dp), dimension(:), intent(out) :: d !> right-hand side
+
+    real(dp), dimension(:), intent(in) :: thk   !> ice thickness
+    real(dp), dimension(:), intent(in) :: tpg   !> lower surface of ice
+    real(dp), dimension(:), intent(in) :: mb    !> mass balance
+    real(dp), dimension(:), intent(in) :: flx_p !> flux +1/2
+    real(dp), dimension(:), intent(in) :: flx_m !> flux -1/2
+    real(dp), dimension(:), intent(in) :: dif_p !> diffusivity +1/2
+    real(dp), dimension(:), intent(in) :: dif_m !> diffusivity -1/2
+
+    real(dp), intent(in) :: dt       !> time step
+    real(dp), intent(in) :: ds1, ds2 !> spatial steps inline and transversal
+
+    ! local variables
+    real(dp) :: fac_diff, fac_flux, fac_mb  ! scaling factors for diffusion, flux and mass-balance terms
+    integer  :: npts                        ! number of points along this grid line
+
+    npts = size(thk)
+
+    fac_diff = dt/(4*ds1*ds1)
+    fac_flux = dt/(4*ds2)
+    fac_mb   = dt/2.
+
+    a(:) = 0.
+    b(:) = 0.
+    c(:) = 0.
+    d(:) = 0.
+
+    ! off-diagonals: sum of the two staggered diffusivities on each side
+    a(1) = 0.
+    a(2:npts)     = fac_diff * (dif_m(1:npts-1) + dif_p(1:npts-1))
+    c(1:npts-1)   = fac_diff * (dif_m(1:npts-1) + dif_p(1:npts-1))
+    c(npts) = 0.
+    b(:) = -(a(:)+c(:))
+
+    ! right-hand side at interior points: old thickness, transverse flux
+    ! divergence, mass balance, and the lower-surface contribution weighted
+    ! by the (pre-identity) diagonals
+    d(2:npts-1) = thk(2:npts-1) - &
+         fac_flux * (flx_p(1:npts-2) + flx_p(2:npts-1) - flx_m(1:npts-2) - flx_m(2:npts-1)) + &
+         fac_mb * mb(2:npts-1) - &
+         a(2:npts-1)*tpg(1:npts-2) - b(2:npts-1)*tpg(2:npts-1) - c(2:npts-1)*tpg(3:npts)
+
+    ! add the identity to complete the implicit operator
+    b(:) = 1.+b(:)
+
+  end subroutine adi_tri
+
+!-------------------------------------------------------------------------
+
+  subroutine glide_calclsrf(thck,topg,eus,lsrf)
+
+    ! Calculates the elevation of the lower surface of the ice,
+    ! by considering whether it is floating or not: where the bedrock
+    ! (relative to sea level eus) lies below the flotation depth
+    ! -rhoi/rhoo * thck, the ice floats and its lower surface sits at
+    ! that depth; otherwise the ice rests on the bed.
+    !
+    ! NOTE: This subroutine computes over all grid cells, not just locally owned.
+    ! Halos should be updated before it is called.
+
+    use glimmer_physcon, only : rhoi, rhoo
+
+    implicit none
+
+    real(dp), intent(in),  dimension(:,:) :: thck !> Ice thickness
+    real(dp), intent(in),  dimension(:,:) :: topg !> Bedrock topography elevation
+    real(dp), intent(in)                  :: eus  !> global sea level
+    real(dp), intent(out), dimension(:,:) :: lsrf !> Lower ice surface elevation
+
+    ! flotation depth per unit ice thickness (negative of the density ratio)
+    real(dp), parameter :: con = - rhoi / rhoo
+
+    ! elementwise: floating cells take the flotation depth, grounded cells the bed
+    lsrf = merge(con * thck, topg, topg - eus < con * thck)
+
+  end subroutine glide_calclsrf
+
+!---------------------------------------------------------------------------------
+
+!TODO - This subroutine is not used. Remove it?
+
+  subroutine filterthck(thck,ewn,nsn)
+
+    ! Apply a light fourth-order (biharmonic-style) smoothing filter to the
+    ! ice thickness field, in place. A point is smoothed only when its full
+    ! 5-point stencil in both directions lies entirely in ice-covered cells;
+    ! all other points are left unchanged. The stencil coefficients sum to
+    ! zero, so the filter conserves the mean over fully ice-covered regions.
+    ! NOTE: this subroutine is currently unused (see TODO above).
+
+    implicit none
+
+    real(dp), dimension(:,:), intent(inout) :: thck !> ice thickness (smoothed in place)
+    integer, intent(in) :: ewn,nsn                  !> grid dimensions of thck
+
+    real(dp), dimension(:,:), allocatable :: smth   ! work array for the smoothed field
+
+    real(dp), parameter :: f = 0.1d0 / 16.0d0       ! filter strength
+    integer :: nsmoothed                            ! number of points actually smoothed
+    integer :: ns,ew
+
+    allocate(smth(ewn,nsn))
+
+    ! Bug fix: the counter formerly started at 1 (and was named 'count',
+    ! shadowing the intrinsic), so the diagnostic print over-reported by one.
+    nsmoothed = 0
+
+    do ns = 3,nsn-2
+       do ew = 3,ewn-2
+
+          if (all(thck(ew-2:ew+2,ns) > 0.0d0) .and. all(thck(ew,ns-2:ns+2) > 0.0d0)) then
+             ! zero-sum stencil: (1,-4,12,-4,1) in ew plus (1,-4,.,-4,1) in ns
+             smth(ew,ns) = thck(ew,ns) + f * &
+                  (thck(ew-2,ns) - 4.0d0 * thck(ew-1,ns) + 12.0d0 * thck(ew,ns) - &
+                   4.0d0 * thck(ew+1,ns) + thck(ew+2,ns) + &
+                   thck(ew,ns-2) - 4.0d0 * thck(ew,ns-1) - &
+                   4.0d0 * thck(ew,ns+1) + thck(ew,ns+2))
+             nsmoothed = nsmoothed + 1
+          else
+             smth(ew,ns) = thck(ew,ns)
+          end if
+
+       end do
+    end do
+
+    thck(3:ewn-2,3:nsn-2) = smth(3:ewn-2,3:nsn-2)
+    print *, nsmoothed
+
+    deallocate(smth)
+
+  end subroutine filterthck
+
+!----------------------------------------------------------------------
+
+!TODO - This subroutine is not used. Remove it?
+
+  subroutine swapbndh(bc,a,b,c,d)
+
+    ! Copy boundary arrays b -> a and d -> c when bc == 0; otherwise leave
+    ! a and c untouched.
+    ! NOTE: this subroutine is currently unused (see TODO above).
+    !
+    ! Bug fix: a and c were declared intent(out), which makes them undefined
+    ! on entry; for any bc /= 0 they were never assigned and the caller would
+    ! read undefined data. intent(inout) preserves their values in that case
+    ! and is compatible with all existing call sites.
+
+    implicit none
+
+    real(dp), intent(inout), dimension(:) :: a, c !> boundary arrays, overwritten only when bc == 0
+    real(dp), intent(in),    dimension(:) :: b, d !> source arrays
+    integer,  intent(in) :: bc                    !> boundary-condition selector (0 = swap)
+
+    if (bc == 0) then
+       a = b
+       c = d
+    end if
+
+  end subroutine swapbndh
+
+!---------------------------------------------------------------------------------
+
+end module glide_thck
+
+!---------------------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/glide_thck.F90.archive b/components/cism/glimmer-cism/libglide/glide_thck.F90.archive
new file mode 100644
index 0000000000..4408515d10
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_thck.F90.archive
@@ -0,0 +1,1019 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_thck.F90.archive - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#include "glide_nan.inc"
+
+module glide_thck
+
+ use glide_types
+ use glide_velo_higher
+ use glimmer_sparse
+ use glimmer_sparse_type
+ use glide_grids
+
+ !DEBUG ONLY, these should be deleted eventually
+ use glide_stop
+ use xls
+ use glide_io
+ private
+ public :: init_thck, thck_nonlin_evolve, thck_lin_evolve, timeders, &
+ stagleapthck, geometry_derivs, &
+ geometry_derivs_unstag
+
+#ifdef DEBUG_PICARD
+ ! debugging Picard iteration
+ integer, private, parameter :: picard_unit=101
+ real, private, parameter :: picard_interval=500.
+ integer, private :: picard_max=0
+#endif
+
+contains
+
+  subroutine init_thck(model)
+    !*FD initialise work data for ice thickness evolution
+    use glimmer_log
+    implicit none
+    type(glide_global_type) :: model
+
+    ! Precompute finite-difference/time-step factors used by the thickness
+    ! solver: fc2(1) = alpha*dt/(2*dew^2), fc2(2) = dt,
+    ! fc2(3) = (1-alpha)/alpha, fc2(4) = 1/alpha,
+    ! fc2(5) = alpha*dt/(2*dns^2), fc2(6) = unused.
+    model%pcgdwk%fc2 = (/ model%numerics%alpha * model%numerics%dt / (2.0d0 * model%numerics%dew * model%numerics%dew), &
+         model%numerics%dt, &
+         (1.0d0-model%numerics%alpha) / model%numerics%alpha, &
+         1.0d0 / model%numerics%alpha, &
+         model%numerics%alpha * model%numerics%dt / &
+         (2.0d0 * model%numerics%dns * model%numerics%dns), &
+         0.0d0 /)
+
+#ifdef DEBUG_PICARD
+    call write_log('Logging Picard iterations')
+    ! Bug fix: the open() specifier for the file name is 'file=', not 'name='
+    ! ('name=' is non-standard and rejected by standard-conforming compilers).
+    open(picard_unit,file='picard_info.data',status='unknown')
+    write(picard_unit,*) '#time max_iter'
+#endif
+
+    ! allocate memory for ADI scheme (whichevol == 1 selects ADI)
+    if (model%options%whichevol == 1) then
+       allocate(model%thckwk%alpha(max(model%general%ewn, model%general%nsn)))
+       allocate(model%thckwk%beta (max(model%general%ewn, model%general%nsn)))
+       allocate(model%thckwk%gamma(max(model%general%ewn, model%general%nsn)))
+       allocate(model%thckwk%delta(max(model%general%ewn, model%general%nsn)))
+    end if
+  end subroutine init_thck
+
+!---------------------------------------------------------------------------------
+
+ subroutine thck_lin_evolve(model,newtemps)
+
+ !*FD this subroutine solves the linearised ice thickness equation by computing the
+ !*FD diffusivity from quantities of the previous time step
+
+ ! Structure: if the domain has no ice, just add the net mass balance.
+ ! Otherwise: update geometry derivatives and basal velocities (and Glen's A
+ ! when newtemps), compute the diffusivity, run the higher-order diagnostic
+ ! solver if requested, solve the linear thickness system once via
+ ! thck_evolve, then recompute the horizontal velocity field.
+
+ use glide_velo
+ use glide_thckmask
+ implicit none
+ ! subroutine arguments
+ type(glide_global_type) :: model
+ logical, intent(in) :: newtemps !*FD true when we should recalculate Glen's A
+
+ if (model%geometry%empty) then
+
+ model%geometry%thck = dmax1(0.0d0,model%geometry%thck + model%climate%acab * model%pcgdwk%fc2(2))
+#ifdef DEBUG
+ print *, "* thck empty - net accumulation added", model%numerics%time
+#endif
+ else
+
+ !EIB! added from lanl
+ call geometry_derivs(model)
+
+ ! calculate basal velos
+ if (newtemps) then
+ call slipvelo(model, &
+ 1, &
+ model%velocity% btrc, &
+ model%velocity% ubas, &
+ model%velocity% vbas)
+ ! calculate Glen's A if necessary
+ call velo_integrate_flwa(model%velowk,model%geomderv%stagthck,model%temper%flwa)
+ end if
+ call slipvelo(model, &
+ 2, &
+ model%velocity% btrc, &
+ model%velocity% ubas, &
+ model%velocity% vbas)
+
+ ! calculate diffusivity
+ call velo_calc_diffu(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+ model%geomderv%dusrfdns,model%velocity%diffu)
+
+ !EIB! added from lanl
+ !Calculate higher-order velocities if the user asked for them
+ if (model%options%which_ho_diagnostic /= 0 ) then
+ call geometry_derivs_unstag(model)
+ call run_ho_diagnostic(model)
+ end if
+
+ ! diagnostic-only run: finalise and terminate without evolving thickness
+ if (model%options%diagnostic_run == 1) then
+ call glide_finalise_all(.true.)
+ stop
+ end if
+
+ ! get new thicknesses; the diffusivity pair passed selects the prognostic scheme
+ if (model%options%which_ho_prognostic == HO_PROG_SIAONLY) then
+ ! get new thicknesses
+ call thck_evolve(model,model%velocity%diffu, model%velocity%diffu, .true.,model%geometry%thck,model%geometry%thck)
+ else if (model%options%which_ho_prognostic == HO_PROG_PATTYN) then
+ call thck_evolve(model,model%velocity_hom%diffu_x, model%velocity_hom%diffu_y, .true.,&
+ model%geometry%thck, model%geometry%thck)
+
+ end if
+ !EIB! old? from gc2
+ !call thck_evolve(model,.true.,model%geometry%thck,model%geometry%thck)
+
+ ! calculate horizontal velocity field
+ ! (These calls must appear after thck_evolve, as thck_evolve uses ubas,
+ ! which slipvelo mutates)
+ call slipvelo(model, &
+ 3, &
+ model%velocity%btrc, &
+ model%velocity%ubas, &
+ model%velocity%vbas)
+ call velo_calc_velo(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+ model%geomderv%dusrfdns,model%temper%flwa,model%velocity%diffu,model%velocity%ubas, &
+ model%velocity%vbas,model%velocity%uvel,model%velocity%vvel,model%velocity%uflx,model%velocity%vflx,&
+ model%velocity%surfvel)
+ !EIB! old
+ !call velo_calc_velo(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+ ! model%geomderv%dusrfdns,model%temper%flwa,model%velocity%diffu,model%velocity%ubas, &
+ ! model%velocity%vbas,model%velocity%uvel,model%velocity%vvel,model%velocity%uflx,model%velocity%vflx)
+ end if
+ end subroutine thck_lin_evolve
+
+!---------------------------------------------------------------------------------
+
+ subroutine thck_nonlin_evolve(model,newtemps)
+
+ !*FD this subroutine solves the ice thickness equation by doing an outer,
+ !*FD non-linear iteration to update the diffusivities and in inner, linear
+ !*FD iteration to calculate the new ice thickness distrib
+
+ ! Structure: if the domain has no ice, just add the net mass balance.
+ ! Otherwise iterate (Picard, up to pmax times): recompute geometry
+ ! derivatives, basal velocities and diffusivity from the current thickness
+ ! guess, solve the linear system via thck_evolve, and stop when the
+ ! thickness change falls below tol (or the unstable-manifold correction
+ ! signals convergence). Finally recompute the horizontal velocity field.
+
+ use glimmer_global, only : dp
+ use glide_velo
+ use glide_setup
+ use glide_thckmask
+ use glide_nonlin !For unstable manifold correction
+ !EIB! use glide_deriv, only : df_field_2d_staggered
+ implicit none
+ ! subroutine arguments
+ type(glide_global_type) :: model
+ logical, intent(in) :: newtemps !*FD true when we should recalculate Glen's A
+
+ ! local variables
+ integer, parameter :: pmax=50 !*FD maximum Picard iterations
+ real(kind=dp), parameter :: tol=1.0d-6
+ real(kind=dp) :: residual
+ integer p
+ logical first_p
+
+#ifdef USE_UNSTABLE_MANIFOLD
+ ! local variables used by unstable manifold correction
+ real(kind=dp), dimension(model%general%ewn*model%general%nsn) :: umc_new_vec
+ real(kind=dp), dimension(model%general%ewn*model%general%nsn) :: umc_old_vec
+ real(kind=dp), dimension(model%general%ewn*model%general%nsn) :: umc_correction_vec
+ logical :: umc_continue_iteration
+ integer :: linearize_start
+
+ umc_correction_vec = 0
+ umc_new_vec = 0
+ umc_old_vec = 0
+#endif
+
+ if (model%geometry%empty) then
+
+ model%geometry%thck = dmax1(0.0d0,model%geometry%thck + model%climate%acab * model%pcgdwk%fc2(2))
+#ifdef DEBUG
+ print *, "* thck empty - net accumulation added", model%numerics%time
+#endif
+ else
+
+ ! calculate basal velos
+ if (newtemps) then
+ call slipvelo(model, &
+ 1, &
+ model%velocity% btrc, &
+ model%velocity% ubas, &
+ model%velocity% vbas)
+ ! calculate Glen's A if necessary
+ call velo_integrate_flwa(model%velowk,model%geomderv%stagthck,model%temper%flwa)
+ end if
+
+ first_p = .true.
+ model%thckwk%oldthck = model%geometry%thck
+ ! do Picard iteration
+ model%thckwk%oldthck2 = model%geometry%thck
+ do p=1,pmax
+ !EIB moved! model%thckwk%oldthck2 = model%geometry%thck
+
+ call geometry_derivs(model)
+ !EIB! old way
+ !call stagvarb(model%geometry% thck, &
+ ! model%geomderv% stagthck,&
+ ! model%general% ewn, &
+ ! model%general% nsn)
+ !call df_field_2d_staggered(model%geometry%usrf, &
+ ! model%numerics%dew, model%numerics%dns, &
+ ! model%geomderv%dusrfdew, &
+ ! model%geomderv%dusrfdns, &
+ ! .false., .false.)
+ !call df_field_2d_staggered(model%geometry%thck, &
+ ! model%numerics%dew, model%numerics%dns, &
+ ! model%geomderv%dthckdew, &
+ ! model%geomderv%dthckdns, &
+ ! .false., .false.)
+
+ call slipvelo(model, &
+ 2, &
+ model%velocity% btrc, &
+ model%velocity% ubas, &
+ model%velocity% vbas)
+
+ ! calculate diffusivity
+ call velo_calc_diffu(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+ model%geomderv%dusrfdns,model%velocity%diffu)
+
+ !Calculate higher-order velocities if the user asked for them
+ if (model%options%which_ho_diagnostic /= 0 ) then
+ call geometry_derivs_unstag(model)
+ call run_ho_diagnostic(model)
+ end if
+
+ ! get new thicknesses
+ ! (first_p controls whether thck_evolve rebuilds the right-hand side;
+ ! it is only needed on the first pass of the Picard loop)
+ if (model%options%which_ho_prognostic == HO_PROG_SIAONLY) then
+
+ call thck_evolve(model, model%velocity%diffu, model%velocity%diffu, &
+ first_p, model%geometry%thck, model%geometry%thck)
+
+ else if (model%options%which_ho_prognostic == HO_PROG_PATTYN) then
+
+ call thck_evolve(model,model%velocity_hom%diffu_x, model%velocity_hom%diffu_y, .true.,&
+ model%geometry%thck, model%geometry%thck)
+
+ end if
+ !EIB! old way
+ ! get new thicknesses
+ !call thck_evolve(model,first_p,model%thckwk%oldthck,model%geometry%thck)
+
+ first_p = .false.
+
+#ifdef USE_UNSTABLE_MANIFOLD
+ linearize_start = 1
+ call linearize_2d(umc_new_vec, linearize_start, model%geometry%thck)
+ linearize_start = 1
+ call linearize_2d(umc_old_vec, linearize_start, model%thckwk%oldthck2)
+ umc_continue_iteration = unstable_manifold_correction(umc_new_vec, umc_old_vec, &
+ umc_correction_vec, size(umc_correction_vec),&
+ tol)
+ !Only the old thickness might change as a result of this call
+ linearize_start = 1
+ call delinearize_2d(umc_old_vec, linearize_start, model%thckwk%oldthck2)
+
+ ! NOTE(review): the loop exits when umc_continue_iteration is .true.;
+ ! the variable name suggests the opposite sense — confirm against the
+ ! return convention of unstable_manifold_correction.
+ if (umc_continue_iteration) then
+ exit
+ end if
+#else
+ ! converged when the max thickness change over the domain is below tol
+ residual = maxval(abs(model%geometry%thck-model%thckwk%oldthck2))
+ if (residual.le.tol) then
+ exit
+ end if
+ model%thckwk%oldthck2 = model%geometry%thck
+#endif
+ !EIB! old way
+ !residual = maxval(abs(model%geometry%thck-model%thckwk%oldthck2))
+ !if (residual.le.tol) then
+ ! exit
+ !end if
+
+ end do
+#ifdef DEBUG_PICARD
+ picard_max=max(picard_max,p)
+ if (model%numerics%tinc > mod(model%numerics%time,picard_interval)) then
+ write(picard_unit,*) model%numerics%time,p
+ picard_max = 0
+ end if
+#endif
+
+ ! calculate horizontal velocity field
+ call slipvelo(model, &
+ 3, &
+ model%velocity%btrc, &
+ model%velocity%ubas, &
+ model%velocity%vbas)
+ call velo_calc_velo(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+ model%geomderv%dusrfdns,model%temper%flwa,model%velocity%diffu,model%velocity%ubas, &
+ model%velocity%vbas,model%velocity%uvel,model%velocity%vvel,model%velocity%uflx,model%velocity%vflx,&
+ model%velocity%surfvel)
+ !EIB! old way
+ !call velo_calc_velo(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+ ! model%geomderv%dusrfdns,model%temper%flwa,model%velocity%diffu,model%velocity%ubas, &
+ ! model%velocity%vbas,model%velocity%uvel,model%velocity%vvel,model%velocity%uflx,model%velocity%vflx)
+ end if
+ end subroutine thck_nonlin_evolve
+
+!---------------------------------------------------------------------------------
+
+  subroutine thck_evolve(model,diffu_x, diffu_y, calc_rhs,old_thck,new_thck)
+
+    !*FD set up sparse matrix and solve matrix equation to find new ice thickness distribution
+    !*FD this routine does not override the old thickness distribution
+    !
+    !*FD The implicit thickness equation is assembled one row per ice-covered
+    !*FD grid point into model%pcgdwk%matrix.  Interior points get a 5-point
+    !*FD stencil built by the contained routines findsums/generate_row; the
+    !*FD ns=1 and ns=nsn edges (and the ew edges, unless periodic_ew is set)
+    !*FD get identity rows that pin the thickness to old_thck.  The system is
+    !*FD solved with sparse_easy_solve and scattered back onto new_thck.
+
+    use glide_setup, only: glide_calclsrf
+    use glimmer_global, only : dp
+    use glide_stop
+    use glimmer_log
+    !NOTE(review): this guard is "#if DEBUG" while the print block near the end
+    !  of this routine is guarded by "#ifdef DEBUG".  If DEBUG is defined with
+    !  no value (-DDEBUG) the two guards disagree and thk0/vel0 may be missing
+    !  where they are used -- confirm and unify the guards.
+#if DEBUG
+    use glimmer_paramets, only: vel0, thk0
+#endif
+
+    implicit none
+
+    ! subroutine arguments -------------------------------------------------------------
+
+    type(glide_global_type) :: model
+    logical,intent(in) :: calc_rhs !*FD set to true when rhs should be calculated
+    !*FD i.e. when doing lin solution or first picard iteration
+    real(dp), intent(in), dimension(:,:) :: diffu_x
+    real(dp), intent(in), dimension(:,:) :: diffu_y
+    real(dp), intent(in), dimension(:,:) :: old_thck !*FD contains ice thicknesses from previous time step
+    real(dp), intent(inout), dimension(:,:) :: new_thck !*FD on entry contains first guess for new ice thicknesses
+    !*FD on exit contains ice thicknesses of new time step
+
+    ! local variables ------------------------------------------------------------------
+
+    real(dp), dimension(5) :: sumd   ! stencil weights: W, E, S, N neighbours and centre
+    real(dp) :: err                  ! residual returned by the sparse solver
+    integer :: linit                 ! iteration count returned by the sparse solver
+    integer :: ew,ns
+
+    ! Zero the arrays holding the sparse matrix
+    call sparse_clear(model%pcgdwk%matrix)
+
+    ! Set the order of the matrix
+    ! (mask(ew,ns) holds the 1-based matrix row/column index of each ice point)
+    model%pcgdwk%matrix%order = model%geometry%totpts
+
+    !EIB! old way
+    ! the number of grid points
+    !model%pcgdwk%pcgsize(1) = model%geometry%totpts
+    ! Zero the arrays holding the sparse matrix
+    !model%pcgdwk%pcgval = 0.0
+    !model%pcgdwk%pcgcol = 0
+    !model%pcgdwk%pcgrow = 0
+    !model%pcgdwk%ct = 1
+
+    ! Boundary Conditions ---------------------------------------------------------------
+    ! lower and upper BC
+    ! Identity rows: thickness on the ns=1 and ns=nsn edges is held at old_thck.
+    do ew = 1,model%general%ewn
+       ns=1
+       if (model%geometry%mask(ew,ns) /= 0) then
+          call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns), 1d0)
+          !EIB! old way
+          !call putpcgc(model%pcgdwk,1.0d0, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns))
+          if (calc_rhs) then
+             model%pcgdwk%rhsd(model%geometry%mask(ew,ns)) = old_thck(ew,ns)
+          end if
+          model%pcgdwk%answ(model%geometry%mask(ew,ns)) = new_thck(ew,ns)
+       end if
+       ns=model%general%nsn
+       if (model%geometry%mask(ew,ns) /= 0) then
+          call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns), 1d0)
+          !EIB! old way
+          !call putpcgc(model%pcgdwk,1.0d0, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns))
+          if (calc_rhs) then
+             model%pcgdwk%rhsd(model%geometry%mask(ew,ns)) = old_thck(ew,ns)
+          end if
+          model%pcgdwk%answ(model%geometry%mask(ew,ns)) = new_thck(ew,ns)
+       end if
+    end do
+
+    !left and right BC
+    ! Periodic case: build wrapped 5-point stencils on the ew edges;
+    ! otherwise pin the edges to old_thck with identity rows as above.
+    if (model%options%periodic_ew) then
+       do ns=2,model%general%nsn-1
+          ew = 1
+          if (model%geometry%mask(ew,ns) /= 0) then
+             call findsums(model%general%ewn-2,model%general%ewn-1,ns-1,ns)
+             call generate_row(model%general%ewn-2,ew,ew+1,ns-1,ns,ns+1)
+          end if
+          ew=model%general%ewn
+          if (model%geometry%mask(ew,ns) /= 0) then
+             call findsums(1,2,ns-1,ns)
+             call generate_row(ew-1,ew,3,ns-1,ns,ns+1)
+          end if
+       end do
+    else
+       do ns=2,model%general%nsn-1
+          ew=1
+          if (model%geometry%mask(ew,ns) /= 0) then
+             call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns), 1d0)
+             !EIB! old way
+             !call putpcgc(model%pcgdwk,1.0d0, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns))
+             if (calc_rhs) then
+                model%pcgdwk%rhsd(model%geometry%mask(ew,ns)) = old_thck(ew,ns)
+             end if
+             model%pcgdwk%answ(model%geometry%mask(ew,ns)) = new_thck(ew,ns)
+          end if
+          ew=model%general%ewn
+          if (model%geometry%mask(ew,ns) /= 0) then
+             call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns), 1d0)
+             !EIB! old way
+             !call putpcgc(model%pcgdwk,1.0d0, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns))
+             if (calc_rhs) then
+                model%pcgdwk%rhsd(model%geometry%mask(ew,ns)) = old_thck(ew,ns)
+             end if
+             model%pcgdwk%answ(model%geometry%mask(ew,ns)) = new_thck(ew,ns)
+          end if
+       end do
+    end if
+
+    ! ice body -------------------------------------------------------------------------
+    ! Interior points: one 5-point stencil row per ice-covered cell.
+
+    do ns = 2,model%general%nsn-1
+       do ew = 2,model%general%ewn-1
+
+          if (model%geometry%mask(ew,ns) /= 0) then
+
+             call findsums(ew-1,ew,ns-1,ns)
+             call generate_row(ew-1,ew,ew+1,ns-1,ns,ns+1)
+
+          end if
+       end do
+    end do
+
+    !EIB! still needed?
+    ! Calculate the total number of points
+    !model%pcgdwk%pcgsize(2) = model%pcgdwk%ct - 1
+
+    ! Solve the system using SLAP
+    !EIB! call slapsolv(model,linit,err)
+    call sparse_easy_solve(model%pcgdwk%matrix, model%pcgdwk%rhsd, model%pcgdwk%answ, &
+         err, linit)
+
+    ! Rejig the solution onto a 2D array
+    do ns = 1,model%general%nsn
+       do ew = 1,model%general%ewn
+          if (model%geometry%mask(ew,ns) /= 0) then
+             new_thck(ew,ns) = model%pcgdwk%answ(model%geometry%mask(ew,ns))
+          end if
+
+       end do
+    end do
+
+    ! clamp: ice thickness cannot be negative
+    new_thck = max(0.0d0, new_thck)
+
+#ifdef DEBUG
+    print *, "* thck ", model%numerics%time, linit, model%geometry%totpts, &
+         real(thk0*new_thck(model%general%ewn/2+1,model%general%nsn/2+1)), &
+         real(vel0*maxval(abs(model%velocity%ubas))), real(vel0*maxval(abs(model%velocity%vbas)))
+#endif
+
+    ! calculate upper and lower surface
+    call glide_calclsrf(model%geometry%thck, model%geometry%topg, model%climate%eus, model%geometry%lsrf)
+    model%geometry%usrf = max(0.d0,model%geometry%thck + model%geometry%lsrf)
+
+  contains
+
+    subroutine generate_row(ewm,ew,ewp,nsm,ns,nsp)
+      ! calculate row of sparse matrix equation
+      ! Inserts the 5 stencil entries for point (ew,ns) using the weights in
+      ! sumd (set by the immediately preceding findsums call), optionally
+      ! builds the matching RHS entry, and seeds the solver's initial guess.
+      implicit none
+      integer, intent(in) :: ewm,ew,ewp  ! ew index to left, central, right node
+      integer, intent(in) :: nsm,ns,nsp  ! ns index to lower, central, upper node
+
+      !fill matrix using the new API
+      call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ewm,ns), sumd(1)) ! point (ew-1,ns)
+      call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ewp,ns), sumd(2)) ! point (ew+1,ns)
+      call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ew,nsm), sumd(3)) ! point (ew,ns-1)
+      call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ew,nsp), sumd(4)) ! point (ew,ns+1)
+      call sparse_insert_val(model%pcgdwk%matrix, model%geometry%mask(ew,ns), model%geometry%mask(ew,ns), 1d0 + sumd(5))! point (ew,ns)
+      !EIB! old way
+      ! fill sparse matrix
+      ! call putpcgc(model%pcgdwk,sumd(1), model%geometry%mask(ewm,ns), model%geometry%mask(ew,ns))      ! point (ew-1,ns)
+      ! call putpcgc(model%pcgdwk,sumd(2), model%geometry%mask(ewp,ns), model%geometry%mask(ew,ns))      ! point (ew+1,ns)
+      ! call putpcgc(model%pcgdwk,sumd(3), model%geometry%mask(ew,nsm), model%geometry%mask(ew,ns))      ! point (ew,ns-1)
+      ! call putpcgc(model%pcgdwk,sumd(4), model%geometry%mask(ew,nsp), model%geometry%mask(ew,ns))      ! point (ew,ns+1)
+      ! call putpcgc(model%pcgdwk,1.0d0 + sumd(5), model%geometry%mask(ew,ns), model%geometry%mask(ew,ns))! point (ew,ns)
+
+      ! calculate RHS
+      !NOTE(review): unlike the old (commented) version below, this RHS omits
+      !  the basal melt contribution (model%temper%bmlt when basal_mbal==1) --
+      !  confirm the omission is intentional.
+      if (calc_rhs) then
+         model%pcgdwk%rhsd(model%geometry%mask(ew,ns)) = &
+              old_thck(ew,ns) * (1.0d0 - model%pcgdwk%fc2(3) * sumd(5)) &
+              - model%pcgdwk%fc2(3) * (old_thck(ewm,ns) * sumd(1) &
+              + old_thck(ewp,ns) * sumd(2) &
+              + old_thck(ew,nsm) * sumd(3) &
+              + old_thck(ew,nsp) * sumd(4)) &
+              - model%pcgdwk%fc2(4) * (model%geometry%lsrf(ew,ns) * sumd(5) &
+              + model%geometry%lsrf(ewm,ns) * sumd(1) &
+              + model%geometry%lsrf(ewp,ns) * sumd(2) &
+              + model%geometry%lsrf(ew,nsm) * sumd(3) &
+              + model%geometry%lsrf(ew,nsp) * sumd(4)) &
+              + model%climate%acab(ew,ns) * model%pcgdwk%fc2(2)
+      end if
+      !EIB! old way
+      ! calculate RHS
+      !if (calc_rhs) then
+      !   model%pcgdwk%rhsd(model%geometry%mask(ew,ns)) = &
+      !        old_thck(ew,ns) * (1.0d0 - model%pcgdwk%fc2(3) * sumd(5)) &
+      !        - model%pcgdwk%fc2(3) * (old_thck(ewm,ns) * sumd(1) &
+      !        + old_thck(ewp,ns) * sumd(2) &
+      !        + old_thck(ew,nsm) * sumd(3) &
+      !        + old_thck(ew,nsp) * sumd(4)) &
+      !        - model%pcgdwk%fc2(4) * (model%geometry%lsrf(ew,ns) * sumd(5)  &
+      !        + model%geometry%lsrf(ewm,ns) * sumd(1) &
+      !        + model%geometry%lsrf(ewp,ns) * sumd(2) &
+      !        + model%geometry%lsrf(ew,nsm) * sumd(3) &
+      !        + model%geometry%lsrf(ew,nsp) * sumd(4)) &
+      !        + model%climate%acab(ew,ns) * model%pcgdwk%fc2(2)
+      !   if(model%options%basal_mbal==1) then
+      !      model%pcgdwk%rhsd(model%geometry%mask(ew,ns)) =  &
+      !           model%pcgdwk%rhsd(model%geometry%mask(ew,ns))  &
+      !           - model%temper%bmlt(ew,ns) * model%pcgdwk%fc2(2)    ! basal melt is +ve for mass loss
+      !   end if
+      !end if
+
+      ! initial guess for this unknown
+      model%pcgdwk%answ(model%geometry%mask(ew,ns)) = new_thck(ew,ns)
+
+    end subroutine generate_row
+
+    subroutine findsums(ewm,ew,nsm,ns)
+      ! calculate diffusivities
+      ! Averages the (staggered) diffusivities and basal velocities around
+      ! point (ew,ns) into the stencil weights sumd(1:4); sumd(5) is minus
+      ! their sum so each matrix row sums to zero before the +1 diagonal.
+      implicit none
+      integer, intent(in) :: ewm,ew  ! ew index to left, right
+      integer, intent(in) :: nsm,ns  ! ns index to lower, upper
+
+      ! calculate sparse matrix elements
+      !NOTE(review): the ns-direction weights sumd(3)/sumd(4) use ubas, exactly
+      !  as the old commented code below did, although vbas might be expected
+      !  for the ns direction -- confirm against the derivation.
+      sumd(1) = model%pcgdwk%fc2(1) * (&
+           (diffu_x(ewm,nsm) + diffu_x(ewm,ns)) + &
+           (model%velocity%ubas (ewm,nsm) + model%velocity%ubas (ewm,ns)))
+      sumd(2) = model%pcgdwk%fc2(1) * (&
+           (diffu_x(ew,nsm) + diffu_x(ew,ns)) + &
+           (model%velocity%ubas (ew,nsm) + model%velocity%ubas (ew,ns)))
+      sumd(3) = model%pcgdwk%fc2(5) * (&
+           (diffu_y(ewm,nsm) + diffu_y(ew,nsm)) + &
+           (model%velocity%ubas (ewm,nsm) + model%velocity%ubas (ew,nsm)))
+      sumd(4) = model%pcgdwk%fc2(5) * (&
+           (diffu_y(ewm,ns) + diffu_y(ew,ns)) + &
+           (model%velocity%ubas (ewm,ns) + model%velocity%ubas (ew,ns)))
+      sumd(5) = - (sumd(1) + sumd(2) + sumd(3) + sumd(4))
+      !EIB! old way
+      !sumd(1) = model%pcgdwk%fc2(1) * (&
+      !     (model%velocity%diffu(ewm,nsm) + model%velocity%diffu(ewm,ns)) + &
+      !     (model%velocity%ubas (ewm,nsm) + model%velocity%ubas (ewm,ns)))
+      !sumd(2) = model%pcgdwk%fc2(1) * (&
+      !     (model%velocity%diffu(ew,nsm) + model%velocity%diffu(ew,ns)) + &
+      !     (model%velocity%ubas (ew,nsm) + model%velocity%ubas (ew,ns)))
+      !sumd(3) = model%pcgdwk%fc2(5) * (&
+      !     (model%velocity%diffu(ewm,nsm) + model%velocity%diffu(ew,nsm)) + &
+      !     (model%velocity%ubas (ewm,nsm) + model%velocity%ubas (ew,nsm)))
+      !sumd(4) = model%pcgdwk%fc2(5) * (&
+      !     (model%velocity%diffu(ewm,ns) + model%velocity%diffu(ew,ns)) + &
+      !     (model%velocity%ubas (ewm,ns) + model%velocity%ubas (ew,ns)))
+      !sumd(5) = - (sumd(1) + sumd(2) + sumd(3) + sumd(4))
+
+    end subroutine findsums
+  end subroutine thck_evolve
+
+
+
+
+!---------------------------------------------------------------
+
+subroutine geometry_derivs(model)
+  !NOTE(review): upwind_from_mask is imported here but not called in this
+  !  routine; it is used by geometry_derivs_unstag below -- confirm whether
+  !  this use statement belongs there instead.
+  use glide_mask, only: upwind_from_mask
+  implicit none
+
+  !*FD Computes derivatives of the ice and bed geometry, as well as averaging
+  !*FD them onto the staggered grid
+  !*FD
+  !*FD Fills model%geomderv: staggered thck/lsrf/topg/usrf, first derivatives
+  !*FD of usrf and thck (ew and ns), dlsrfd* derived by differencing, and the
+  !*FD second derivatives of usrf and thck on the staggered grid.
+  type(glide_global_type), intent(inout) :: model
+
+  ! average ice thickness onto the staggered (velocity) grid
+  call stagthickness(model%geometry% thck, &
+                    model%geomderv%stagthck,&
+                    model%general%ewn, &
+                    model%general%nsn, &
+                    model%geometry%usrf, &
+                    model%numerics%thklim, &
+                    model%geometry%thkmask)
+
+  call stagvarb(model%geometry%lsrf, &
+               model%geomderv%staglsrf,&
+               model%general%ewn, &
+               model%general%nsn)
+
+  call stagvarb(model%geometry%topg, &
+               model%geomderv%stagtopg,&
+               model%general%ewn, &
+               model%general%nsn)
+
+
+  ! staggered upper surface = staggered lower surface + staggered thickness
+  model%geomderv%stagusrf = model%geomderv%staglsrf + model%geomderv%stagthck
+
+
+  call df_field_2d_staggered(model%geometry%usrf, &
+                             model%numerics%dew, model%numerics%dns, &
+                             model%geomderv%dusrfdew, &
+                             model%geomderv%dusrfdns, &
+                             .false., .false.)
+
+  call df_field_2d_staggered(model%geometry%thck, &
+                             model%numerics%dew, model%numerics%dns, &
+                             model%geomderv%dthckdew, &
+                             model%geomderv%dthckdns, &
+                             .false., .false.)
+
+  !Make sure that the derivatives are 0 where staggered thickness is 0
+  where (model%geomderv%stagthck == 0)
+      model%geomderv%dusrfdew = 0
+      model%geomderv%dusrfdns = 0
+      model%geomderv%dthckdew = 0
+      model%geomderv%dthckdns = 0
+  endwhere
+
+  ! lower-surface slope from usrf = lsrf + thck
+  !TODO: correct signs
+  model%geomderv%dlsrfdew = model%geomderv%dusrfdew - model%geomderv%dthckdew
+  model%geomderv%dlsrfdns = model%geomderv%dusrfdns - model%geomderv%dthckdns
+
+  !Compute second derivatives.
+  !TODO: Turn this on and off conditionally based on whether the computation
+  !is required
+  call d2f_field_stag(model%geometry%usrf, model%numerics%dew, model%numerics%dns, &
+                      model%geomderv%d2usrfdew2, model%geomderv%d2usrfdns2, &
+                      .false., .false.)
+
+  call d2f_field_stag(model%geometry%thck, model%numerics%dew, model%numerics%dns, &
+                      model%geomderv%d2thckdew2, model%geomderv%d2thckdns2, &
+                      .false., .false.)
+
+end subroutine
+
+!*FD Computes derivatives of the geometry onto variables on a nonstaggered
+!*FD grid. Used for some higher-order routines
+subroutine geometry_derivs_unstag(model)
+  implicit none
+
+  !> Computes first and second derivatives of usrf/lsrf/thck on the
+  !> unstaggered (scalar) grid, upwinding at ice margins so the stencils
+  !> never difference across the domain edge of the ice.
+  type(glide_global_type) :: model
+
+  !Fields allow us to upwind derivatives at the ice sheet lateral boundaries
+  !so that we're not differencing out of the domain
+  real(dp), dimension(model%general%ewn, model%general%nsn) :: direction_x, direction_y
+
+  !NOTE(review): upwind_from_mask and write_xls are not use-associated in this
+  !  routine; presumably they come from a module-level use -- confirm.
+  call upwind_from_mask(model%geometry%thkmask, direction_x, direction_y)
+  !NOTE(review): these two write_xls calls dump the upwind-direction fields to
+  !  files on every invocation -- looks like leftover debug output; confirm
+  !  whether it should be removed or guarded by a debug flag.
+  call write_xls("direction_x_unstag.txt", direction_x)
+  call write_xls("direction_y_unstag.txt", direction_y)
+  !Compute first derivatives of geometry
+  call df_field_2d(model%geometry%usrf, model%numerics%dew, model%numerics%dns, &
+                   model%geomderv%dusrfdew_unstag, model%geomderv%dusrfdns_unstag, &
+                   .false., .false., direction_x, direction_y)
+
+  call df_field_2d(model%geometry%lsrf, model%numerics%dew, model%numerics%dns, &
+                   model%geomderv%dlsrfdew_unstag, model%geomderv%dlsrfdns_unstag, &
+                   .false., .false., direction_x, direction_y)
+
+  call df_field_2d(model%geometry%thck, model%numerics%dew, model%numerics%dns, &
+                   model%geomderv%dthckdew_unstag, model%geomderv%dthckdns_unstag, &
+                   .false., .false., direction_x, direction_y)
+
+  ! second derivatives (usrf and thck only)
+  call d2f_field(model%geometry%usrf, model%numerics%dew, model%numerics%dns, &
+                 model%geomderv%d2usrfdew2_unstag, model%geomderv%d2usrfdns2_unstag, &
+                 direction_x, direction_y)
+
+  call d2f_field(model%geometry%thck, model%numerics%dew, model%numerics%dns, &
+                 model%geomderv%d2thckdew2_unstag, model%geomderv%d2thckdns2_unstag, &
+                 direction_x, direction_y)
+
+
+end subroutine
+
+!---------------------------------------------------------------------------------
+
+  subroutine timeders(thckwk,ipvr,opvr,mask,time,which)
+
+    !*FD Calculates the time-derivative of a field. This subroutine is used by
+    !*FD the temperature solver only.
+    !*FD
+    !*FD opvr = conv * (ipvr - previously stored field) / (time - oldtime),
+    !*FD restricted to points where mask /= 0 (zero elsewhere, and zero
+    !*FD everywhere when no time has elapsed).  The current ipvr is then
+    !*FD stored in thckwk%olds(:,:,which) for the next call.
+
+    use glimmer_global, only : dp, sp
+    use glimmer_paramets, only : conv
+
+    implicit none
+
+    ! (the Input/Output labels on opvr/ipvr were previously swapped)
+    type(glide_thckwk) :: thckwk     !*FD Derived-type containing work data
+    real(dp), intent(out), dimension(:,:) :: opvr  !*FD Output (derivative) field
+    real(dp), intent(in), dimension(:,:) :: ipvr   !*FD Input field
+    real(sp), intent(in) :: time                   !*FD current time
+    integer, intent(in), dimension(:,:) :: mask    !*FD mask for calculation
+    integer, intent(in) :: which                   !*FD selector for stored field
+
+    real(sp) :: factor
+
+    ! elapsed time since the stored field was captured
+    !NOTE(review): exact equality test on a real, and "1./factor" is a
+    !  default-real (single precision) reciprocal -- works, but consider a
+    !  tolerance and a dp-kinded literal.
+    factor = (time - thckwk%oldtime)
+    if (factor .eq.0) then
+       opvr = 0.0d0
+    else
+       factor = 1./factor
+       where (mask /= 0)
+          opvr = conv * (ipvr - thckwk%olds(:,:,which)) * factor
+       elsewhere
+          opvr = 0.0d0
+       end where
+    end if
+
+    ! save current field for the next derivative evaluation
+    thckwk%olds(:,:,which) = ipvr
+
+    ! only advance the stored time once the last tracked field has been updated
+    if (which == thckwk%nwhich) then
+       thckwk%oldtime = time
+    end if
+
+  end subroutine timeders
+
+!---------------------------------------------------------------------------------
+
+  subroutine filterthck(thck,ewn,nsn)
+
+    !> Applies one pass of a 4th-order (1,-4,12,-4,1)-style smoothing filter
+    !> to the thickness field, in place.  Only points whose full 5-point
+    !> ew and ns stencils are entirely ice-covered are smoothed; all other
+    !> points are copied unchanged.  The two outermost rows/columns are
+    !> never modified.
+
+    use glimmer_global, only : dp ! ew, ewn, ns, nsn
+
+    implicit none
+
+    real(dp), dimension(:,:), intent(inout) :: thck
+    real(dp), dimension(:,:), allocatable :: smth
+    integer :: ewn,nsn
+
+    real(dp), parameter :: f = 0.1d0 / 16.0d0   ! filter strength
+    !NOTE(review): "count" shadows the intrinsic of the same name, and the
+    !  bare "print *, count" below looks like leftover debug output.
+    integer :: count
+    integer :: ns,ew
+
+    allocate(smth(ewn,nsn))
+    count = 1
+
+    do ns = 3,nsn-2
+      do ew = 3,ewn-2
+
+        ! smooth only where the whole cross-shaped stencil carries ice
+        if (all((thck(ew-2:ew+2,ns) > 0.0d0)) .and. all((thck(ew,ns-2:ns+2) > 0.0d0))) then
+          ! ew stencil contributes (1,-4,12,-4,1); ns stencil (1,-4,.,-4,1) --
+          ! the shared centre weight 12 covers both directions
+          smth(ew,ns) =  thck(ew,ns) + f * &
+                        (thck(ew-2,ns) - 4.0d0 * thck(ew-1,ns) + 12.0d0 * thck(ew,ns) - &
+                         4.0d0 * thck(ew+1,ns) + thck(ew+2,ns) + &
+                         thck(ew,ns-2) - 4.0d0 * thck(ew,ns-1) - &
+                         4.0d0 * thck(ew,ns+1) + thck(ew,ns+2))
+          count = count + 1
+        else
+          smth(ew,ns) = thck(ew,ns)
+        end if
+
+      end do
+    end do
+
+    thck(3:ewn-2,3:nsn-2) = smth(3:ewn-2,3:nsn-2)
+    print *, count
+
+    deallocate(smth)
+
+  end subroutine filterthck
+
+!----------------------------------------------------------------------
+
+  subroutine swapbndh(bc,a,b,c,d)
+
+    !> Copy the neighbouring rows b and d into the boundary rows a and c
+    !> when the boundary-condition flag bc is zero; otherwise leave the
+    !> boundary rows untouched.
+
+    use glimmer_global, only : dp
+
+    implicit none
+
+    real(dp), intent(out), dimension(:) :: a, c
+    real(dp), intent(in), dimension(:) :: b, d
+    integer, intent(in) :: bc
+
+    ! guard clause: any non-zero flag means no swap is required
+    if (bc /= 0) return
+
+    a(:) = b(:)
+    c(:) = d(:)
+
+  end subroutine swapbndh
+
+ !-----------------------------------------------------------------------------
+ ! ADI routines
+ !-----------------------------------------------------------------------------
+
+  subroutine stagleapthck(model,newtemps)
+
+    !*FD this subroutine solves the ice sheet thickness equation using the ADI scheme
+    !*FD diffusivities are updated for each half time step
+    !*FD
+    !*FD First half step sweeps rows (ns fixed) solving tridiagonal systems in
+    !*FD ew into thckwk%oldthck; second half step sweeps columns (ew fixed)
+    !*FD solving in ns back into geometry%thck.  Thickness is clamped to be
+    !*FD non-negative after each half step and zeroed on the domain boundary.
+
+    use glide_setup, only: glide_calclsrf
+    use glide_velo
+    use glimmer_utils
+    implicit none
+    ! subroutine arguments
+    type(glide_global_type) :: model
+    logical, intent(in) :: newtemps !*FD true when we should recalculate Glen's A
+
+    ! local variables
+    integer ew,ns, n
+
+    if (model%geometry%empty) then
+
+       ! no ice anywhere: just add the net accumulation
+       model%geometry%thck = dmax1(0.0d0,model%geometry%thck + model%climate%acab * model%pcgdwk%fc2(2))
+#ifdef DEBUG
+       print *, "* thck empty - net accumulation added", model%numerics%time
+#endif
+    else
+
+       ! calculate basal velos
+       if (newtemps) then
+          call slipvelo(model, &
+               1, &
+               model%velocity% btrc, &
+               model%velocity% ubas, &
+               model%velocity% vbas)
+          ! calculate Glen's A if necessary
+          call velo_integrate_flwa(model%velowk,model%geomderv%stagthck,model%temper%flwa)
+       end if
+       call slipvelo(model, &
+            2, &
+            model%velocity% btrc, &
+            model%velocity% ubas, &
+            model%velocity% vbas)
+
+       ! calculate diffusivity
+       call velo_calc_diffu(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+            model%geomderv%dusrfdns,model%velocity%diffu)
+
+       ! total diffusivity = internal deformation + basal sliding contribution
+       model%velocity%total_diffu(:,:) = model%velocity%diffu(:,:) + model%velocity%ubas(:,:)
+
+       ! first ADI step, solve thickness equation along rows j
+       n = model%general%ewn
+       do ns=2,model%general%nsn-1
+          call adi_tri ( model%thckwk%alpha, &
+                         model%thckwk%beta,  &
+                         model%thckwk%gamma, &
+                         model%thckwk%delta, &
+                         model%geometry%thck(:,ns), &
+                         model%geometry%lsrf(:,ns), &
+                         model%climate%acab(:,ns), &
+                         model%velocity%vflx(:,ns), &
+                         model%velocity%vflx(:,ns-1), &
+                         model%velocity%total_diffu(:,ns), &
+                         model%velocity%total_diffu(:,ns-1), &
+                         model%numerics%dt, &
+                         model%numerics%dew, &
+                         model%numerics%dns )
+          !EIB! gc2 acab input, not sure why the difference
+          !model%climate%acab(:,ns)-real(model%options%basal_mbal)*real(model%temper%bmlt(:,ns),sp), &
+          ! intermediate (half-step) thickness goes into thckwk%oldthck
+          call tridiag(model%thckwk%alpha(1:n), &
+                       model%thckwk%beta(1:n), &
+                       model%thckwk%gamma(1:n), &
+                       model%thckwk%oldthck(:,ns), &
+                       model%thckwk%delta(1:n))
+       end do
+
+       model%thckwk%oldthck(:,:) = max(model%thckwk%oldthck(:,:), 0.d0)
+
+       ! second ADI step, solve thickness equation along columns i
+       n = model%general%nsn
+       do ew=2,model%general%ewn-1
+          call adi_tri ( model%thckwk%alpha, &
+                         model%thckwk%beta,  &
+                         model%thckwk%gamma, &
+                         model%thckwk%delta, &
+                         model%thckwk%oldthck(ew,:), &
+                         model%geometry%lsrf(ew, :), &
+                         model%climate%acab(ew, :), &
+                         model%velocity%uflx(ew,:), &
+                         model%velocity%uflx(ew-1,:), &
+                         model%velocity%total_diffu(ew,:), &
+                         model%velocity%total_diffu(ew-1,:), &
+                         model%numerics%dt, &
+                         model%numerics%dns, &
+                         model%numerics%dew )
+          !EIB! again, input difference
+          !model%climate%acab(ew, :)-real(model%options%basal_mbal)*real(model%temper%bmlt(ew, :),sp), &
+
+          ! final thickness for this column
+          call tridiag(model%thckwk%alpha(1:n), &
+                       model%thckwk%beta(1:n), &
+                       model%thckwk%gamma(1:n), &
+                       model%geometry%thck(ew, :), &
+                       model%thckwk%delta(1:n))
+       end do
+
+       model%geometry%thck(:,:) = max(model%geometry%thck(:,:), 0.d0)
+
+       ! Apply boundary conditions
+       ! (zero thickness on all four domain edges)
+       model%geometry%thck(1,:) = 0.0
+       model%geometry%thck(model%general%ewn,:) = 0.0
+       model%geometry%thck(:,1) = 0.0
+       model%geometry%thck(:,model%general%nsn) = 0.0
+
+       ! calculate horizontal velocity field
+       call slipvelo(model, &
+            3, &
+            model%velocity%btrc, &
+            model%velocity%ubas, &
+            model%velocity%vbas)
+       call velo_calc_velo(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+            model%geomderv%dusrfdns,model%temper%flwa,model%velocity%diffu,model%velocity%ubas, &
+            model%velocity%vbas,model%velocity%uvel,model%velocity%vvel,model%velocity%uflx,model%velocity%vflx,&
+            model%velocity%surfvel)
+       !EIB! old way
+       !call velo_calc_velo(model%velowk,model%geomderv%stagthck,model%geomderv%dusrfdew, &
+       !     model%geomderv%dusrfdns,model%temper%flwa,model%velocity%diffu,model%velocity%ubas, &
+       !     model%velocity%vbas,model%velocity%uvel,model%velocity%vvel,model%velocity%uflx,model%velocity%vflx)
+    end if
+
+    !------------------------------------------------------------
+    ! calculate upper and lower surface
+    !------------------------------------------------------------
+    call glide_calclsrf(model%geometry%thck, model%geometry%topg, model%climate%eus, model%geometry%lsrf)
+    model%geometry%usrf = max(0.d0,model%geometry%thck + model%geometry%lsrf)
+
+  end subroutine stagleapthck
+
+!---------------------------------------------------------------------------------
+
+  subroutine adi_tri(a,b,c,d,thk,tpg,mb,flx_p,flx_m,dif_p,dif_m,dt,ds1, ds2)
+    !*FD construct tri-diagonal matrix system for a column/row
+    !*FD
+    !*FD Builds sub/super/diagonal coefficients from the averaged staggered
+    !*FD diffusivities and assembles the RHS from the old thickness, the
+    !*FD transverse flux divergence, the mass balance, and the bed term.
+    !*FD The first and last rows are left as identity-like rows (a=c=0,
+    !*FD d=0) and are handled by the caller's boundary conditions.
+    use glimmer_global, only : dp, sp
+    implicit none
+
+    real(dp), dimension(:), intent(out) :: a !*FD alpha (subdiagonal)
+    real(dp), dimension(:), intent(out) :: b !*FD alpha (diagonal)
+    real(dp), dimension(:), intent(out) :: c !*FD alpha (superdiagonal)
+    real(dp), dimension(:), intent(out) :: d !*FD right-hand side
+
+    real(dp), dimension(:), intent(in) :: thk   !*FD ice thickness
+    real(dp), dimension(:), intent(in) :: tpg   !*FD lower surface of ice
+    real(sp), dimension(:), intent(in) :: mb    !*FD mass balance (single precision)
+    real(dp), dimension(:), intent(in) :: flx_p !*FD flux +1/2
+    real(dp), dimension(:), intent(in) :: flx_m !*FD flux -1/2
+    real(dp), dimension(:), intent(in) :: dif_p !*FD diffusivity +1/2
+    real(dp), dimension(:), intent(in) :: dif_m !*FD diffusivity -1/2
+
+    real(dp), intent(in) :: dt !*FD time step
+    real(dp), intent(in) :: ds1, ds2 !*FD spatial steps inline and transversal
+
+    ! local variables
+    ! f1: inline diffusion factor; f2: transverse flux factor; f3: mass-balance factor
+    !NOTE(review): the literals 0., 2., 4 etc. are default-real/integer mixed
+    !  into dp expressions; consider dp-kinded literals (e.g. 2.0d0).
+    real(dp) :: f1, f2, f3
+    integer :: i,n
+
+    n = size(thk)
+
+    f1 = dt/(4*ds1*ds1)
+    f2 = dt/(4*ds2)
+    f3 = dt/2.
+
+    a(:) = 0.
+    b(:) = 0.
+    c(:) = 0.
+    d(:) = 0.
+
+    ! sub/super-diagonals from diffusivities averaged to the half points
+    a(1) = 0.
+    do i=2,n
+       a(i) = f1*(dif_m(i-1)+dif_p(i-1))
+    end do
+    do i=1,n-1
+       c(i) = f1*(dif_m(i)+dif_p(i))
+    end do
+    c(n) = 0.
+    ! diagonal before the implicit +1 shift below
+    b(:) = -(a(:)+c(:))
+
+    ! calculate RHS
+    do i=2,n-1
+       d(i) = thk(i) - &
+            f2 * (flx_p(i-1) + flx_p(i) - flx_m(i-1) - flx_m(i)) + &
+            f3 * mb(i) - &
+            a(i)*tpg(i-1) - b(i)*tpg(i) - c(i)*tpg(i+1)
+    end do
+
+    ! shift the diagonal by identity for the implicit scheme
+    b(:) = 1.+b(:)
+
+  end subroutine adi_tri
+
+end module glide_thck
+
diff --git a/components/cism/glimmer-cism/libglide/glide_types.F90 b/components/cism/glimmer-cism/libglide/glide_types.F90
new file mode 100644
index 0000000000..05ae316839
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_types.F90
@@ -0,0 +1,1992 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_types.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glide_types
+
+ !> Holds type definitions for the derived types used by each
+ !> instance of the ice model. Originally, each of these types
+ !> was a module containing variables, which were used as containers
+ !> for global variables. However, the need to allow for multiple
+ !> ice model instances meant that the nested derived types were instituted
+ !> instead. However, there is probably one too many levels in this scheme.
+ !> It would be better if the different types here were contained in the
+ !> higher-level instance type (\texttt{glint\_instance}), rather than
+ !> the intermediate model type (\texttt{glide\_global\_type}).
+ !>
+ !> Note that this \emph{is} now where the defaults are defined for these
+ !> variables.
+
+!TODO - Clean up the glide_global type so it holds fewer subtypes?
+! For example, we could replace some work types (tempwk, velowk) with local arrays and parameters.
+
+ use glimmer_sparse_type
+ use glimmer_global, only: sp, dp
+ use glimmer_ncdf
+ use profile
+ use glimmer_coordinates, only: coordsystem_type
+ use glimmer_map_types
+ use glimmer_physcon
+
+ implicit none
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ ! Constants that describe the options available
+ ! We use these integer parameters elsewhere in the code to avoid
+ ! hardwiring of option numbers
+
+ ! basic Glimmer/Glide options
+
+ integer, parameter :: GLOBAL_BC_PERIODIC = 0 ! doubly periodic
+ integer, parameter :: GLOBAL_BC_OUTFLOW = 1 ! free outflow; scalars in global halo set to zero
+
+ integer, parameter :: DYCORE_GLIDE = 0 ! old shallow-ice dycore from Glimmer
+ integer, parameter :: DYCORE_GLAM = 1 ! Payne-Price finite-difference solver
+ integer, parameter :: DYCORE_GLISSADE = 2 ! prototype finite-element solver
+ integer, parameter :: DYCORE_ALBANYFELIX = 3 ! External Albany-Felix finite-element solver
+ integer, parameter :: DYCORE_BISICLES = 4 ! BISICLES-Chombo external FVM solver
+
+ integer, parameter :: EVOL_PSEUDO_DIFF = 0 ! glide only
+ integer, parameter :: EVOL_ADI = 1 ! glide only
+ integer, parameter :: EVOL_DIFFUSION = 2 ! glide only
+ integer, parameter :: EVOL_INC_REMAP = 3 ! glam/glissade only
+ integer, parameter :: EVOL_UPWIND = 4 ! glam/glissade only
+ integer, parameter :: EVOL_NO_THICKNESS = 5 ! glam/glissade only
+
+ !NOTE: Use option 1 for prognostic temperature with any dycore
+ ! Option 3 is under construction
+
+ integer, parameter :: TEMP_SURFACE_AIR_TEMP = 0
+ integer, parameter :: TEMP_PROGNOSTIC = 1
+ integer, parameter :: TEMP_STEADY = 2
+ integer, parameter :: TEMP_ENTHALPY = 3
+
+ integer, parameter :: TEMP_INIT_ZERO = 0
+ integer, parameter :: TEMP_INIT_ARTM = 1
+ integer, parameter :: TEMP_INIT_LINEAR = 2
+
+ integer, parameter :: FLWA_CONST_FLWA = 0
+ integer, parameter :: FLWA_PATERSON_BUDD_CONST_TEMP = 1
+ integer, parameter :: FLWA_PATERSON_BUDD = 2
+
+ integer, parameter :: BTRC_ZERO = 0
+ integer, parameter :: BTRC_CONSTANT = 1
+ integer, parameter :: BTRC_CONSTANT_BWAT = 2
+ integer, parameter :: BTRC_CONSTANT_TPMP = 3
+ integer, parameter :: BTRC_LINEAR_BMLT = 4
+ integer, parameter :: BTRC_TANH_BWAT = 5
+
+ integer, parameter :: BWATER_NONE = 0
+ integer, parameter :: BWATER_LOCAL = 1
+ integer, parameter :: BWATER_FLUX = 2
+ integer, parameter :: BWATER_CONST = 3
+ integer, parameter :: BWATER_OCEAN_PENETRATION = 4 ! effective pressure calculation with pw=ocean pressure for grounding line parameterisation (Leguy, et al., TC, 2014)
+ !integer, parameter :: BWATER_BASAL_PROC = 4 ! not currently supported
+
+ integer, parameter :: BASAL_MBAL_NO_CONTINUITY = 0
+ integer, parameter :: BASAL_MBAL_CONTINUITY = 1
+
+ integer, parameter :: GTHF_UNIFORM = 0
+ integer, parameter :: GTHF_PRESCRIBED_2D = 1
+ integer, parameter :: GTHF_COMPUTE = 2
+
+ integer, parameter :: RELAXED_TOPO_NONE = 0 ! Do nothing
+ integer, parameter :: RELAXED_TOPO_INPUT = 1 ! Input topo is relaxed
+ integer, parameter :: RELAXED_TOPO_COMPUTE = 2 ! Input topo in isostatic equilibrium
+ ! compute relaxed topo
+
+ integer, parameter :: ISOSTASY_NONE = 0
+ integer, parameter :: ISOSTASY_COMPUTE = 1
+
+ integer, parameter :: LITHOSPHERE_LOCAL = 0
+ integer, parameter :: LITHOSPHERE_ELASTIC = 1
+
+ integer, parameter :: ASTHENOSPHERE_FLUID = 0
+ integer, parameter :: ASTHENOSPHERE_RELAXING = 1
+
+ integer, parameter :: MARINE_NONE = 0
+ integer, parameter :: MARINE_FLOAT_ZERO = 1
+ integer, parameter :: MARINE_FLOAT_FRACTION = 2
+ integer, parameter :: MARINE_RELX_THRESHOLD = 3
+ integer, parameter :: MARINE_TOPG_THRESHOLD = 4
+ integer, parameter :: MARINE_HUYBRECHTS = 5
+
+ integer, parameter :: VERTINT_STANDARD = 0
+ integer, parameter :: VERTINT_KINEMATIC_BC = 1
+
+ integer, parameter :: SIGMA_COMPUTE_GLIDE = 0
+ integer, parameter :: SIGMA_EXTERNAL = 1
+ integer, parameter :: SIGMA_CONFIG = 2
+ integer, parameter :: SIGMA_COMPUTE_EVEN = 3
+ integer, parameter :: SIGMA_COMPUTE_PATTYN = 4
+
+ integer, parameter :: RESTART_FALSE = 0
+ integer, parameter :: RESTART_TRUE = 1
+
+ !basal proc option disabled for now
+ integer, parameter :: BAS_PROC_DISABLED = 0
+!! integer, parameter :: BAS_PROC_FULLCALC = 1
+!! integer, parameter :: BAS_PROC_FASTCALC = 2
+
+ ! higher-order options
+
+ integer, parameter :: HO_EFVS_CONSTANT = 0
+ integer, parameter :: HO_EFVS_FLOWFACT = 1
+ integer, parameter :: HO_EFVS_NONLINEAR = 2
+
+ integer, parameter :: HO_DISP_NONE = -1
+ integer, parameter :: HO_DISP_SIA = 0
+ integer, parameter :: HO_DISP_FIRSTORDER = 1
+
+ integer, parameter :: HO_BABC_CONSTANT = 0
+ integer, parameter :: HO_BABC_SIMPLE = 1
+ integer, parameter :: HO_BABC_YIELD_PICARD = 2
+ integer, parameter :: HO_BABC_BETA_BWAT = 3
+ integer, parameter :: HO_BABC_LARGE_BETA = 4
+ integer, parameter :: HO_BABC_EXTERNAL_BETA = 5
+ integer, parameter :: HO_BABC_NO_SLIP = 6
+ integer, parameter :: HO_BABC_YIELD_NEWTON = 7
+ integer, parameter :: HO_BABC_ISHOMC = 8
+ integer, parameter :: HO_BABC_POWERLAW = 9
+ integer, parameter :: HO_BABC_COULOMB_FRICTION = 10
+
+ integer, parameter :: HO_NONLIN_PICARD = 0
+ integer, parameter :: HO_NONLIN_JFNK = 1
+
+ integer, parameter :: HO_RESID_MAXU = 0
+ integer, parameter :: HO_RESID_MAXU_NO_UBAS = 1
+ integer, parameter :: HO_RESID_MEANU = 2
+ integer, parameter :: HO_RESID_L2NORM = 3
+ integer, parameter :: HO_RESID_L2NORM_RELATIVE = 4
+
+ integer, parameter :: HO_SPARSE_PCG_INCH = -1
+ integer, parameter :: HO_SPARSE_BICG = 0
+ integer, parameter :: HO_SPARSE_GMRES = 1
+ integer, parameter :: HO_SPARSE_PCG_STANDARD = 2
+ integer, parameter :: HO_SPARSE_PCG_CHRONGEAR = 3
+ integer, parameter :: HO_SPARSE_TRILINOS = 4
+
+ integer, parameter :: HO_APPROX_LOCAL_SIA = -1
+ integer, parameter :: HO_APPROX_SIA = 0
+ integer, parameter :: HO_APPROX_SSA = 1
+ integer, parameter :: HO_APPROX_BP = 2
+ integer, parameter :: HO_APPROX_L1L2 = 3
+
+ integer, parameter :: HO_PRECOND_NONE = 0
+ integer, parameter :: HO_PRECOND_DIAG = 1
+ integer, parameter :: HO_PRECOND_SIA = 2
+
+ integer, parameter :: HO_GRADIENT_CENTERED = 0
+ integer, parameter :: HO_GRADIENT_UPSTREAM = 1
+
+ integer, parameter :: HO_GRADIENT_MARGIN_ALL = 0
+ integer, parameter :: HO_GRADIENT_MARGIN_ICE_LAND = 1
+ integer, parameter :: HO_GRADIENT_MARGIN_ICE_ONLY = 2
+
+ integer, parameter :: HO_ASSEMBLE_BETA_STANDARD = 0
+ integer, parameter :: HO_ASSEMBLE_BETA_LOCAL = 1
+
+ integer, parameter :: HO_GROUND_NO_GLP = 0
+ integer, parameter :: HO_GROUND_GLP = 1
+ integer, parameter :: HO_GROUND_ALL = 2
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  type glide_general
+
+    !> Holds fundamental parameters of the ice model geometry.
+
+    integer :: ewn = 0  !> The number of grid-points in the E-W direction.
+    integer :: nsn = 0  !> The number of grid-points in the N-S direction.
+    integer :: upn = 1  !> The number of vertical levels in the model.
+
+    type(coordsystem_type) :: ice_grid  !> coordinate system of the ice grid
+    type(coordsystem_type) :: velo_grid !> coordinate system of the velocity grid
+
+    ! Saved copies of the original coordinate axes (not reallocated on rescale)
+    real(dp), dimension(:),pointer :: x0 => null() !original x0 grid
+    real(dp), dimension(:),pointer :: y0 => null() !original y0 grid
+    real(dp), dimension(:),pointer :: x1 => null() !original x1 grid
+    real(dp), dimension(:),pointer :: y1 => null() !original y1 grid
+
+    !> Global boundary condition selector; values match the module parameters
+    !> GLOBAL_BC_PERIODIC (0) and GLOBAL_BC_OUTFLOW (1).
+    integer :: global_bc = 0   ! 0 for periodic, 1 for outflow
+
+  end type glide_general
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_options
+
+ !> Holds user options controlling the methods used in the ice-model integration.
+
+ !-----------------------------------------------------------------------
+ ! standard options
+ !-----------------------------------------------------------------------
+
+ integer :: whichdycore = 2
+
+ ! Choice of two Glimmer dycores:
+ !> \begin{description}
+ !> \item[0] Glide dycore (SIA, serial (SLAP) only)
+ !> \item[1] SEACISM/Glam dycore (1st-order, FDM, serial (SLAP) or parallel (Trilinos))
+ !> \item[2] Glissade dycore (1st-order, FEM, serial (SLAP) or parallel (F90 native PCG solver) )
+    !> \item[3] FELIX-Albany dycore (1st-order, FEM, using Trilinos/Albany, mesh information from Glissade)
+ !> \item[4] BISICLES dycore (L1L2, FVM, parallel using Chombo AMR)
+ !> \end{description}
+
+ integer :: whichevol = 0
+
+ !> Thickness evolution method:
+ !> \begin{description}
+ !> \item[0] Pseudo-diffusion
+ !> \item[1] Alternating direction implicit (ADI)
+ !> \item[2] Diffusion (also calculates velocities)
+ !> \item[3] Incremental remapping
+ !> \item[4] 1st-order upwind scheme
+ !> \item[5] Temperature evolves but thickness does not
+ !> \end{description}
+
+ integer :: whichtemp = 1
+
+ !> Method of ice temperature calculation:
+ !> \begin{description}
+ !> \item[0] Set column to surface air temperature
+ !> \item[1] Prognostic temperature solution
+ !> \item[2] Do NOTHING - hold temperatures steady at initial value
+ !> \item[3] Prognostic enthalpy solution
+ !> \end{description}
+
+ integer :: temp_init = 1
+
+ ! Temperature initialization:
+ !> \begin{description}
+ !> \item[0] Initialize temperature to 0 C
+ !> \item[1] Initialize temperature to surface air temperature
+ !> \item[2] Initialize temperature with a linear profile in each column
+ !> \end{description}
+
+ !> Method for calculating flow factor $A$:
+
+ integer :: whichflwa = 2
+
+    !> \begin{description}
+    !> \item[0] Set equal to $1\times 10^{-16}\,\mathrm{yr}^{-1}\,\mathrm{Pa}^{-n}$
+    !> \item[1] \emph{Paterson and Budd} relationship,
+    !> with temperature set to $-5^{\circ}\mathrm{C}$
+    !> \item[2] \emph{Paterson and Budd} relationship
+    !> \end{description}
+
+ integer :: whichbtrc = 0
+
+ !> Basal slip coefficient:
+ !> \begin{description}
+ !> \item[0] Set equal to zero everywhere
+ !> \item[1] Set to (non--zero) constant
+ !> \item[2] Set to (non--zero) constant where basal water is present, otherwise to zero
+ !> \item[3] Set to (non--zero) constant where temperature is at pressure melting point of ice, otherwise to zero
+ !> \item[4] linear function of basal melt rate
+ !> \item[5] \texttt{tanh} function of basal water depth
+ !> \end{description}
+
+ integer :: whichbwat = 0
+
+ !> Basal water depth:
+ !> \begin{description}
+ !> \item[0] Set to zero everywhere
+ !> \item[1] Compute from local basal water balance
+ !> \item[2] Compute the basal water flux, then find depth via calculation
+ !> \item[3] Set to constant (10 m) everywhere, to force T = Tpmp.
+ !> \item[4] Calculated from till water content, in the basal processes module
+ !> \end{description}
+
+ integer :: basal_mbal = 0
+
+ !> basal melt rate:
+ !> \begin{description}
+ !> \item[0] Basal melt rate not included in continuity equation
+ !> \item[1] Basal melt rate included in continuity equation
+ !> \end{description}
+
+ integer :: gthf = 0
+
+ !> geothermal heat flux:
+ !> \begin{description}
+ !> \item[0] prescribed uniform geothermal heat flux
+ !> \item[1] read 2D geothermal flux field from input file (if present)
+ !> \item[2] calculate geothermal flux using 3d diffusion
+ !> \end{description}
+
+ ! This replaces model%isos%do_isos
+ integer :: isostasy = 0
+
+ !> isostasy:
+ !> \begin{description}
+ !> \item[0] no isostatic adjustment
+ !> \item[1] compute isostatic adjustment using lithosphere/asthenosphere model
+ !> \end{description}
+
+ !TODO - Should whichrelaxed move from the options to the isostasy section?
+ integer :: whichrelaxed = 0
+
+ !> relaxed topography:
+ !> \begin{description}
+ !> \item[0] get relaxed topo from separate variable (in practice, do nothing)
+ !> \item[1] first time slice of input topo is relaxed
+ !> \item[2] first time slice of input topo is in isostatic equilibrium
+ !> \end{description}
+
+ integer :: whichmarn = 1
+
+ !> Marine limit:
+ !> \begin{description}
+ !> \item[0] No action
+ !> \item[1] Set thickness to zero if floating
+ !> \item[2] Lose fraction of ice when edge cell
+ !> \item[3] Set thickness to zero if relaxed bedrock is more than
+ !> certain water depth (variable "mlimit" in glide_types)
+ !> \item[4] Set thickness to zero if present bedrock is more than
+ !> certain water depth (variable "mlimit" in glide_types)
+ !> \item[5] Huybrechts grounding line scheme for Greenland initialization
+ !> \end{description}
+
+ integer :: whichwvel = 0
+
+ !> Vertical velocities:
+ !> \begin{description}
+ !> \item[0] Usual vertical integration
+ !> \item[1] Vertical integration constrained so that
+ !> upper kinematic B.C. obeyed
+ !> \end{description}
+
+ integer :: which_sigma = 0
+
+ !> \begin{description}
+ !> \item[0] compute standard Glimmer sigma coordinates
+ !> \item[1] sigma coordinates are given in external file
+ !> \item[2] sigma coordinates are given in configuration file
+ !> \item[3] evenly spaced levels, as required for glam dycore
+ !> \item[4] compute Pattyn sigma coordinates
+ !> \end{description}
+
+ !TODO - Make is_restart a logical variable?
+
+ integer :: is_restart = 0
+ !> if the run is a restart of a previous run
+ !> \begin{description}
+ !> \item[0] normal start-up (take init fields from .nc input file OR if absent, use default options)
+ !> \item[1] restart model from previous run (do not calc. temp, rate factor, or vel)
+ !> \end{description}
+
+ ! This is a Glimmer serial option
+ ! The parallel code enforces periodic EW and NS boundary conditions by default
+ logical :: periodic_ew = .false.
+
+ !> \begin{description}
+ !> \item[0] no periodic EW boundary conditions
+ !> \item[1] periodic EW boundary conditions
+ !> \end{description}
+
+ !-----------------------------------------------------------------------
+ ! Higher-order options
+ ! Associated with Payne-Price dycore (glam) and newer glissade dycore
+ !-----------------------------------------------------------------------
+
+ integer :: which_ho_efvs = 2
+
+ !> Flag that indicates how effective viscosity is computed
+ !> \begin{description}
+ !> \item[0] constant value
+ !> \item[1] multiple of flow factor
+    !> \item[2] compute from effective strain rate
+    !> \end{description}
+
+ integer :: which_ho_disp = 1
+
+ !> Flag that indicates method for computing the dissipation during the temperature calc.
+ !> \begin{description}
+ !> \item[-1] for no dissipation
+ !> \item[0] for 0-order SIA approx
+ !> \item[1] for first-order dissipation (Blatter-Pattyn)
+ !>
+ !> \end{description}
+
+ integer :: which_ho_babc = 4
+
+ !> Flag that describes basal boundary condition for HO dyn core:
+ !> \begin{description}
+ !> \item[0] spatially uniform value (low value of 10 Pa/yr by default)
+ !> \item[1] simple hard-coded pattern (useful for debugging)
+ !> \item[2] treat beta value as a till yield stress (in Pa) using Picard iteration
+ !> \item[3] linear (inverse) function of bwat
+ !> \item[4] very large value for beta to enforce no slip everywhere
+ !> \item[5] beta field passed in from .nc input file as part of standard i/o
+ !> \item[6] no slip everywhere (using Dirichlet BC rather than large beta)
+ !> \item[7] treat beta value as till yield stress (in Pa) using Newton-type iteration (in development)
+ !> \item[8] beta field as prescribed for ISMIP-HOM test C (serial only)
+ !> \item[9] power law based using effective pressure
+ !> \item[10] Coulomb friction law using effective pressure
+ !> \end{description}
+
+ integer :: which_ho_nonlinear = 0
+ !> Flag that indicates method for solving the nonlinear iteration when solving
+    !> the first-order momentum balance
+    !> \begin{description}
+    !> \item[0] use the standard Picard iteration
+    !> \item[1] use Jacobian Free Newton Krylov (JFNK) method
+    !> \end{description}
+
+ integer :: which_ho_resid = 3
+ !> Flag that indicates method for computing residual in PP dyn core:
+ !> \begin{description}
+ !> \item[0] maxval
+ !> \item[1] maxval ignoring basal velocity
+ !> \item[2] mean value
+ !> \item[3] L2 norm of system residual, Ax-b=resid
+ !> \item[4] L2 norm of system residual relative to rhs, |Ax-b|/|b|
+    !> \end{description}
+
+ integer :: which_ho_sparse = 0
+ !> Flag that indicates method for solving the sparse linear system
+ !> that arises from the higher-order solver
+ !> \begin{description}
+ !> \item[-1] SLAP (serial): Preconditioned conjugate gradient, incomplete Cholesky preconditioner
+ !> \item[0] SLAP (serial): Biconjugate gradient, incomplete LU preconditioner
+ !> \item[1] SLAP (serial): GMRES, incomplete LU preconditioner
+ !> \item[2] Native PCG, parallel-enabled, standard solver
+ !> \item[3] Native PCG, parallel-enabled, Chronopoulos-Gear solver
+ !> \item[4] standalone interface to Trilinos
+ !> \end{description}
+
+ ! parameters to store external dycore options/information -- Doug Ranken 04/20/12
+ integer*4 :: external_dycore_type = 0
+ integer*4 :: external_dycore_model_index = -1
+ !> Flag to select an external dynamic core.
+ !> \begin{description}
+ !> \item[0] Do not use an external dynamic core
+ !> \item[1] Use the BISICLES external dynamic core
+ !> \item[2] Use the ALBANY_FELIX external dynamic core
+ !> \end{description}
+
+ character(fname_length) :: dycore_input_file=''
+ !FD Name of a file containing external dycore settings.
+
+ integer :: which_ho_approx = 2
+ !> Flag that indicates which Stokes approximation to use with the glissade dycore.
+ !> Not valid for other dycores
+ !> Compute Blatter-Pattyn HO momentum balance by default.
+ !> Note: There are two SIA options:
+ !> Option -1 uses module glissade_velo_sia to compute local SIA velocities, similar to Glide
+ !> Option 0 uses module glissade_velo_higher to compute SIA velocities via an iterative solve
+ !> \begin{description}
+ !> \item[-1] Shallow-ice approximation, Glide-type calculation (uses glissade_velo_sia)
+ !> \item[0] Shallow-ice approximation, vertical-shear stresses only (uses glissade_velo_higher)
+ !> \item[1] Shallow-shelf approximation, horizontal-plane stresses only (uses glissade_velo_higher)
+ !> \item[2] Blatter-Pattyn approximation with both vertical-shear and horizontal-plane stresses (uses glissade_velo_higher)
+ !> \item[3] Vertically integrated 'L1L2' approximation with vertical-shear and horizontal-plane stresses (uses glissade_velo_higher)
+ !> \end{description}
+
+ integer :: which_ho_precond = 2
+ !> Flag that indicates which Stokes preconditioner to use in the glissade dycore.
+ !> Not valid for other dycores
+ !> \begin{description}
+ !> \item[0] No preconditioner
+ !> \item[1] Diagonal preconditioner
+ !> \item[2] Physics-based shallow-ice preconditioner
+ !> \end{description}
+
+ integer :: which_ho_gradient = 0
+ !> Flag that indicates which gradient operator to use in the glissade dycore.
+ !> Not valid for other dycores
+ !> NOTE: Option 1 may be better for ice evolution because it damps checkerboard noise.
+ !> \begin{description}
+ !> \item[0] Centered gradient
+    !> \item[1] Upstream gradient
+    !> \end{description}
+
+ integer :: which_ho_gradient_margin = 1
+ !> Flag that indicates how to compute the gradient at the ice margin in the glissade dycore.
+ !> Not valid for other dycores
+ !> \begin{description}
+ !> \item[0] Use info from all neighbor cells, ice-covered or ice-free
+ !> \item[1] Use info from ice-covered and/or land cells, not ice-free ocean
+    !> \item[2] Use info from ice-covered cells only
+    !> \end{description}
+
+ integer :: which_ho_assemble_beta = 0
+
+ !> Flag that describes how beta terms are assembled in the glissade finite-element calculation
+ !> \begin{description}
+ !> \item[0] standard finite-element calculation (which effectively smooths beta)
+    !> \item[1] apply local beta value at each vertex
+    !> \end{description}
+
+ integer :: which_ho_ground = 0
+ !> Flag that indicates how to compute the grounded fraction of each gridcell in the glissade dycore.
+ !> Not valid for other dycores
+ !> \begin{description}
+ !> \item[0] fground = 0 in floating cells (based on flotation condition), else fground = 1
+ !> \item[1] fground = 1 in all cells
+    !> \item[2] 0 <= fground <= 1, based on a grounding line parameterization
+    !> \end{description}
+
+ integer :: glissade_maxiter = 100
+ !> maximum number of nonlinear iterations to be used by the Glissade velocity solver
+
+ ! The remaining options are not currently supported
+
+ !integer :: which_bproc = 0
+ !Options for the basal processes code
+ !> \begin{description}
+ !> \item[0] Disabled
+ !> \item[1] Full calculation, with at least 3 nodes to represent the till layer
+ !> \item[2] Fast calculation, using Tulaczyk empirical parametrization
+ !> \end{description}
+
+ !integer :: use_plume = 0 !! Option to be supported in future releases
+ !> \begin{description}
+ !> \item[0] standard bmlt calculation
+ !> \item[1] use plume to calculate bmlt
+ !> \end{description}
+
+ end type glide_options
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_geometry
+
+ !> Holds fields and other information relating to the
+ !> geometry of the ice sheet and bedrock.
+
+ real(dp),dimension(:,:),pointer :: thck => null()
+ !> The thickness of the ice, divided by \texttt{thk0}.
+
+ real(dp),dimension(:,:),pointer :: usrf => null()
+ !> The elevation of the upper ice surface, divided by \texttt{thk0}.
+
+ real(dp),dimension(:,:),pointer :: lsrf => null()
+ !> The elevation of the lower ice surface, divided by \texttt{thk0}.
+
+ real(dp),dimension(:,:),pointer :: topg => null()
+ !> The elevation of the topography, divided by \texttt{thk0}.
+
+ real(dp),dimension(:,:),pointer :: f_ground => null()
+ !> The fractional area at each vertex which is grounded
+ ! (computed by glissade dycore only)
+
+ real(dp),dimension(:,:,:),pointer :: age => null()
+ !> The age of a given ice layer, divided by \texttt{tim0}.
+
+ integer, dimension(:,:),pointer :: thkmask => null()
+ !> see glide_mask.f90 for possible values
+
+ integer, dimension(:,:),pointer :: stagmask => null()
+ !> see glide_mask.f90 for possible values
+
+ !TODO - Consider moving BISICLES variables to their own type at some point
+ !* (DFM ----------------- added for BISICLES interface --------------)
+ real(dp),dimension(:,:),pointer :: floating_mask => null()
+    !*(DFM) Real-valued mask indicating where ice is grounded or floating
+
+ !* (DFM ----------------- added for BISICLES interface --------------)
+ real(dp),dimension(:,:),pointer :: ice_mask => null()
+ !*(DFM) Real-valued mask indicating where ice is present or absent
+
+
+ !* (DFM ----------------- added for BISICLES interface --------------)
+ real(dp),dimension(:,:),pointer :: lower_cell_loc => null()
+ !*(DFM) The z-location of the center of the lowest ice cell center
+
+ !* (DFM ----------------- added for BISICLES interface --------------)
+ real(dp),dimension(:,:),pointer :: lower_cell_temp => null()
+ !*(DFM) The temperature in the cell located at lower_cell_loc
+
+ integer, dimension(:,:),pointer :: thck_index => null()
+ ! Set to nonzero integer for ice-covered cells (thck > 0), cells adjacent to ice-covered cells,
+ ! and cells with acab > 0. The non-zero points are numbered in sequence from the bottom left
+ ! to the top right, going along the rows.
+
+ integer :: totpts = 0 ! total number of points with nonzero thck_index
+ logical :: empty = .true. ! true if totpts = 0
+
+ real(dp) :: ivol, iarea,iareag, iareaf !> ice volume and ice area
+
+ end type glide_geometry
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_geomderv
+
+ !> Holds the horizontal and temporal derivatives of the thickness and
+ !> upper surface elevation, as well as the thickness on the staggered grid.
+
+ !*tb* Added a bunch of stuff here to clean up the higher order code that
+ !I've been writing. Might be worth it to add a mechanism to conditionally
+ !allocate these depending on whether they are needed by the SIA core or by
+ !the higher-order extensions
+
+ !First derivatives on a staggered grid
+ real(dp),dimension(:,:),pointer :: dthckdew => null() !> E-W derivative of thickness.
+ real(dp),dimension(:,:),pointer :: dusrfdew => null() !> E-W derivative of upper surface elevation.
+ real(dp),dimension(:,:),pointer :: dthckdns => null() !> N-S derivative of thickness.
+ real(dp),dimension(:,:),pointer :: dusrfdns => null() !> N-S derivative of upper surface elevation.
+ real(dp),dimension(:,:),pointer :: dlsrfdew => null() !*tb* added
+ real(dp),dimension(:,:),pointer :: dlsrfdns => null() !*tb* added
+
+ !Second derivatives on a staggered grid
+ !*tb* added all of these
+ ! Used by glam_strs2
+ real(dp),dimension(:,:),pointer :: d2usrfdew2 => null()
+ real(dp),dimension(:,:),pointer :: d2usrfdns2 => null()
+ real(dp),dimension(:,:),pointer :: d2thckdew2 => null()
+ real(dp),dimension(:,:),pointer :: d2thckdns2 => null()
+
+ !Time derivatives
+ real(dp),dimension(:,:),pointer :: dthckdtm => null() !> Temporal derivative of thickness.
+ real(dp),dimension(:,:),pointer :: dusrfdtm => null() !> Temporal derivative of upper surface elevation.
+
+ !TODO - Move staggered variables from glide_geomderv type to glide_geometry?
+
+ !Staggered grid versions of geometry variables
+ real(dp),dimension(:,:),pointer :: stagthck => null() !> Thickness averaged onto the staggered grid.
+ real(dp),dimension(:,:),pointer :: stagusrf => null() !> Upper surface averaged onto the staggered grid
+ real(dp),dimension(:,:),pointer :: staglsrf => null() !> Lower surface averaged onto the staggered grid
+ real(dp),dimension(:,:),pointer :: stagtopg => null() !> Bedrock topography averaged onto the staggered grid
+
+ end type glide_geomderv
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_tensor
+ real(dp), dimension(:,:,:), pointer :: scalar => null()
+ real(dp), dimension(:,:,:), pointer :: xz => null()
+ real(dp), dimension(:,:,:), pointer :: yz => null()
+ real(dp), dimension(:,:,:), pointer :: xx => null()
+ real(dp), dimension(:,:,:), pointer :: yy => null()
+ real(dp), dimension(:,:,:), pointer :: xy => null()
+ end type glide_tensor
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_velocity
+
+    !> Holds the velocity fields in 2D and 3D. At least some of these fields are used by all dycores.
+ real(dp),dimension(:,:,:),pointer :: uvel => null() !> 3D $x$-velocity.
+ real(dp),dimension(:,:,:),pointer :: vvel => null() !> 3D $y$-velocity.
+ real(dp),dimension(:,:,:),pointer :: velnorm => null() ! horizontal ice speed
+ real(dp),dimension(:,:,:),pointer :: wvel => null() !> 3D $z$-velocity.
+ real(dp),dimension(:,:,:),pointer :: wgrd => null() !> 3D grid vertical velocity.
+ real(dp),dimension(:,:,:),pointer :: wvel_ho => null()!> 3D $z$-velocity.from higher-order dycores
+ real(dp),dimension(:,:) ,pointer :: uflx => null() !>
+ real(dp),dimension(:,:) ,pointer :: vflx => null() !>
+ real(dp),dimension(:,:) ,pointer :: diffu => null() !>
+ real(dp),dimension(:,:) ,pointer :: diffu_x => null() !*sfp* moved from velocity_hom deriv type
+ real(dp),dimension(:,:) ,pointer :: diffu_y => null()
+ real(dp),dimension(:,:) ,pointer :: total_diffu => null() !> total diffusivity
+
+ real(dp),dimension(:,:) ,pointer :: ubas => null() !>
+ real(dp),dimension(:,:) ,pointer :: ubas_tavg => null()
+ real(dp),dimension(:,:) ,pointer :: vbas => null() !>
+ real(dp),dimension(:,:) ,pointer :: vbas_tavg => null()
+
+ !! next 3 used for output of residual fields (when relevant code in glam_strs2 is active)
+! real(dp),dimension(:,:,:),pointer :: ures => null() !> 3D $x$-residual.
+! real(dp),dimension(:,:,:),pointer :: vres => null() !> 3D $y$-residual.
+! real(dp),dimension(:,:,:),pointer :: magres => null() !> 3D $magnitude$-residual.
+
+ ! Note: uvel_extend and vvel_extend can be used for output of uvel, vvel on a staggered grid
+ ! that is the same size as the unstaggered grid (e.g., for ISMIP-HOM problems with periodic BC,
+ ! where the number of velocity points is equal to the number of grid cells.)
+ real(dp),dimension(:,:,:),pointer :: uvel_extend => null() !> 3D $x$-velocity on extended staggered grid
+ real(dp),dimension(:,:,:),pointer :: vvel_extend => null() !> 3D $y$-velocity on extended staggered grid
+
+ real(dp),dimension(:,:) ,pointer :: bed_softness => null() !> bed softness parameter
+ real(dp),dimension(:,:) ,pointer :: btrc => null() !> basal traction (scaler field)
+ real(dp),dimension(:,:,:),pointer :: btraction => null() !> x(1,:,:) and y(2,:,:) "consistent" basal traction fields
+ real(dp),dimension(:,:) ,pointer :: beta => null() !> basal shear coefficient on velo grid (Pa yr/m by default)
+ real(dp),dimension(:,:) ,pointer :: unstagbeta => null() !> basal shear coefficient on ice grid (Pa yr/m by default)
+ real(dp),dimension(:,:) ,pointer :: tau_x => null() !> SIA basal shear stress, x-dir
+ real(dp),dimension(:,:) ,pointer :: tau_y => null() !> SIA basal shear stress, y-dir
+
+ !> A mask similar to glide_geometry%thck_index, but on the velocity grid instead of the
+ !> ice grid. This is to aid in converging higher-order velocities
+!! integer, dimension(:,:), pointer :: velmask => null() ! No longer used
+
+ !> A mask that specifies where the velocity being read in should be held constant as a dirichlet condition
+ integer, dimension(:,:), pointer :: kinbcmask => null()
+
+ !*sfp* mask on vel grid showing which dyn bc is applied at each grid cell (mainly for debugging)
+ integer, dimension(:,:), pointer :: dynbcmask => null()
+
+ ! for viewing the spatial pattern of residuals
+ real(dp),dimension(:,:,:),pointer :: resid_u => null() ! u component of residual Ax - b where x is the velocity
+ real(dp),dimension(:,:,:),pointer :: resid_v => null() ! v component of residual Ax - b where x is the velocity
+
+ ! for viewing the driving stress on the RHS
+ real(dp),dimension(:,:,:),pointer :: rhs_u => null() ! u component of b in Ax = b
+ real(dp),dimension(:,:,:),pointer :: rhs_v => null() ! v component of b in Ax = b
+
+ end type glide_velocity
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_stress_t
+
+ type(glide_tensor) :: tau ! HO only
+ real(dp),dimension(:,:,:),pointer :: efvs => null() !> effective viscosity
+ real(dp),dimension(:,:), pointer :: btractx => null() !> basal traction (Pa), x comp
+ real(dp),dimension(:,:), pointer :: btracty => null() !> basal traction (Pa), y comp
+
+ end type glide_stress_t
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!TODO - Should calving and eus be part of some type other than glide_climate?
+
+!TODO - Rename acab in glide_climate type to avoid confusion over units? (e.g., acab_ice?)
+! Here, acab has units of m/y ice, whereas in Glint, acab has units of m/y water equiv.
+
+ type glide_climate
+ !> Holds fields used to drive the model
+ real(dp),dimension(:,:),pointer :: acab => null() !> Annual mass balance (m/y ice)
+ real(dp),dimension(:,:),pointer :: acab_tavg => null() !> Annual mass balance (time average).
+ real(dp),dimension(:,:),pointer :: artm => null() !> Annual mean air temperature (degC)
+ real(dp),dimension(:,:),pointer :: calving => null() !> Calving flux
+ ! (scaled as mass balance, thickness, etc)
+
+ real(dp) :: eus = 0.d0 !> eustatic sea level
+ end type glide_climate
+
+
+ type eismint_climate_type
+
+ ! holds parameters for the eismint climate
+
+ ! For EISMINT2:
+ ! airt(1) = Tmin = summit surface temperature (K)
+ ! airt(2) = S_T = horizontal temperature gradient (K/m)
+ ! nmsb(1) = M_max = max accumulation (m/yr)
+ ! nmsb(2) = S_b = horizontal smb gradient (m/yr/m)
+ ! nmsb(3) = R_el = radial distance from summit where mass balance = 0 (m)
+ !
+
+ integer :: eismint_type = 0
+ !> select EISMINT experiment
+ !> \begin{description}
+ !> \item[{\bf 1}] EISMINT-1 fixed margin
+ !> \item[{\bf 2}] EISMINT-1 moving margin
+ !> \item[{\bf 3}] EISMINT-2
+ !> \item[{\bf 4}] MISMIP-1 (not EISMINT but has similar climate parameters)
+ !> \item[{\bf 5}] Exact verification (not EISMINT but has similar climate parameters)
+ !> \end{description}
+
+ ! NOTE: The initial nmsb values in the declarations below are appropriate
+ ! for EISMINT-2, but the initial airt values are not.
+ ! TODO: Change default airt values in eismint_type to be consistent with EISMINT-2?
+
+ !> air temperature parameterisation K, K km$^{-3}$
+ real(dp), dimension(2) :: airt = (/ -3.15d0, 1.d-2 /)
+
+ !> mass balance parameterisation:
+ real(dp), dimension(3) :: nmsb = (/ 0.5d0, 1.05d-5, 450.0d3 /)
+
+ !> EISMINT time-dep climate forcing period, switched off when set to 0
+ real(dp) :: period = 0.d0
+
+ !> EISMINT amplitude of mass balance time-dep climate forcing
+ real(dp) :: mb_amplitude = 0.2d0
+
+ end type eismint_climate_type
+
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_temper
+
+ !> Holds fields relating to temperature.
+
+ !Note: In the Glide dycore, temp, flwa and dissip live on the unstaggered vertical grid
+ ! at layer interfaces and have vertical dimension (1:upn).
+ ! In the Glam/Glissade dycore, with remapping advection of temperature,
+ ! temp, flwa and dissip live on the staggered vertical grid at layer midpoints.
+ ! The vertical dimensions are (0:upn) for temp and (1:upn-1) for flwa and dissip.
+ !
+ ! bheatflx, ucondflx, and lcondflx are defined as positive down,
+ ! so they will often be < 0.
+ ! However, bfricflx and dissipcol are defined to be >= 0.
+ !
+ ! If bheatflx is read from a data file, be careful about the sign!
+ ! In input data, the geothermal heat flux is likely to be defined as positive upward.
+ !
+ !TODO: Create separate fields for basal melt beneath grounded and floating ice.
+
+ real(dp),dimension(:,:,:),pointer :: temp => null() !> 3D temperature field.
+ real(dp),dimension(:,:), pointer :: bheatflx => null() !> basal heat flux (W/m^2) (geothermal, positive down)
+ real(dp),dimension(:,:,:),pointer :: flwa => null() !> Glen's flow factor $A$.
+ real(dp),dimension(:,:,:),pointer :: dissip => null() !> interior heat dissipation rate, divided by rhoi*Ci (deg/s)
+ real(dp),dimension(:,:), pointer :: bwat => null() !> Basal water depth
+ real(dp),dimension(:,:), pointer :: bwatflx => null() !> Basal water flux
+ real(dp),dimension(:,:), pointer :: stagbwat => null() !> Basal water depth on velo grid
+ real(dp),dimension(:,:), pointer :: bmlt => null() !> Basal melt-rate (> 0 for melt, < 0 for freeze-on)
+ real(dp),dimension(:,:), pointer :: bmlt_tavg => null() !> Basal melt-rate
+ real(dp),dimension(:,:), pointer :: stagbtemp => null() !> Basal temperature on velo grid
+ real(dp),dimension(:,:), pointer :: bpmp => null() !> Basal pressure melting point
+ real(dp),dimension(:,:), pointer :: stagbpmp => null() !> Basal pressure melting point on velo grid
+ real(dp),dimension(:,:), pointer :: bfricflx => null() !> basal heat flux (W/m^2) from friction (>= 0)
+ real(dp),dimension(:,:,:),pointer :: waterfrac => null() !> fractional water content in layer (0 <= waterfrac <= 1)
+ real(dp),dimension(:,:,:),pointer :: enthalpy => null() !> specific enthalpy in layer (J m-3)
+ !> = rhoi * Ci * T for cold ice
+ !TODO - Remove ucondflx, lcondflx, dissipcol; make these local to glissade_therm
+ real(dp),dimension(:,:), pointer :: ucondflx => null() !> conductive heat flux (W/m^2) at upper sfc (positive down)
+ real(dp),dimension(:,:), pointer :: lcondflx => null() !> conductive heat flux (W/m^2) at lower sfc (positive down)
+ real(dp),dimension(:,:), pointer :: dissipcol => null() !> total heat dissipation rate (W/m^2) in column (>= 0)
+ integer :: niter = 0
+ real(dp) :: perturb = 0.d0
+ real(dp) :: grid = 0.d0
+ integer :: tpt = 0 !> Pointer to time series data
+ logical :: first1 = .true. !>
+ logical :: newtemps = .false. !> new temperatures
+ end type glide_temper
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_basal_physics
+ !< Holds variables related to basal physics associated with ice dynamics
+
+ ! see glissade_basal_traction.F90 for usage details
+ ! Note: It may make sense to move effecpress to a hydrology model when one is available.
+ real(dp), dimension(:,:), pointer :: effecpress => null() !< effective pressure
+ real(dp), dimension(:,:), pointer :: effecpress_stag => null() !< effective pressure staggered grid
+      ! parameter for friction law
+ real(dp) :: friction_powerlaw_k = 8.4e-9 !< the friction coefficient for the power-law friction law (m y^-1 Pa^-2). The default value is that given in Bindschadler (1983) based on fits to observations, converted to CISM units.
+ ! Parameters for Coulomb friction sliding law (default values from Pimentel et al. 2010)
+ real(dp) :: Coulomb_C = 0.84d0*0.5d0 !< basal stress constant (no dimension)
+ real(dp) :: Coulomb_Bump_Wavelength = 2.0d0 !< bed rock wavelength at subgrid scale precision (m)
+ real(dp) :: Coulomb_Bump_max_slope = 0.5d0 !< maximum bed bump slope at subgrid scale precision (no dimension)
+ end type glide_basal_physics
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_lithot_type
+ !> holds variables for temperature calculations in the lithosphere
+
+ real(dp),dimension(:,:,:),pointer :: temp => null() !> Three-dimensional temperature field.
+ logical, dimension(:,:), pointer :: mask => null() !> whether the point has been ice covered at some time
+
+ integer :: num_dim = 1 !> either 1 or 3 for 1D/3D calculations
+
+ ! The sparse matrix and linearised arrays
+ type(sparse_matrix_type) :: fd_coeff, fd_coeff_slap
+ integer :: all_bar_top
+ real(dp), dimension(:), pointer :: rhs
+ real(dp), dimension(:), pointer :: answer
+ real(dp), dimension(:), pointer :: supd,diag,subd
+
+ ! work arrays for solver
+ real(dp), dimension(:), pointer :: rwork
+ integer, dimension(:), pointer :: iwork
+ integer mxnelt
+
+ real(dp), dimension(:), pointer :: deltaz => null() !> array holding grid spacing in z
+ real(dp), dimension(:,:), pointer :: zfactors => null()!> array holding factors for finite differences of vertical diffu
+ real(dp) :: xfactor,yfactor !> factors for finite differences of horizontal diffu
+
+
+ real(dp) :: surft = 2.d0 !> surface temperature, used for calculating initial temperature distribution
+ real(dp) :: mart = 2.d0 !> sea floor temperature
+ integer :: nlayer = 20 !> number of layers in lithosphere
+ real(dp) :: rock_base = -5000.d0 !> depth below sea-level at which geothermal heat gradient is applied
+
+ integer :: numt = 0 !> number time steps for spinning up GTHF calculations
+
+ real(dp) :: rho_r = 3300.0d0 !> The density of lithosphere (kg m$^{-3}$)
+      real(dp) :: shc_r = 1000.0d0    !> specific heat capacity of lithosphere (J kg$^{-1}$ K$^{-1}$)
+ real(dp) :: con_r = 3.3d0 !> thermal conductivity of lithosphere (W m$^{-1}$ K$^{-1}$)
+
+ real(dp) :: diffu = 0. !> diffusion coefficient
+
+ end type glide_lithot_type
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type isos_elastic
+ !> Holds data used by isostatic adjustment calculations
+
+ real(dp) :: d = 0.24d25 !> flexural rigidity !TODO - What are units of d?
+ real(dp) :: lr !> radius of relative stiffness
+ real(dp) :: a !> radius of disk
+ real(dp) :: c1,c2,cd3,cd4 !> coefficients
+ real(dp), dimension(:,:), pointer :: w !> matrix operator for lithosphere deformation
+ integer :: wsize !> size of operator (0:rbel_wsize, 0:rbel_wsize), operator is axis symmetric
+ end type isos_elastic
+
+ type isostasy_type
+ !> contains isostasy configuration
+
+ integer :: lithosphere = 0
+ !> method for calculating equilibrium bedrock depression
+ !> \begin{description}
+ !> \item[0] local lithosphere, equilibrium bedrock depression is found using Archimedes' principle
+ !> \item[1] elastic lithosphere, flexural rigidity is taken into account
+ !> \end{description}
+
+ integer :: asthenosphere = 0
+ !> method for approximating the mantle
+ !> \begin{description}
+ !> \item[0] fluid mantle, isostatic adjustment happens instantaneously
+ !> \item[1] relaxing mantle, mantle is approximated by a half-space
+ !> \end{description}
+
+ real(dp) :: relaxed_tau = 4000.d0 ! characteristic time constant of relaxing mantle (yr)
+ real(dp) :: period = 500.d0 ! lithosphere update period (yr)
+ real(dp) :: next_calc ! when to update lithosphere (model yr); no default -- must be set during initialization
+ logical :: new_load = .false. ! set to true if there is a new surface load
+ type(isos_elastic) :: rbel ! structure holding elastic lithosphere setup
+
+ ! 2D fields; allocated in glide_allocarr (relx always; load/load_factors only
+ ! when isostasy computation is enabled)
+ real(dp),dimension(:,:),pointer :: relx => null() ! elevation of relaxed topography, by \texttt{thck0}.
+ real(dp),dimension(:,:),pointer :: load => null() ! load imposed on lithosphere
+ real(dp),dimension(:,:),pointer :: load_factors => null() ! temporary used for load calculation
+
+ end type isostasy_type
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_funits
+ !> File-unit / netCDF I/O configuration for one model instance.
+ character(fname_length) :: sigfile='' !> sigma coordinates file
+ character(fname_length) :: ncfile='' !> configuration file for netCDF I/O
+ type(glimmer_nc_output),pointer :: out_first=>NULL() !> first element of linked list defining netCDF outputs
+ type(glimmer_nc_input), pointer :: in_first=>NULL() !> first element of linked list defining netCDF inputs
+ type(glimmer_nc_input), pointer :: frc_first=>NULL() !> first element of linked list defining netCDF forcings
+ ! Note: forcing files are of the same type as input files since they share a lot in common.
+ end type glide_funits
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_numerics
+
+ !> Parameters relating to the model numerics
+ real(dp) :: tstart = 0.d0 !> starting time
+ real(dp) :: tend = 1000.d0 !> end time
+ real(dp) :: time = 0.d0 !> main loop counter in years
+ real(dp) :: tinc = 1.d0 !> time step of main loop in years
+ real(dp) :: ntem = 1.d0 !> multiplier of main time step; allows longer temperature time step
+ real(dp) :: alpha = 0.5d0 !> richard suggests 1.5 - was a parameter in original
+ real(dp) :: alphas = 0.5d0 !> was a parameter in the original
+ real(dp) :: thklim = 100.d0 ! min thickness for computing ice dynamics (m)
+ real(dp) :: thklim_temp = 1.d0 ! min thickness for computing vertical temperature (m) (higher-order only)
+ real(dp) :: mlimit = -200.d0 ! NOTE(review): presumably a marine limit (m, negative = below sea level) -- confirm against marine-margin code
+ real(dp) :: calving_fraction = 0.8d0 ! NOTE(review): fraction used by the calving scheme (ND); exact semantics depend on the calving option -- confirm
+ real(dp) :: dew = 20.d3 ! NOTE(review): presumably grid spacing in ew direction (m) -- confirm
+ real(dp) :: dns = 20.d3 ! NOTE(review): presumably grid spacing in ns direction (m) -- confirm
+ real(dp) :: dt = 0.d0 ! ice dynamics timestep
+ real(dp) :: dttem = 0.d0 ! temperature timestep
+ real(dp) :: dt_transport = 0.d0 ! timestep for subcycling transport within the dynamics timestep dt
+ real(dp) :: nshlf = 0.d0 !TODO - not currently used; remove?
+ integer :: subcyc = 1 ! NOTE(review): presumably number of subcycles per main timestep -- confirm
+ real(dp) :: periodic_offset_ew = 0.d0 ! optional periodic_offsets for ismip-hom and similar tests
+ real(dp) :: periodic_offset_ns = 0.d0 ! These may be needed to ensure continuous ice geometry at
+ ! the edges of the global domain.
+
+ integer :: timecounter = 0 !> count time steps
+
+ ! Vertical coordinate ---------------------------------------------------
+
+ real(dp),dimension(:),pointer :: sigma => null() !> Sigma values for vertical spacing of
+ !> model levels
+ real(dp),dimension(:),pointer :: stagsigma => null() !> Staggered values of sigma (layer midpts)
+ real(dp),dimension(:),pointer :: stagwbndsigma => null() !> Staggered values of sigma (layer midpts) with boundaries
+
+ integer :: profile_period = 100 ! profile frequency
+
+ !TODO - Compute ndiag as a function of dt_diag and pass to glide_diagnostics?
+ ! This is more robust than computing mods of real numbers.
+
+ real(dp) :: dt_diag = 0.d0 ! diagnostic time interval (write diagnostics every dt_diag years)
+ integer :: ndiag = -999 ! diagnostic period (write output every ndiag steps)
+ integer :: idiag = 1 ! global grid indices for diagnostic point
+ integer :: jdiag = 1 !
+ integer :: idiag_local = 1 ! local grid indices for diagnostic point
+ integer :: jdiag_local = 1
+ integer :: rdiag_local = 0 ! task number for diagnostic point
+
+ real(dp) :: adv_cfl_dt = 0.0d0 ! maximum allowable dt (yrs) based on advective CFL (calculated by model for each time step)
+ real(dp) :: diff_cfl_dt = 0.0d0 ! maximum allowable dt (yrs) based on diffusive CFL (calculated by model for each time step)
+ end type glide_numerics
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ !TODO - Is the glide_grnd type still needed?
+ type glide_grnd
+ !> Variables for tracking the grounding line.
+ !> NOTE(review): allocation of these fields is commented out in glide_allocarr,
+ !> so they remain null unless allocated elsewhere.
+ real(dp),dimension(:,:),pointer :: gl_ew => null()
+ real(dp),dimension(:,:),pointer :: gl_ns => null()
+ real(dp),dimension(:,:),pointer :: gline_flux => null() !> flux at the grounding line
+ end type glide_grnd
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_velowk
+ !> Work arrays and parameters for the Glide velocity calculation.
+ !> NOTE(review): component semantics are not documented at the declaration;
+ !> names suggest depth-integration factors and basal-traction parameters --
+ !> confirm against the velocity-solver code before relying on any description.
+ real(dp),dimension(:), pointer :: depth => null()
+ real(dp),dimension(:), pointer :: dupsw => null()
+ real(dp),dimension(:), pointer :: depthw => null()
+ real(dp),dimension(:), pointer :: suvel => null()
+ real(dp),dimension(:), pointer :: svvel => null()
+ real(dp),dimension(:,:),pointer :: fslip => null()
+ real(dp),dimension(:,:),pointer :: dintflwa => null()
+ real(dp),dimension(:), pointer :: dups => null()
+ real(dp),dimension(4) :: fact ! NOTE(review): no default value; must be set before use
+ real(dp),dimension(4) :: c = 0.d0
+ real(dp) :: watwd = 3.0d0
+ real(dp) :: watct = 10.0d0
+ real(dp) :: trc0 = 0.d0
+ real(dp) :: trcmin = 0.0d0
+ real(dp) :: marine = 1.0d0
+ real(dp) :: trcmax = 10.0d0
+ real(dp) :: btrac_const = 0.0d0 !TODO - Remove from glide_velowk type; already in glide_paramets type.
+ real(dp) :: btrac_slope = 0.0d0
+ real(dp) :: btrac_max = 0.d0
+ end type glide_velowk
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_thckwk
+ !> Work arrays for the thickness-evolution solver.
+ real(dp),dimension(:,:), pointer :: oldthck => null()
+ real(dp),dimension(:,:), pointer :: oldthck2 => null()
+!! real(dp),dimension(:,:),pointer :: float => null() ! no longer used
+ real(dp),dimension(:,:,:),pointer :: olds => null() ! previous states; 3rd dim is nwhich (see glide_allocarr)
+ integer :: nwhich = 2
+ real(dp) :: oldtime = 0.d0
+
+ ! next four are for ADI evolution only
+ real(dp), dimension(:), pointer :: alpha => null()
+ real(dp), dimension(:), pointer :: beta => null()
+ real(dp), dimension(:), pointer :: gamma => null()
+ real(dp), dimension(:), pointer :: delta => null()
+
+ end type glide_thckwk
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !WHL - Moved dissip to glide_temper
+ type glide_tempwk
+ real(dp),dimension(:,:,:),pointer :: inittemp => null()
+ real(dp),dimension(:,:,:),pointer :: compheat => null()
+ real(dp),dimension(:,:,:),pointer :: initadvt => null()
+ real(dp),dimension(:), pointer :: dupa => null()
+ real(dp),dimension(:), pointer :: dupb => null()
+ real(dp),dimension(:), pointer :: dupc => null()
+ real(dp),dimension(:), pointer :: c1 => null()
+ real(dp),dimension(:,:), pointer :: dups => null()
+ real(dp),dimension(:,:), pointer :: wphi => null()
+ real(dp),dimension(:,:), pointer :: smth => null()
+ real(dp),dimension(:,:,:),pointer :: hadv_u => null()
+ real(dp),dimension(:,:,:),pointer :: hadv_v => null()
+
+ !*sfp** added space to the next 2 (cons, f) for use w/ HO and SSA dissip. calc.
+ real(dp),dimension(5) :: cons = 0.d0
+ real(dp),dimension(5) :: f = 0.d0
+ real(dp),dimension(8) :: c = 0.d0
+ real(dp),dimension(2) :: slide_f
+ real(dp) :: noflow = -1
+ real(dp),dimension(2) :: advconst = 0.d0
+ real(dp) :: zbed = 0.d0
+ real(dp) :: dupn = 0.d0
+ real(dp) :: wmax = 0.d0
+ real(dp) :: dt_wat = 0.d0
+ real(dp) :: watvel = 0.d0
+ integer :: nwat = 0
+ end type glide_tempwk
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_paramets
+ !> Physical and tuning parameters for the ice-sheet model.
+ real(dp),dimension(5) :: bpar = (/ 0.2d0, 0.5d0, 0.0d0 ,1.0d-2, 1.0d0/) ! basal-traction parameter set; element meanings not documented here -- confirm in sliding code
+ real(dp) :: btrac_const = 0.d0 ! m yr^{-1} Pa^{-1} (gets scaled during init)
+ real(dp) :: btrac_slope = 0.0d0 ! Pa^{-1} (gets scaled during init)
+ real(dp) :: btrac_max = 0.d0 ! m yr^{-1} Pa^{-1} (gets scaled during init)
+ ! NOTE(review): geot default is negative under the stated "positive down" sign
+ ! convention, i.e. 50 mW m^{-2} of upward geothermal flux into the ice base --
+ ! confirm the sign handling in the temperature solver.
+ real(dp) :: geot = -5.0d-2 ! W m^{-2}, positive down
+ real(dp) :: flow_enhancement_factor = 1.0d0 ! flow enhancement parameter for the Arrhenius relationship;
+ ! typically used in SIA model to speed up the ice
+ ! (NOTE change relative to prev. versions of code - used to be 3)
+ real(dp) :: slip_ratio = 1.0d0 ! Slip ratio, used only in higher order code when the slip ratio beta computation is requested
+ real(dp) :: hydtim = 1000.0d0 ! years, converted to s^{-1} and scaled
+ ! 0 if no drainage
+ real(dp) :: bwat_smooth = 0.01d0 ! basal water field smoothing strength
+ real(dp) :: default_flwa = 1.0d-16 ! Glen's A to use in isothermal case, in units Pa^{-n} yr^{-1}
+ ! (would change to e.g. 4.6e-18 in EISMINT-ROSS case)
+ real(dp) :: efvs_constant = 2336041.d0 ! value of efvs to use in constant efvs case, in units Pa yr
+ ! = 0.5*A^(-1), where A = 2.140373 Pa^(-1) yr^(1) is the value used in ISMIP-HOM Test F
+ real(dp) :: ho_beta_const = 10.d0 ! spatially uniform beta for HO dycores, Pa yr m^{-1} (gets scaled during init)
+ real(dp) :: p_ocean_penetration = 0.0d0 ! p-exponent parameter for ocean penetration parameterization
+
+ end type glide_paramets
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !TODO - Should the glide_basalproc type be removed?
+ ! Keeping it for now because glam_strs2 uses mintauf (but this could be moved to another type).
+ type glide_basalproc
+ !> Basal-processes (till) model data. Largely disabled: all components except
+ !> till_dz and mintauf are commented out, and glide_allocarr only allocates
+ !> mintauf (used by glam_strs2).
+ !Tuneables, set in the config file
+! real(dp):: fric=0.45d0 ! Till coeff of internal friction: ND
+! real(dp):: etillo=0.7d0 ! Till void ratio at No
+! real(dp):: No=1000.d0 ! Reference value of till effective stress
+! real(dp):: Comp=0.12d0 ! Till coeff of compressibility: ND
+! real(dp):: Cv = 1.0d-8 ! Till hydraulic diffusivity: m2/s
+! real(dp):: Kh = 1.0d-10 !Till hydraulic conductivity: m/s
+! real(dp):: Zs = 3.0d0 ! Solid till thickness: m
+! real(dp):: aconst=994000000d0 ! Constant in till strength eq. (Pa)
+! real(dp):: bconst=21.7d0 ! Constant in till strength eq. (ND)
+! integer:: till_hot = 0
+! integer:: tnodes = 5
+
+ real(dp), dimension (:) , pointer :: till_dz => null() !holds inital till layer spacing -
+
+ !Model variables that will be passed to other subroutines
+ real(dp),dimension(:,:) ,pointer :: mintauf => null() !Bed strength calculated with basal proc. mod.
+! real(dp),dimension(:,:) ,pointer :: Hwater => null() !Water available from till layer (m)
+ !Model variabled necessary for restart
+! real(dp),dimension(:,:,:) ,pointer :: u => null() !Till excess pore pressure (Pa)
+! real(dp),dimension(:,:,:) ,pointer :: etill => null() !Till void ratio (ND)
+
+ end type glide_basalproc
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ type glide_prof_type
+ !> Integer handles naming the profiled (timed) code sections.
+ !> NOTE(review): presumably each is an id obtained when registering a section
+ !> with the profiling module (model%profile) -- confirm against that module.
+ integer :: geomderv
+ integer :: hvelos
+ integer :: ice_mask1
+ integer :: temperature
+ integer :: ice_evo
+ integer :: ice_mask2
+ integer :: isos_water
+ integer :: isos
+ end type glide_prof_type
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!TODO - Remove the glide_phaml type? Commented out for now
+!! type glide_phaml
+!! real(dp),dimension(:,:),pointer :: uphaml => null()
+!! real(dp),dimension(:,:),pointer :: init_phaml => null()
+!! real(dp),dimension(:,:),pointer :: rs_phaml => null()
+!! !maybe put the x/y vectors here too just for simplicity
+!! end type glide_phaml
+
+ !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ ! for JFNK, NOX Trilinos solver
+ type, public :: glissade_solver
+
+ integer ,dimension(:,:) ,allocatable :: ui
+ integer ,dimension(:,:) ,allocatable :: um
+ real(dp) ,dimension(:,:) ,allocatable :: d2thckcross
+ real(dp) ,dimension(:,:) ,allocatable :: d2usrfcross
+ integer ,dimension(2) :: pcgsize
+ integer ,dimension(:) ,allocatable :: gxf
+ real(dp) :: L2norm
+ type(sparse_matrix_type) :: matrix
+ type(sparse_matrix_type) :: matrixA
+ type(sparse_matrix_type) :: matrixC
+ real(dp),dimension(:),pointer :: rhsd => null()
+ real(dp),dimension(:),pointer :: answ => null()
+ integer :: ct = 0
+
+ !TODO - KJE - Remove ewn/nsn from glissade_solver type once new glide_global_type is working and we can use those ewn/nsn
+ integer :: ewn
+ integer :: nsn
+
+ end type glissade_solver
+
+
+ type glide_global_type ! type containing all of the above for an ice sheet model instance
+ integer :: model_id !> Used in the global model list for error handling purposes
+ type(glide_general) :: general
+ type(glide_options) :: options
+ type(glide_geometry) :: geometry
+ type(glide_geomderv) :: geomderv
+ type(glide_velocity) :: velocity
+ type(glide_stress_t) :: stress
+ type(glide_climate) :: climate
+ type(eismint_climate_type) :: eismint_climate
+ type(glide_temper) :: temper
+ type(glide_basal_physics) :: basal_physics
+ type(glide_lithot_type) :: lithot
+ type(glide_funits) :: funits
+ type(glide_numerics) :: numerics
+ type(glide_velowk) :: velowk
+ type(glide_thckwk) :: thckwk
+ type(glide_tempwk) :: tempwk
+ type(glide_paramets) :: paramets
+ type(glimmap_proj) :: projection
+ type(glide_basalproc):: basalproc
+ type(profile_type) :: profile
+ type(glide_prof_type):: glide_prof
+ type(isostasy_type) :: isostasy
+!! type(glide_phaml) :: phaml
+ type(glide_grnd) :: ground ! grounding-line tracking (arrays not currently allocated)
+ type(glissade_solver):: solver_data ! sparse solver storage for JFNK / NOX Trilinos
+
+ end type glide_global_type
+
+contains
+
+ subroutine glide_allocarr(model)
+
+ !> Allocates the model arrays, and initialises some of them to zero.
+ !> These are the arrays allocated, and their dimensions:
+
+ !TODO - Make sure the itemized lists in subroutine glide_allocarr are complete.
+
+ !> In \texttt{model\%temper}:
+ !> \begin{itemize}
+ !> \item \texttt{temp(upn,0:ewn+1,0:nsn+1))} !WHL - 2 choices
+ !> \item \texttt{bheatflx(ewn,nsn))}
+ !> \item \texttt{flwa(upn,ewn,nsn))} !WHL - 2 choices
+ !> \item \texttt{dissip(upn,ewn,nsn))} !WHL - 2 choices
+ !> \item \texttt{bwat(ewn,nsn))}
+ !> \item \texttt{bmlt(ewn,nsn))}
+ !> \item \texttt{bfricflx(ewn,nsn))}
+ !> \item \texttt{ucondflx(ewn,nsn))}
+ !> \item \texttt{lcondflx(ewn,nsn))}
+ !> \item \texttt{dissipcol(ewn,nsn))}
+ !> \item \texttt{waterfrac(upn-1,ewn,nsn))} ! for enthalpy scheme under construction
+ !> \item \texttt{enthalpy(0:upn,ewn,nsn))} ! for enthalpy scheme under construction
+ !> \end{itemize}
+
+ !> In \texttt{model\%velocity}:
+ !> \begin{itemize}
+ !> \item \texttt{uvel(upn,ewn-1,nsn-1))}
+ !> \item \texttt{vvel(upn,ewn-1,nsn-1))}
+ !> \item \texttt{wvel(upn,ewn,nsn))}
+ !> \item \texttt{wgrd(upn,ewn,nsn))}
+ !> \item \texttt{uflx(ewn-1,nsn-1))}
+ !> \item \texttt{vflx(ewn-1,nsn-1))}
+ !> \item \texttt{diffu(ewn,nsn))}
+ !> \item \texttt{btrc(ewn,nsn))}
+ !> \item \texttt{ubas(ewn,nsn))}
+ !> \item \texttt{vbas(ewn,nsn))}
+ !> \end{itemize}
+
+ !> In \texttt{model\%climate}:
+ !> \begin{itemize}
+ !> \item \texttt{acab(ewn,nsn))}
+ !> \item \texttt{artm(ewn,nsn))}
+ !> \end{itemize}
+
+ !> In \texttt{model\%geomderv}:
+ !> \begin{itemize}
+ !> \item \texttt{dthckdew(ewn,nsn))}
+ !> \item \texttt{dusrfdew(ewn,nsn))}
+ !> \item \texttt{dthckdns(ewn,nsn))}
+ !> \item \texttt{dusrfdns(ewn,nsn))}
+ !> \item \texttt{dthckdtm(ewn,nsn))}
+ !> \item \texttt{dusrfdtm(ewn,nsn))}
+ !> \item \texttt{stagthck(ewn-1,nsn-1))}
+ !> \end{itemize}
+
+ !> In \texttt{model\%geometry}:
+ !> \begin{itemize}
+ !> \item \texttt{thck(ewn,nsn))}
+ !> \item \texttt{usrf(ewn,nsn))}
+ !> \item \texttt{lsrf(ewn,nsn))}
+ !> \item \texttt{topg(ewn,nsn))}
+ !> \item \texttt{mask(ewn,nsn))}
+ !> \item \texttt{age(ewn,nsn))}
+ !> \item \texttt{f_ground(ewn-1,nsn-1)}
+ !* (DFM) added floating_mask, ice_mask, lower_cell_loc, and lower_cell_temp
+ !> \item \texttt{floating_mask(ewn,nsn))}
+ !> \item \texttt{ice_mask(ewn,nsn))}
+ !> \item \texttt{lower_cell_loc(ewn,nsn))}
+ !> \item \texttt{lower_cell_temp(ewn,nsn))}
+ !> \end{itemize}
+
+ !> In \texttt{model\%thckwk}:
+ !> \begin{itemize}
+ !> \item \texttt{olds(ewn,nsn,thckwk\%nwhich))}
+ !> \end{itemize}
+
+ !> In \texttt{model\%numerics}:
+ !> \begin{itemize}
+ !> \item \texttt{sigma(upn))}
+ !> \item \texttt{stagsigma(upn-1))}
+ !> \end{itemize}
+
+ use glimmer_log
+ use glimmer_coordinates, only: coordsystem_allocate
+ use glimmer_paramets, only: unphys_val
+
+ implicit none
+
+ type(glide_global_type),intent(inout) :: model
+
+ integer :: ewn,nsn,upn
+
+ ! for simplicity, copy these values...
+
+ ewn = model%general%ewn
+ nsn = model%general%nsn
+ upn = model%general%upn
+
+ ! horizontal coordinates
+ ! NOTE(review): the "= 0.d0" initializations below are commented out, so
+ ! x0/y0/x1/y1 hold unspecified values until filled in elsewhere.
+
+ allocate(model%general%x0(ewn-1))!; model%general%x0 = 0.d0 ! velocity grid
+ allocate(model%general%y0(nsn-1))!; model%general%y0 = 0.d0
+ allocate(model%general%x1(ewn))!; model%general%x1 = 0.d0 ! ice grid (for scalars)
+ allocate(model%general%y1(nsn))!; model%general%y1 = 0.d0
+
+ ! vertical sigma coordinates
+ ! If we already have sigma, don't reallocate
+
+ if (associated(model%numerics%sigma)) then
+ if (size(model%numerics%sigma) /= upn) then
+ call write_log('Wrong number of sigma levels given',GM_FATAL)
+ end if
+ else
+ allocate(model%numerics%sigma(upn))
+ endif
+
+ allocate(model%numerics%stagsigma(upn-1))
+ allocate(model%numerics%stagwbndsigma(0:upn)) !MJH added (0:upn) as separate variable
+
+ ! temperature arrays
+
+ !NOTE: In the glide dycore (whichdycore = DYCORE_GLIDE), the temperature and
+ ! flow factor live on the unstaggered vertical grid, and extra rows and columns
+ ! (with indices 0:ewn+1, 0:nsn+1) are needed.
+ ! In the glam/glissade dycore, the temperature and flow factor live on
+ ! the staggered vertical grid, with temp and flwa defined at the
+ ! center of each layer k = 1:upn-1. The temperature (but not flwa)
+ ! is defined at the upper surface (k = 0) and lower surface (k = upn).
+
+ if (model%options%whichdycore == DYCORE_GLIDE) then
+ allocate(model%temper%temp(upn,0:ewn+1,0:nsn+1))
+ call coordsystem_allocate(model%general%ice_grid, upn, model%temper%flwa)
+ call coordsystem_allocate(model%general%ice_grid, upn, model%temper%dissip)
+ else ! glam/glissade dycore
+ allocate(model%temper%temp(0:upn,1:ewn,1:nsn))
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%temper%flwa)
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%temper%dissip)
+ endif
+
+ ! MJH - Set temp and flwa to physically unrealistic values so we can tell later if
+ ! arrays were initialized correctly
+ model%temper%temp(:,:,:) = unphys_val ! unphys_val = -999.d0
+ model%temper%flwa(:,:,:) = unphys_val
+ model%temper%dissip(:,:,:) = 0.d0
+
+ call coordsystem_allocate(model%general%ice_grid, model%temper%bheatflx)
+ call coordsystem_allocate(model%general%ice_grid, model%temper%bwat)
+ call coordsystem_allocate(model%general%ice_grid, model%temper%bwatflx)
+ call coordsystem_allocate(model%general%velo_grid, model%temper%stagbwat)
+ call coordsystem_allocate(model%general%ice_grid, model%temper%bmlt)
+ call coordsystem_allocate(model%general%ice_grid, model%temper%bmlt_tavg)
+ call coordsystem_allocate(model%general%ice_grid, model%temper%bpmp)
+ call coordsystem_allocate(model%general%velo_grid, model%temper%stagbpmp)
+ call coordsystem_allocate(model%general%velo_grid, model%temper%stagbtemp)
+ call coordsystem_allocate(model%general%ice_grid, model%temper%ucondflx)
+
+ if (model%options%whichdycore /= DYCORE_GLIDE) then ! glam/glissade only
+ call coordsystem_allocate(model%general%ice_grid, model%temper%bfricflx)
+ call coordsystem_allocate(model%general%ice_grid, model%temper%lcondflx)
+ call coordsystem_allocate(model%general%ice_grid, model%temper%dissipcol)
+ ! water fraction and enthalpy live at the midpoint of each layer (with temp and flwa)
+ ! enthalpy (like temp) is defined at the upper and lower surfaces as well
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%temper%waterfrac)
+ allocate(model%temper%enthalpy(0:upn,1:ewn,1:nsn))
+ model%temper%enthalpy(:,:,:) = 0.d0
+ endif
+
+ ! velocity arrays
+
+ call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%uvel)
+ call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%vvel)
+ call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%velnorm)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%uflx)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%vflx)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%bed_softness)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%btrc)
+ call coordsystem_allocate(model%general%velo_grid, 2, model%velocity%btraction)
+ call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%resid_u)
+ call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%resid_v)
+ call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%rhs_u)
+ call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%rhs_v)
+
+ ! These two are on the extended staggered grid, which is the same size as the ice grid.
+ call coordsystem_allocate(model%general%ice_grid, upn, model%velocity%uvel_extend)
+ call coordsystem_allocate(model%general%ice_grid, upn, model%velocity%vvel_extend)
+
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%ubas)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%ubas_tavg)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%vbas)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%vbas_tavg)
+
+ if (model%options%whichdycore == DYCORE_GLIDE) then
+ call coordsystem_allocate(model%general%ice_grid, upn, model%velocity%wvel)
+ call coordsystem_allocate(model%general%ice_grid, upn, model%velocity%wgrd)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%diffu)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%diffu_x)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%diffu_y)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%total_diffu)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%tau_x)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%tau_y)
+ else ! glam/glissade dycore
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%beta)
+ ! Set beta to a physically unrealistic value so we can tell later if it was read
+ ! correctly from an input file
+ model%velocity%beta(:,:) = -999.0d0
+ call coordsystem_allocate(model%general%ice_grid, model%velocity%unstagbeta)
+ ! WHL - Set unstagbeta to a physically unrealistic values so we can tell later if
+ ! it was read correctly from an input file
+ model%velocity%unstagbeta(:,:) = unphys_val
+
+ call coordsystem_allocate(model%general%ice_grid, upn, model%velocity%wvel_ho)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%kinbcmask)
+ call coordsystem_allocate(model%general%velo_grid, model%velocity%dynbcmask)
+ ! next 3 used for output of residual fields (when relevant code in glam_strs2 is active)
+! call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%ures)
+! call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%vres)
+! call coordsystem_allocate(model%general%velo_grid, upn, model%velocity%magres)
+ endif
+
+ ! higher-order stress arrays
+
+ if (model%options%whichdycore /= DYCORE_GLIDE) then ! glam/glissade dycore
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%stress%efvs)
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%stress%tau%scalar)
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%stress%tau%xz)
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%stress%tau%yz)
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%stress%tau%xx)
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%stress%tau%yy)
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%stress%tau%xy)
+ call coordsystem_allocate(model%general%velo_grid, model%stress%btractx)
+ call coordsystem_allocate(model%general%velo_grid, model%stress%btracty)
+ endif
+
+ ! geometry arrays
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%thck)
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%usrf)
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%lsrf)
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%topg)
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%thkmask)
+ call coordsystem_allocate(model%general%velo_grid, model%geometry%stagmask)
+
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%stagthck)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%dthckdew)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%dthckdns)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%dusrfdew)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%dusrfdns)
+
+ !* (DFM) -- added floating_mask, ice_mask, lower_cell_loc, and lower_cell_temp here
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%floating_mask)
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%ice_mask)
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%lower_cell_loc)
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%lower_cell_temp)
+
+ if (model%options%whichdycore == DYCORE_GLIDE) then
+ call coordsystem_allocate(model%general%ice_grid, model%geometry%thck_index)
+ call coordsystem_allocate(model%general%ice_grid, model%geomderv%dthckdtm)
+ call coordsystem_allocate(model%general%ice_grid, model%geomderv%dusrfdtm)
+ allocate(model%thckwk%olds(ewn,nsn,model%thckwk%nwhich))
+ model%thckwk%olds = 0.d0
+ call coordsystem_allocate(model%general%ice_grid, model%thckwk%oldthck)
+ call coordsystem_allocate(model%general%ice_grid, model%thckwk%oldthck2)
+ else ! glam/glissade dycore
+ call coordsystem_allocate(model%general%ice_grid, upn-1, model%geometry%age)
+ call coordsystem_allocate(model%general%velo_grid, model%geometry%f_ground)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%dlsrfdew)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%dlsrfdns)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%staglsrf)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%stagusrf)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%stagtopg)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%d2usrfdew2)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%d2usrfdns2)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%d2thckdew2)
+ call coordsystem_allocate(model%general%velo_grid, model%geomderv%d2thckdns2)
+ endif
+
+ ! Basal Physics
+ if ( (model%options%which_ho_babc == HO_BABC_POWERLAW) .or. &
+ (model%options%which_ho_babc == HO_BABC_COULOMB_FRICTION) .or. &
+ (model%options%whichbwat == BWATER_OCEAN_PENETRATION) ) then
+ call coordsystem_allocate(model%general%ice_grid, model%basal_physics%effecpress)
+ call coordsystem_allocate(model%general%velo_grid, model%basal_physics%effecpress_stag)
+ endif
+
+ ! climate arrays
+ call coordsystem_allocate(model%general%ice_grid, model%climate%acab)
+ call coordsystem_allocate(model%general%ice_grid, model%climate%acab_tavg)
+ call coordsystem_allocate(model%general%ice_grid, model%climate%artm)
+ call coordsystem_allocate(model%general%ice_grid, model%climate%calving)
+
+ ! matrix solver arrays
+
+ allocate (model%solver_data%rhsd(ewn*nsn))
+ allocate (model%solver_data%answ(ewn*nsn))
+
+ call new_sparse_matrix(ewn*nsn, 5*ewn*nsn, model%solver_data%matrix)
+
+ !TODO - In model%lithot%temp, put the vertical index 3rd as in model%temper%temp?
+
+ ! lithosphere arrays
+
+ if (model%options%gthf == GTHF_COMPUTE) then
+ allocate(model%lithot%temp(1:ewn,1:nsn,model%lithot%nlayer)); model%lithot%temp = 0.d0
+ call coordsystem_allocate(model%general%ice_grid, model%lithot%mask)
+ endif
+
+ ! isostasy arrays
+
+ call coordsystem_allocate(model%general%ice_grid, model%isostasy%relx) ! MJH: relx needs to be allocated always.
+ if (model%options%isostasy == ISOSTASY_COMPUTE) then
+ call coordsystem_allocate(model%general%ice_grid, model%isostasy%load)
+ call coordsystem_allocate(model%general%ice_grid, model%isostasy%load_factors)
+ endif
+
+ ! The remaining arrays are not currently used (except mintauf)
+ ! phaml arrays
+!! call coordsystem_allocate(model%general%ice_grid, model%phaml%init_phaml)
+!! call coordsystem_allocate(model%general%ice_grid, model%phaml%rs_phaml)
+!! call coordsystem_allocate(model%general%ice_grid, model%phaml%uphaml)
+
+ ! grounding line arrays (not currently supported)
+
+!! if (model%options%whichdycore /= DYCORE_GLIDE) then ! glam/glissade dycore
+!! allocate (model%ground%gl_ew(ewn-1,nsn))
+!! allocate (model%ground%gl_ns(ewn,nsn-1))
+!! allocate (model%ground%gline_flux(ewn,nsn))
+!! endif
+
+ ! basal process arrays
+ ! not currently supported, except that glam_strs2 uses mintauf
+
+ if (model%options%whichdycore /= DYCORE_GLIDE) then ! glam/glissade dycore
+!! call coordsystem_allocate(model%general%ice_grid, model%basalproc%Hwater)
+ call coordsystem_allocate(model%general%velo_grid, model%basalproc%mintauf)
+!! allocate(model%basalproc%u (ewn-1,nsn-1,model%basalproc%tnodes)); model%basalproc%u=41.0d3
+!! allocate(model%basalproc%etill (ewn-1,nsn-1,model%basalproc%tnodes));model%basalproc%etill=0.5d0
+ endif
+
+ end subroutine glide_allocarr
+
+
+ subroutine glide_deallocarr(model)
+
+ !> deallocate model arrays
+ !TODO - Check that all arrays allocated above are deallocated here.
+
+ implicit none
+ type(glide_global_type),intent(inout) :: model
+
+ ! horizontal coordinates
+
+ if (associated(model%general%x0)) &
+ deallocate(model%general%x0)
+ if (associated(model%general%y0)) &
+ deallocate(model%general%y0)
+ if (associated(model%general%x1)) &
+ deallocate(model%general%x1)
+ if (associated(model%general%y1)) &
+ deallocate(model%general%y1)
+
+ ! vertical sigma coordinates
+
+ if (associated(model%numerics%sigma)) &
+ deallocate(model%numerics%sigma)
+ if (associated(model%numerics%stagsigma)) &
+ deallocate(model%numerics%stagsigma)
+ if (associated(model%numerics%stagwbndsigma)) &
+ deallocate(model%numerics%stagwbndsigma)
+
+ ! temperature arrays
+
+ if (associated(model%temper%temp)) &
+ deallocate(model%temper%temp)
+ if (associated(model%temper%bheatflx)) &
+ deallocate(model%temper%bheatflx)
+ if (associated(model%temper%bwat)) &
+ deallocate(model%temper%bwat)
+ if (associated(model%temper%bwatflx)) &
+ deallocate(model%temper%bwatflx)
+ if (associated(model%temper%stagbwat)) &
+ deallocate(model%temper%stagbwat)
+ if (associated(model%temper%bmlt)) &
+ deallocate(model%temper%bmlt)
+ if (associated(model%temper%bmlt_tavg)) &
+ deallocate(model%temper%bmlt_tavg)
+ if (associated(model%temper%bpmp)) &
+ deallocate(model%temper%bpmp)
+ if (associated(model%temper%stagbpmp)) &
+ deallocate(model%temper%stagbpmp)
+ if (associated(model%temper%stagbtemp)) &
+ deallocate(model%temper%stagbtemp)
+ if (associated(model%temper%bfricflx)) &
+ deallocate(model%temper%bfricflx)
+ if (associated(model%temper%ucondflx)) &
+ deallocate(model%temper%ucondflx)
+ if (associated(model%temper%lcondflx)) &
+ deallocate(model%temper%lcondflx)
+ if (associated(model%temper%dissipcol)) &
+ deallocate(model%temper%dissipcol)
+ if (associated(model%temper%waterfrac)) &
+ deallocate(model%temper%waterfrac)
+ if (associated(model%temper%enthalpy)) &
+ deallocate(model%temper%enthalpy)
+ if (associated(model%temper%flwa)) &
+ deallocate(model%temper%flwa)
+ if (associated(model%temper%dissip)) &
+ deallocate(model%temper%dissip)
+
+ ! velocity arrays
+
+ if (associated(model%velocity%uvel)) &
+ deallocate(model%velocity%uvel)
+ if (associated(model%velocity%vvel)) &
+ deallocate(model%velocity%vvel)
+ if (associated(model%velocity%velnorm)) &
+ deallocate(model%velocity%velnorm)
+ if (associated(model%velocity%wvel)) &
+ deallocate(model%velocity%wvel)
+ if (associated(model%velocity%uflx)) &
+ deallocate(model%velocity%uflx)
+ if (associated(model%velocity%vflx)) &
+ deallocate(model%velocity%vflx)
+ if (associated(model%velocity%bed_softness)) &
+ deallocate(model%velocity%bed_softness)
+ if (associated(model%velocity%btrc)) &
+ deallocate(model%velocity%btrc)
+ if (associated(model%velocity%btraction)) &
+ deallocate(model%velocity%btraction)
+ if (associated(model%velocity%uvel_extend)) &
+ deallocate(model%velocity%uvel_extend)
+ if (associated(model%velocity%vvel_extend)) &
+ deallocate(model%velocity%vvel_extend)
+ if (associated(model%velocity%resid_u)) &
+ deallocate(model%velocity%resid_u)
+ if (associated(model%velocity%resid_v)) &
+ deallocate(model%velocity%resid_v)
+ if (associated(model%velocity%rhs_u)) &
+ deallocate(model%velocity%rhs_u)
+ if (associated(model%velocity%rhs_v)) &
+ deallocate(model%velocity%rhs_v)
+
+ if (associated(model%velocity%ubas)) &
+ deallocate(model%velocity%ubas)
+ if (associated(model%velocity%ubas_tavg)) &
+ deallocate(model%velocity%ubas_tavg)
+ if (associated(model%velocity%vbas)) &
+ deallocate(model%velocity%vbas)
+ if (associated(model%velocity%vbas_tavg)) &
+ deallocate(model%velocity%vbas_tavg)
+
+ if (associated(model%velocity%wgrd)) &
+ deallocate(model%velocity%wgrd)
+ if (associated(model%velocity%diffu)) &
+ deallocate(model%velocity%diffu)
+ if (associated(model%velocity%diffu_x)) &
+ deallocate(model%velocity%diffu_x)
+ if (associated(model%velocity%diffu_y)) &
+ deallocate(model%velocity%diffu_y)
+ if (associated(model%velocity%total_diffu)) &
+ deallocate(model%velocity%total_diffu)
+ if (associated(model%velocity%tau_x)) &
+ deallocate(model%velocity%tau_x)
+ if (associated(model%velocity%tau_y)) &
+ deallocate(model%velocity%tau_y)
+
+!! if (associated(model%velocity%velmask)) & ! no longer used
+!! deallocate(model%velocity%velmask)
+
+ if (associated(model%velocity%beta)) &
+ deallocate(model%velocity%beta)
+ if (associated(model%velocity%unstagbeta)) &
+ deallocate(model%velocity%unstagbeta)
+ if (associated(model%velocity%wvel_ho)) &
+ deallocate(model%velocity%wvel_ho)
+ if (associated(model%velocity%kinbcmask)) &
+ deallocate(model%velocity%kinbcmask)
+ if (associated(model%velocity%dynbcmask)) &
+ deallocate(model%velocity%dynbcmask)
+
+ !! next 3 used for output of residual fields (when relevant code in glam_strs2 is active)
+! if (associated(model%velocity%ures)) &
+! deallocate(model%velocity%ures)
+! if (associated(model%velocity%vres)) &
+! deallocate(model%velocity%vres)
+! if (associated(model%velocity%magres)) &
+! deallocate(model%velocity%magres)
+
+ ! higher-order stress arrays
+
+ if (associated(model%stress%efvs)) &
+ deallocate(model%stress%efvs)
+ if (associated(model%stress%tau%scalar)) &
+ deallocate(model%stress%tau%scalar)
+ if (associated(model%stress%tau%xz)) &
+ deallocate(model%stress%tau%xz)
+ if (associated(model%stress%tau%yz)) &
+ deallocate(model%stress%tau%yz)
+ if (associated(model%stress%tau%xx)) &
+ deallocate(model%stress%tau%xx)
+ if (associated(model%stress%tau%yy)) &
+ deallocate(model%stress%tau%yy)
+ if (associated(model%stress%tau%xy)) &
+ deallocate(model%stress%tau%xy)
+ if (associated(model%stress%btractx)) &
+ deallocate(model%stress%btractx)
+ if (associated(model%stress%btracty)) &
+ deallocate(model%stress%btracty)
+
+ ! basal physics arrays
+ if (associated(model%basal_physics%effecpress)) &
+ deallocate(model%basal_physics%effecpress)
+ if (associated(model%basal_physics%effecpress_stag)) &
+ deallocate(model%basal_physics%effecpress_stag)
+
+ ! geometry arrays
+
+ if (associated(model%geometry%thck)) &
+ deallocate(model%geometry%thck)
+ if (associated(model%geometry%usrf)) &
+ deallocate(model%geometry%usrf)
+ if (associated(model%geometry%lsrf)) &
+ deallocate(model%geometry%lsrf)
+ if (associated(model%geometry%topg)) &
+ deallocate(model%geometry%topg)
+ if (associated(model%geometry%thkmask)) &
+ deallocate(model%geometry%thkmask)
+ if (associated(model%geometry%stagmask)) &
+ deallocate(model%geometry%stagmask)
+ if (associated(model%geomderv%stagthck)) &
+ deallocate(model%geomderv%stagthck)
+ if (associated(model%geomderv%dthckdew)) &
+ deallocate(model%geomderv%dthckdew)
+ if (associated(model%geomderv%dthckdns)) &
+ deallocate(model%geomderv%dthckdns)
+ if (associated(model%geomderv%dusrfdew)) &
+ deallocate(model%geomderv%dusrfdew)
+ if (associated(model%geomderv%dusrfdns)) &
+ deallocate(model%geomderv%dusrfdns)
+!! if (associated(model%geometry%marine_bc_normal)) &
+!! deallocate(model%geometry%marine_bc_normal)
+
+ !*SFP: fields that need to be passed to POP for ice ocean coupling
+ !* (DFM -- deallocate floating_mask, ice_mask, lower_cell_loc, and lower_cell_temp)
+ if (associated(model%geometry%floating_mask)) &
+ deallocate(model%geometry%floating_mask)
+ if (associated(model%geometry%ice_mask)) &
+ deallocate(model%geometry%ice_mask)
+ if (associated(model%geometry%lower_cell_loc)) &
+ deallocate(model%geometry%lower_cell_loc)
+ if (associated(model%geometry%lower_cell_temp)) &
+ deallocate(model%geometry%lower_cell_temp)
+
+ if (associated(model%geometry%thck_index)) &
+ deallocate(model%geometry%thck_index)
+ if (associated(model%geomderv%dthckdtm)) &
+ deallocate(model%geomderv%dthckdtm)
+ if (associated(model%geomderv%dusrfdtm)) &
+ deallocate(model%geomderv%dusrfdtm)
+ if (associated(model%thckwk%olds)) &
+ deallocate(model%thckwk%olds)
+ if (associated(model%thckwk%oldthck)) &
+ deallocate(model%thckwk%oldthck)
+ if (associated(model%thckwk%oldthck2)) &
+ deallocate(model%thckwk%oldthck2)
+!! if (associated(model%thckwk%float)) & ! no longer used
+!! deallocate(model%thckwk%float)
+
+ if (associated(model%geometry%age)) &
+ deallocate(model%geometry%age)
+ if (associated(model%geometry%f_ground)) &
+ deallocate(model%geometry%f_ground)
+ if (associated(model%geomderv%dlsrfdew)) &
+ deallocate(model%geomderv%dlsrfdew)
+ if (associated(model%geomderv%dlsrfdns)) &
+ deallocate(model%geomderv%dlsrfdns)
+ if (associated(model%geomderv%staglsrf)) &
+ deallocate(model%geomderv%staglsrf)
+ if (associated(model%geomderv%stagusrf)) &
+ deallocate(model%geomderv%stagusrf)
+ if (associated(model%geomderv%stagtopg)) &
+ deallocate(model%geomderv%stagtopg)
+ if (associated(model%geomderv%d2usrfdew2)) &
+ deallocate(model%geomderv%d2usrfdew2)
+ if (associated(model%geomderv%d2usrfdns2)) &
+ deallocate(model%geomderv%d2usrfdns2)
+ if (associated(model%geomderv%d2thckdew2)) &
+ deallocate(model%geomderv%d2thckdew2)
+ if (associated(model%geomderv%d2thckdns2)) &
+ deallocate(model%geomderv%d2thckdns2)
+
+ ! climate arrays
+
+ if (associated(model%climate%acab)) &
+ deallocate(model%climate%acab)
+ if (associated(model%climate%acab_tavg)) &
+ deallocate(model%climate%acab_tavg)
+ if (associated(model%climate%artm)) &
+ deallocate(model%climate%artm)
+ if (associated(model%climate%calving)) &
+ deallocate(model%climate%calving)
+
+ ! matrix solver arrays
+
+ if (associated(model%solver_data%rhsd)) &
+ deallocate(model%solver_data%rhsd)
+ if (associated(model%solver_data%answ)) &
+ deallocate(model%solver_data%answ)
+
+ !KJE do we need this here? The parts within are allocated in glam_strs2
+ call del_sparse_matrix(model%solver_data%matrix)
+
+ ! lithosphere arrays
+
+ if (associated(model%lithot%temp)) &
+ deallocate(model%lithot%temp)
+ if (associated(model%lithot%mask)) &
+ deallocate(model%lithot%mask)
+
+ ! isostasy arrays
+
+ if (associated(model%isostasy%relx)) &
+ deallocate(model%isostasy%relx)
+ if (associated(model%isostasy%load)) &
+ deallocate(model%isostasy%load)
+ if (associated(model%isostasy%load_factors)) &
+ deallocate(model%isostasy%load_factors)
+
+ ! The remaining arrays are not currently used (except mintauf)
+ ! phaml arrays
+
+!! if (associated(model%phaml%init_phaml)) &
+!! deallocate(model%phaml%init_phaml)
+!! if (associated(model%phaml%rs_phaml)) &
+!! deallocate(model%phaml%rs_phaml)
+!! if (associated(model%phaml%uphaml)) &
+!! deallocate(model%phaml%uphaml)
+
+ ! grounding line arrays (not currently supported)
+
+!! if (associated(model%ground%gl_ns)) &
+!! deallocate(model%ground%gl_ns)
+!! if (associated(model%ground%gl_ew)) &
+!! deallocate(model%ground%gl_ew)
+!! if (associated(model%ground%gline_flux)) &
+!! deallocate(model%ground%gline_flux)
+
+ ! basal process arrays
+ ! not currently supported, except that glam_strs2 uses mintauf
+
+!! if (associated(model%basalproc%Hwater)) &
+!! deallocate(model%basalproc%Hwater)
+ if (associated(model%basalproc%mintauf)) &
+ deallocate(model%basalproc%mintauf)
+!! if (associated(model%basalproc%u)) &
+!! deallocate(model%basalproc%u)
+!! if (associated(model%basalproc%etill)) &
+!! deallocate(model%basalproc%etill)
+
+ end subroutine glide_deallocarr
+
+
+ ! some accessor functions
+ function get_dew(model)
+ !> return scaled x node spacing
+ use glimmer_paramets, only : len0
+ implicit none
+ real(dp) :: get_dew
+ type(glide_global_type) :: model
+
+ get_dew = model%numerics%dew * len0
+ end function get_dew
+
+ function get_dns(model)
+ !> return scaled y node spacing
+ use glimmer_paramets, only : len0
+ implicit none
+ real(dp) :: get_dns
+ type(glide_global_type) :: model
+
+ get_dns = model%numerics%dns * len0
+ end function get_dns
+
+ function get_tstart(model)
+ !> return start time
+ implicit none
+ real(dp) :: get_tstart
+ type(glide_global_type) :: model
+
+ get_tstart = model%numerics%tstart
+ end function get_tstart
+
+ function get_tend(model)
+ !> return end time
+ implicit none
+ real(dp) :: get_tend
+ type(glide_global_type) :: model
+
+ get_tend = model%numerics%tend
+ end function get_tend
+
+ function get_tinc(model)
+ !> return time increment
+ implicit none
+ real(dp) :: get_tinc
+ type(glide_global_type) :: model
+
+ get_tinc = model%numerics%tinc
+ end function get_tinc
+
+ function get_ewn(model)
+ !> get number of nodes in x dir
+ implicit none
+ integer get_ewn
+ type(glide_global_type) :: model
+
+ get_ewn = model%general%ewn
+ end function get_ewn
+
+ function get_nsn(model)
+ !> get number of nodes in y dir
+ implicit none
+ integer get_nsn
+ type(glide_global_type) :: model
+
+ get_nsn = model%general%nsn
+ end function get_nsn
+
+ subroutine set_time(model,time)
+ !> Set the model time counter --- useful for
+ !> fractional year output
+ implicit none
+ type(glide_global_type) :: model
+ real(dp) :: time
+
+ model%numerics%time = time
+ end subroutine set_time
+
+end module glide_types
diff --git a/components/cism/glimmer-cism/libglide/glide_vars.def b/components/cism/glimmer-cism/libglide/glide_vars.def
new file mode 100644
index 0000000000..416b59956c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_vars.def
@@ -0,0 +1,821 @@
+#[]
+#dimensions: time, level, y1, x1
+#units: The documentation will complain if units have LaTeX characters like ^
+#long_name:
+#data:
+#factor:
+
+# setup for code generator
+[VARSET]
+# prefix of the generated module
+name: glide
+# f90 type containing all necessary data
+datatype: glide_global_type
+# module where type is defined
+datamod: glide_types
+
+[x0]
+dimensions: x0
+units: meter
+long_name: Cartesian x-coordinate, velocity grid
+axis: X
+data: data%general%x0
+dimlen: global_ewn-1
+
+[y0]
+dimensions: y0
+units: meter
+long_name: Cartesian y-coordinate, velocity grid
+axis: Y
+data: data%general%y0
+dimlen: global_nsn-1
+
+[x1]
+dimensions: x1
+units: meter
+long_name: Cartesian x-coordinate
+axis: X
+data: data%general%x1
+dimlen: global_ewn
+load:1
+
+[y1]
+dimensions: y1
+units: meter
+long_name: Cartesian y-coordinate
+axis: Y
+data: data%general%y1
+dimlen: global_nsn
+load:1
+
+[level]
+dimensions: level
+units: 1
+long_name: sigma layers
+standard_name: land_ice_sigma_coordinate
+#formula_terms: sigma: level topo: topg thick: thk
+positive: down
+dimlen: model%general%upn
+
+[staglevel]
+dimensions: staglevel
+units: 1
+long_name: stag sigma layers
+standard_name: land_ice_stag_sigma_coordinate
+positive: down
+dimlen: model%general%upn-1
+
+[stagwbndlevel]
+dimensions: stagwbndlevel
+units: 1
+long_name: stag sigma layers with boundaries
+standard_name: land_ice_stag_sigma_coordinate_with_bnd
+positive: down
+dimlen: model%general%upn+1
+
+[lithoz]
+dimensions: lithoz
+units: meter
+long_name: vertical coordinate of lithosphere layer
+dimlen: model%lithot%nlayer
+
+[relx]
+dimensions: time, y1, x1
+units: meter
+long_name: relaxed bedrock topography
+data: data%isostasy%relx
+factor: thk0
+load: 1
+coordinates: lon lat
+
+[eus]
+dimensions: time
+units: meter
+long_name: global average sea level
+data: data%climate%eus
+factor: thk0
+standard_name: global_average_sea_level_change
+
+[uflx]
+dimensions: time, y0, x0
+units: meter2/year
+long_name: flux in x direction (NOTE: Glide and Glam only)
+data: data%velocity%uflx
+factor: scale_uflx
+
+[vflx]
+dimensions: time, y0, x0
+units: meter2/year
+long_name: flux in y direction (NOTE: Glide and Glam only)
+data: data%velocity%vflx
+factor: scale_uflx
+
+[diffu]
+dimensions: time, y0, x0
+units: meter2/year
+long_name: apparent diffusivity
+data: data%velocity%diffu
+factor: scale_diffu
+
+[btrc]
+dimensions: time, y0, x0
+units: meter/pascal/year
+long_name: basal slip coefficient
+data: data%velocity%btrc
+factor: scale_btrc
+
+[soft]
+dimensions: time, y0, x0
+units: meter/pascal/year
+long_name: bed softness parameter
+data: data%velocity%bed_softness
+factor: scale_btrc
+load: 1
+
+[beta]
+dimensions: time, y0, x0
+units: Pa yr/m
+long_name: higher-order bed stress coefficient
+data: data%velocity%beta
+factor: scale_beta
+load: 1
+
+#WHL - added unstagbeta, which lives on the unstaggered ice grid
+# (unlike beta, which lives on the staggered velocity grid)
+[unstagbeta]
+dimensions: time, y1, x1
+units: Pa yr/m
+long_name: higher-order bed stress coefficient on the unstaggered grid (NOTE: this will overwrite beta if both are input)
+data: data%velocity%unstagbeta
+factor: scale_beta
+load: 1
+
+[tauf]
+dimensions: time, y0, x0
+units: Pa
+long_name: higher-order basal yield stress
+data: data%basalproc%mintauf
+factor: scale_tau
+load: 1
+
+[btractx]
+dimensions: time, y0, x0
+units: Pa
+long_name: basal traction (x-direction comp)
+data: data%stress%btractx(:,:)
+factor: scale_tau
+
+[btracty]
+dimensions: time, y0, x0
+units: Pa
+long_name: basal traction (y-direction comp)
+data: data%stress%btracty(:,:)
+factor: scale_tau
+
+[ubas]
+dimensions: time, y0, x0
+units: meter/year
+long_name: basal slip velocity in x direction
+data: data%velocity%ubas
+factor: scale_uvel
+standard_name: land_ice_basal_x_velocity
+load: 1
+#average: 1
+
+[vbas]
+dimensions: time, y0, x0
+units: meter/year
+long_name: basal slip velocity in y direction
+data: data%velocity%vbas
+factor: scale_uvel
+standard_name: land_ice_basal_y_velocity
+load: 1
+#average: 1
+
+[taux]
+dimensions: time, y0, x0
+units: kilopascal
+long_name: basal shear stress in x direction (NOTE: Glide only)
+data: data%velocity%tau_x
+factor: 1e-3*thk0*thk0/len0
+
+[tauy]
+dimensions: time, y0, x0
+units: kilopascal
+long_name: basal shear stress in y direction
+data: data%velocity%tau_y
+factor: 1e-3*thk0*thk0/len0
+
+[thk]
+dimensions: time, y1, x1
+units: meter
+long_name: ice thickness
+data: data%geometry%thck
+factor: thk0
+load: 1
+standard_name: land_ice_thickness
+coordinates: lon lat
+
+[stagthk]
+dimensions: time, y0, x0
+units: meter
+long_name: staggered ice thickness
+data: data%geomderv%stagthck
+factor: thk0
+standard_name: stag_land_ice_thickness
+load: 0
+coordinates: lon lat
+
+[calving]
+dimensions: time, y1, x1
+units: meter
+long_name: ice margin calving
+data: data%climate%calving
+factor: thk0
+coordinates: lon lat
+
+[ivol]
+dimensions: time
+units: km3
+factor: thk0*len0*len0*1.e-9
+long_name: ice volume
+data: data%geometry%ivol
+
+[iarea]
+dimensions: time
+units: km2
+long_name: area covered by ice
+factor: len0*len0*1.e-6
+data: data%geometry%iarea
+
+[iareag]
+dimensions: time
+units: km2
+long_name: area covered by grounded ice
+factor: len0*len0*1.e-6
+data: data%geometry%iareag
+
+[iareaf]
+dimensions: time
+units: km2
+long_name: area covered by floating ice
+factor: len0*len0*1.e-6
+data: data%geometry%iareaf
+
+[thkmask]
+dimensions: time, y1, x1
+long_name: mask
+units: 1
+data: data%geometry%thkmask
+type: int
+coordinates: lon lat
+load: 1
+
+[usurf]
+dimensions: time, y1, x1
+units: meter
+long_name: ice upper surface elevation
+data: data%geometry%usrf
+factor: thk0
+load: 1
+standard_name: surface_altitude
+coordinates: lon lat
+
+[lsurf]
+dimensions: time, y1, x1
+units: meter
+long_name: ice lower surface elevation
+data: data%geometry%lsrf
+factor: thk0
+coordinates: lon lat
+
+[topg]
+dimensions: time, y1, x1
+units: meter
+long_name: bedrock topography
+data: data%geometry%topg
+factor: thk0
+load: 1
+standard_name: bedrock_altitude
+coordinates: lon lat
+
+## D. Martin added - fields that need to be passed to POP for ice-ocean coupling
+#[floating_mask]
+#dimensions: time, y1, x1
+#units: 1
+#long_name: real-valued mask denoting grounded/floating
+#data: data%geometry%floating_mask
+#factor: 1.0
+#coordinates: lon lat
+
+# D. Martin added - fields that need to be passed to POP for ice-ocean coupling
+[ice_mask]
+dimensions: time, y1, x1
+units: 1
+long_name: real-valued mask denoting ice (1) or no ice (0)
+data: data%geometry%ice_mask
+factor: 1.0
+coordinates: lon lat
+
+## D. Martin added - fields that need to be passed to POP for ice-ocean coupling
+#[lower_cell_loc]
+#dimensions: time, y1, x1
+#units: meter
+#long_name: location in z of lower cell location
+#data: data%geometry%lower_cell_loc
+#factor: 1.0
+#coordinates: lon lat
+
+## D. Martin added - fields that need to be passed to POP for ice-ocean coupling
+#[lower_cell_temp]
+#dimensions: time, y1, x1
+#units: degrees K
+#long_name: temperature at lower_cell_loc
+#data: data%geometry%lower_cell_temp
+#factor: 1.0
+#coordinates: lon lat
+
+[acab]
+dimensions: time, y1, x1
+units: meter/year
+long_name: accumulation, ablation rate
+data: data%climate%acab
+factor: scale_acab
+standard_name: land_ice_surface_specific_mass_balance
+coordinates: lon lat
+#average: 1
+load: 1
+
+#WHL: scale_bflux = -1, to reverse sign convention from + up to + down
+[bheatflx]
+dimensions: time, y1, x1
+units: watt/meter2
+long_name: upward basal heat flux
+data: data%temper%bheatflx
+factor: scale_bflx
+load: 1
+coordinates: lon lat
+
+[bmlt]
+dimensions: time, y1, x1
+units: meter/year
+long_name: basal melt rate
+data: data%temper%bmlt
+factor: scale_acab
+standard_name: land_ice_basal_melt_rate
+load: 1
+coordinates: lon lat
+#average: 1
+
+[bfricflx]
+dimensions: time, y1, x1
+units: watt/meter2
+long_name: basal friction heat flux
+data: data%temper%bfricflx
+factor: 1.0
+load: 1
+coordinates: lon lat
+
+[bwat]
+dimensions: time, y1, x1
+units: meter
+long_name: basal water depth
+data: data%temper%bwat
+factor: thk0
+load: 1
+coordinates: lon lat
+
+[bwatflx]
+dimensions: time, y1, x1
+units: meter3/year
+long_name: basal water flux
+data: data%temper%bwatflx
+factor: thk0
+coordinates: lon lat
+
+[effecpress]
+dimensions: time, y1, x1
+units: Pa
+long_name: effective pressure
+data: data%basal_physics%effecpress
+load: 1
+coordinates: lon lat
+
+[artm]
+dimensions: time, y1, x1
+units: degree_Celsius
+long_name: annual mean air temperature
+data: data%climate%artm
+standard_name: surface_temperature
+cell_methods: time: mean
+coordinates: lon lat
+load: 1
+
+[surftemp]
+dimensions: time, y1, x1
+units: degree_Celsius
+long_name: annual mean surface temperature
+data: data%climate%artm
+standard_name: surface_temperature
+cell_methods: time: mean
+coordinates: lon lat
+load: 1
+
+[btemp]
+dimensions: time, y1, x1
+units: degree_Celsius
+long_name: basal ice temperature
+data: data%temper%temp(data%general%upn,1:data%general%ewn,1:data%general%nsn)
+standard_name: land_ice_temperature
+coordinates: lon lat
+
+[dusrfdtm]
+dimensions: time, y1, x1
+units: meter/year
+long_name: rate of upper ice surface elevation change (NOTE: Glide only)
+data: data%geomderv%dusrfdtm
+factor: scale_acab
+coordinates: lon lat
+
+[dthckdtm]
+dimensions: time, y1,x1
+units: meter/year
+long_name: tendency of ice thickness (NOTE: Glide only)
+data: data%geomderv%dthckdtm
+factor: scale_acab
+coordinates: lon lat
+
+[uvel]
+dimensions: time, level, y0, x0
+units: meter/year
+long_name: ice velocity in x direction
+data: data%velocity%uvel(up,:,:)
+factor: scale_uvel
+standard_name: land_ice_x_velocity
+load: 1
+
+[vvel]
+dimensions: time, level, y0, x0
+units: meter/year
+long_name: ice velocity in y direction
+data: data%velocity%vvel(up,:,:)
+factor: scale_uvel
+standard_name: land_ice_y_velocity
+load: 1
+
+[uvel_extend]
+dimensions: time, level, y1, x1
+units: meter/year
+long_name: ice velocity in x direction (extended grid)
+data: data%velocity%uvel_extend(up,:,:)
+factor: scale_uvel
+standard_name: land_ice_x_velocity
+hot: 0
+
+[vvel_extend]
+dimensions: time, level, y1, x1
+units: meter/year
+long_name: ice velocity in y direction (extended grid)
+data: data%velocity%vvel_extend(up,:,:)
+factor: scale_uvel
+standard_name: land_ice_y_velocity
+hot: 0
+
+[resid_u]
+dimensions: time, level, y0, x0
+units: Pa/m
+long_name: u component of residual Ax - b (NOTE: Glam only)
+data: data%velocity%resid_u(up,:,:)
+factor: scale_resid
+
+[resid_v]
+dimensions: time, level, y0, x0
+units: Pa/m
+long_name: v component of residual Ax - b (NOTE: Glam only)
+data: data%velocity%resid_v(up,:,:)
+factor: scale_resid
+
+[rhs_u]
+dimensions: time, level, y0, x0
+units: Pa/m
+long_name: u component of b in Ax = b
+data: data%velocity%rhs_u(up,:,:)
+factor: scale_resid
+
+[rhs_v]
+dimensions: time, level, y0, x0
+units: Pa/m
+long_name: v component of b in Ax = b
+data: data%velocity%rhs_v(up,:,:)
+factor: scale_resid
+
+# used for output of residual
+#[ures]
+#dimensions: time, level, y0, x0
+#units: meter/year
+#long_name: ice velocity resid. in x direction
+#data: data%velocity%ures(up,:,:)
+#factor: scale_uvel
+#standard_name: land_ice_x_velocity_resid
+
+# used for output of residual
+#[vres]
+#dimensions: time, level, y0, x0
+#units: meter/year
+#long_name: ice velocity resid. in y direction
+#data: data%velocity%vres(up,:,:)
+#factor: scale_uvel
+#standard_name: land_ice_y_velocity_resid
+
+# used for output of res fields
+#[magres]
+#dimensions: time, level, y0, x0
+#units: meter/year
+#long_name: ice velocity resid. magnitude
+#data: data%velocity%magres(up,:,:)
+#factor: scale_uvel
+#standard_name: land_ice_y_velocity_resid
+
+[kinbcmask]
+dimensions: time, y0, x0
+units: 1
+long_name: Mask of locations where uvel, vvel value should be held
+data: data%velocity%kinbcmask(:,:)
+type: int
+load: 1
+
+[dynbcmask]
+dimensions: time, y0, x0
+units: 1
+long_name: 2d array of higher-order model boundary condition mask values (NOTE: Glam ONLY)
+data: data%velocity%dynbcmask
+type: int
+load: 0
+
+[velnorm]
+dimensions: time, level, y0, x0
+units: meter/year
+long_name: Horizontal ice velocity magnitude
+data: data%velocity%velnorm(up,:,:)
+factor: scale_uvel
+coordinates: lon lat
+
+[tau_eff]
+dimensions: time, staglevel, y1, x1
+units: Pa
+long_name: effective stress
+data: data%stress%tau%scalar(up,:,:)
+factor: scale_tau
+
+[tau_xz]
+dimensions: time, staglevel, y1, x1
+units: Pa
+long_name: X component vertical shear stress
+data: data%stress%tau%xz(up,:,:)
+factor: scale_tau
+
+[tau_yz]
+dimensions: time, staglevel, y1, x1
+units: Pa
+long_name: Y component vertical shear stress
+data: data%stress%tau%yz(up,:,:)
+factor: scale_tau
+
+[tau_xx]
+dimensions: time, staglevel, y1, x1
+units: Pa
+long_name: x component of horiz. normal stress
+data: data%stress%tau%xx(up,:,:)
+factor: scale_tau
+
+[tau_yy]
+dimensions: time, staglevel, y1, x1
+units: Pa
+long_name: y component of horiz. normal stress
+data: data%stress%tau%yy(up,:,:)
+factor: scale_tau
+
+[tau_xy]
+dimensions: time, staglevel, y1, x1
+units: Pa
+long_name: horiz. shear stress
+data: data%stress%tau%xy(up,:,:)
+factor: scale_tau
+
+[wvel]
+dimensions: time, level, y1, x1
+units: meter/year
+long_name: vertical ice velocity
+data: data%velocity%wvel(up,:,:)
+factor: scale_wvel
+coordinates: lon lat
+load: 1
+
+[wvel_ho]
+dimensions: time, level, y1, x1
+units: meter/year
+long_name: vertical ice velocity relative to ice sheet base from higher-order model (NOTE: Glam only)
+data: data%velocity%wvel_ho(up,:,:)
+factor: scale_wvel
+coordinates: lon lat
+load: 1
+
+[wgrd]
+dimensions: time, level, y1, x1
+units: meter/year
+long_name: Vertical grid velocity
+data: data%velocity%wgrd(up,:,:)
+factor: scale_wvel
+coordinates: lon lat
+load: 1
+
+[waterfrac]
+dimensions: time, staglevel, y1, x1
+units: unitless [0,1]
+long_name: internal water fraction
+data: data%temper%waterfrac(up,:,:)
+coordinates: lon lat
+load: 1
+
+[enthalpy]
+dimensions: time, stagwbndlevel, y1, x1
+units: J/m^3
+long_name: specific enthalpy
+data: data%temper%enthalpy(up,:,:)
+coordinates: lon lat
+
+[flwa]
+dimensions: time, level, y1, x1
+units: pascal**(-n) year**(-1)
+long_name: Pre-exponential flow law parameter
+data: data%temper%flwa(up,:,:)
+factor: scale_flwa
+load: 1
+coordinates: lon lat
+
+[flwastag]
+dimensions: time, staglevel, y1, x1
+units: pascal**(-n) year**(-1)
+long_name: Pre-exponential flow law parameter
+data: data%temper%flwa(up,:,:)
+factor: scale_flwa
+# hot=0 because we do not want the stag version to be in the hotvars definition.
+# The conversion to stag happens if needed in check_for_tempstag
+# but we do want it loadable in case flwa is listed as hot.
+load: 1
+coordinates: lon lat
+
+[efvs]
+dimensions: time, staglevel, y1, x1
+units: Pascal * years
+long_name: effective viscosity
+data: data%stress%efvs(up,:,:)
+factor: scale_efvs
+coordinates: lon lat
+
+[temp]
+dimensions: time, level, y1, x1
+units: degree_Celsius
+long_name: ice temperature
+data: data%temper%temp(up,1:data%general%ewn,1:data%general%nsn)
+standard_name: land_ice_temperature
+load: 1
+coordinates: lon lat
+
+[tempstag]
+dimensions: time, stagwbndlevel, y1, x1
+units: degree_Celsius
+long_name: ice temperature on staggered vertical levels with boundaries
+data: data%temper%temp(up,1:data%general%ewn,1:data%general%nsn)
+standard_name: land_ice_temperature_stag
+# hot=0 because we do not want the stag version to be in the hotvars definition.
+# The conversion to stag happens if needed in check_for_tempstag.
+load: 1
+coordinates: lon lat
+
+[litho_temp]
+dimensions: time, lithoz, y1, x1
+units: degree_Celsius
+long_name: lithosphere temperature
+data: data%lithot%temp
+load: 1
+coordinates: lon lat
+
+#[age]
+#dimensions: time, level, y1, x1
+#units: year
+#long_name: ice age
+#data: data%geometry%age(up,:,:)
+#standard_name: land_ice_age
+#factor: tim0/scyr
+#load: 0
+#coordinates: lon lat
+
+#[f_ground]
+#dimensions: time, y0, x0
+#units: unitless [0,1]
+#long_name: grounded ice fraction
+#data: data%geometry%f_ground
+#standard_name: grounded_fraction
+#load: 0
+#coordinates: lon lat
+
+#TODO - Are these gline fields needed?
+#[gl_ew]
+#dimensions: time, y1, x0
+#units: meter
+#long_name: grounding line movement in ew directions
+#data: data%ground%gl_ew
+#coordinates: lon lat
+
+#[gl_ns]
+#dimensions: time, y0, x1
+#units: meter
+#long_name: grounding line movement in ns directions
+#data: data%ground%gl_ns
+#coordinates: lon lat
+
+#[gline_flux]
+#dimensions: time, y1, x1
+#units: meter2/year
+#long_name: grounding line flux
+#data: data%ground%gline_flux
+#coordinates: lon lat
+
+[rho_ice]
+dimensions: time
+units: kg/meter3
+long_name: ice density
+data: rhoi
+factor: noscale
+standard_name: rho_ice
+
+[rho_seawater]
+dimensions: time
+units: kg/meter3
+long_name: seawater density
+data: rhoo
+factor: noscale
+standard_name: rho_seawater
+
+[gravity]
+dimensions: time
+units: meter/s/s
+long_name: gravitational acceleration
+data: grav
+factor: noscale
+standard_name: gravity
+
+[seconds_per_year]
+dimensions: time
+units: s/yr
+long_name: seconds per year
+data: scyr
+factor: noscale
+standard_name: seconds_per_year
+
+[ice_specific_heat]
+dimensions: time
+units: J/kg/K
+long_name: ice specific heat
+data: shci
+factor: noscale
+standard_name: ice_specific_heat
+
+[ice_thermal_conductivity]
+dimensions: time
+units: J/(K kg)
+long_name: ice thermal conductivity
+data: coni
+factor: noscale
+standard_name: ice_thermal_conductivity
+
+[dissip]
+dimensions: time, level, y1, x1
+units: deg C/yr
+long_name: dissipation rate (W m-3) divided by rhoi Ci
+data: data%temper%dissip(up,:,:)
+factor: scyr
+load: 1
+coordinates: lon lat
+
+[dissipstag]
+dimensions: time, staglevel, y1, x1
+units: deg C/yr
+long_name: dissipation rate (W m-3) divided by rhoi Ci
+data: data%temper%dissip(up,:,:)
+factor: scyr
+load: 1
+coordinates: lon lat
+
+[adv_cfl_dt]
+dimensions: time
+units: years
+long_name: advective CFL maximum time step
+data: data%numerics%adv_cfl_dt
+
+[diff_cfl_dt]
+dimensions: time
+units: years
+long_name: diffusive CFL maximum time step
+data: data%numerics%diff_cfl_dt
diff --git a/components/cism/glimmer-cism/libglide/glide_velo.F90 b/components/cism/glimmer-cism/libglide/glide_velo.F90
new file mode 100644
index 0000000000..9994e3f8e6
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_velo.F90
@@ -0,0 +1,1181 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_velo.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glide_velo
+
+ !> Contains routines which handle various aspects of velocity in the model,
+ !> not only the bulk ice velocity, but also basal sliding, and vertical grid
+ !> velocities, etc.
+
+ use glide_types
+ use glimmer_global, only : dp
+ use glimmer_physcon, only : rhoi, grav, gn
+ use glimmer_paramets, only : thk0, len0, vis0, vel0
+
+ implicit none
+
+ private vertintg
+
+ ! some private parameters
+ integer, private, parameter :: p1 = gn+1
+ integer, private, parameter :: p2 = gn-1
+ integer, private, parameter :: p3 = 2*gn+1
+ integer, private, parameter :: p4 = gn+2
+
+ real(dp),private, parameter :: cflow = -2.0d0*vis0*(rhoi*grav)**gn*thk0**p3/(8.0d0*vel0*len0**gn)
+
+contains
+
+!TODO - Pretty sure that none of the arrays in init_velo are needed for Glissade,
+! so we may not need to call this subroutine from glissade_initialise.
+! Move allocation/deallocation to subroutines in glide_types?
+! Some velowk arrays are used in wvelintg, but could rewrite wvelintg without these arrays.
+
+  subroutine init_velo(model)
+    !> initialise velocity module
+    !>
+    !> Allocates the velowk work arrays and precomputes constants that the
+    !> velocity routines reuse every timestep: sigma-layer quadrature weights
+    !> (depth, dups, dupsw, depthw), flow-law coefficients (fact), and basal
+    !> traction parameters unpacked from model%paramets%bpar(1:5).
+    use glimmer_physcon, only : arrmll, arrmlh, gascon, actenl, actenh, scyr, pi
+    implicit none
+    type(glide_global_type) :: model
+
+    integer ewn, nsn, upn
+    integer up
+
+    ewn=model%general%ewn
+    nsn=model%general%nsn
+    upn=model%general%upn
+
+    allocate(model%velowk%fslip(ewn-1,nsn-1))
+
+    allocate(model%velowk%depth(upn))
+    allocate(model%velowk%dintflwa(ewn-1,nsn-1))
+
+    ! Vertical quadrature weights for the shallow-ice velocity integral:
+    ! (layer midpoint in sigma)**gn times the layer thickness; the top-most
+    ! slot (index upn) is padded with zero.
+    model%velowk%depth = (/ (((model%numerics%sigma(up+1)+model%numerics%sigma(up))/2.0d0)**gn &
+         *(model%numerics%sigma(up+1)-model%numerics%sigma(up)),up=1,upn-1),0.0d0 /)
+
+    allocate(model%velowk%dups(upn))
+    model%velowk%dups = (/ (model%numerics%sigma(up+1) - model%numerics%sigma(up), up=1,upn-1),0.0d0 /)
+
+    allocate(model%velowk%dupsw (upn))
+    allocate(model%velowk%depthw(upn))
+    allocate(model%velowk%suvel (upn))
+    allocate(model%velowk%svvel (upn))
+
+    ! Calculate the differences between adjacent sigma levels -------------------------
+
+    model%velowk%dupsw = (/ (model%numerics%sigma(up+1)-model%numerics%sigma(up), up=1,upn-1), 0.0d0 /)
+
+    ! Calculate the value of sigma for the levels between the standard ones -----------
+
+    model%velowk%depthw = (/ ((model%numerics%sigma(up+1)+model%numerics%sigma(up)) / 2.0d0, up=1,upn-1), 0.0d0 /)
+
+    model%velowk%fact = (/ model%paramets%flow_enhancement_factor* arrmlh / vis0, &   ! Value of a when T* is above -263K
+                           model%paramets%flow_enhancement_factor* arrmll / vis0, &   ! Value of a when T* is below -263K
+                           -actenh / gascon,  &        ! Value of -Q/R when T* is above -263K
+                           -actenl / gascon/)          ! Value of -Q/R when T* is below -263K
+
+    ! Unpack basal traction parameters bpar(1:5); trcmin/trcmax are converted
+    ! from per-year to per-second and then non-dimensionalised by trc0.
+    ! NOTE(review): assumes velowk%trc0 is already set before this call -- confirm.
+    model%velowk%watwd  = model%paramets%bpar(1)
+    model%velowk%watct  = model%paramets%bpar(2)
+    model%velowk%trcmin = model%paramets%bpar(3) / scyr
+    model%velowk%trcmax = model%paramets%bpar(4) / scyr
+    model%velowk%marine = model%paramets%bpar(5)
+    model%velowk%trcmax = model%velowk%trcmax / model%velowk%trc0
+    model%velowk%trcmin = model%velowk%trcmin / model%velowk%trc0
+    model%velowk%c(1)   = (model%velowk%trcmax + model%velowk%trcmin) / 2.0d0
+    model%velowk%c(2)   = (model%velowk%trcmax - model%velowk%trcmin) / 2.0d0
+    model%velowk%c(3)   = (thk0 * pi) / model%velowk%watwd
+    model%velowk%c(4)   = pi*(model%velowk%watct / model%velowk%watwd)
+
+
+  end subroutine init_velo
+
+
+
+ !*****************************************************************************
+ ! new velo functions come here
+ !*****************************************************************************
+
+  subroutine velo_integrate_flwa(velowk,stagthck,flwa)
+
+    !> this routine calculates the part of the vertically averaged velocity
+    !> field which solely depends on the temperature
+    !> (The integral in eq. 3.22d)
+    !>
+    !> The result is stored in velowk%dintflwa on the staggered grid;
+    !> cells with zero staggered thickness get dintflwa = 0.
+
+    implicit none
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+    type(glide_velowk),       intent(inout) :: velowk
+    real(dp),dimension(:,:),  intent(in)    :: stagthck  !> ice thickness on staggered grid
+    real(dp),dimension(:,:,:),intent(in)    :: flwa      !> ice flow factor
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+    real(dp),dimension(size(flwa,1)) :: hrzflwa, intflwa
+    integer :: ew,ns,up,ewn,nsn,upn
+
+    upn=size(flwa,1) ; ewn=size(flwa,2) ; nsn=size(flwa,3)
+
+    do ns = 1,nsn-1
+       do ew = 1,ewn-1
+          if (stagthck(ew,ns) /= 0.0d0) then
+
+             ! Sum of flwa over the four unstaggered neighbours of this
+             ! staggered-grid point (column profile of 4*A).
+             hrzflwa = flwa(:,ew,ns) + flwa(:,ew,ns+1) + flwa(:,ew+1,ns) + flwa(:,ew+1,ns+1)
+             intflwa(upn) = 0.0d0
+
+             !Perform inner integration.
+             do up = upn-1, 1, -1
+                intflwa(up) = intflwa(up+1) + velowk%depth(up) * (hrzflwa(up)+hrzflwa(up+1))
+             end do
+
+             ! Outer (trapezium-rule) integral, scaled by the flow constant.
+             velowk%dintflwa(ew,ns) = cflow * vertintg(velowk,intflwa)
+
+          else
+
+             velowk%dintflwa(ew,ns) = 0.0d0
+
+          end if
+       end do
+    end do
+  end subroutine velo_integrate_flwa
+
+ !*****************************************************************************
+
+ subroutine velo_calc_diffu(velowk,stagthck,dusrfdew,dusrfdns,diffu)
+
+ !> calculate diffusivities
+
+ implicit none
+
+ !------------------------------------------------------------------------------------
+ ! Subroutine arguments
+ !------------------------------------------------------------------------------------
+ type(glide_velowk), intent(inout) :: velowk
+ real(dp),dimension(:,:), intent(in) :: stagthck
+ real(dp),dimension(:,:), intent(in) :: dusrfdew
+ real(dp),dimension(:,:), intent(in) :: dusrfdns
+ real(dp),dimension(:,:), intent(out) :: diffu
+
+
+ where (stagthck /= 0.0d0)
+ diffu = velowk%dintflwa * stagthck**p4 * sqrt(dusrfdew**2 + dusrfdns**2)**p2
+ elsewhere
+ diffu = 0.0d0
+ end where
+
+ end subroutine velo_calc_diffu
+
+ !*****************************************************************************
+
+  subroutine velo_calc_velo(velowk,     stagthck,  &
+                            dusrfdew,   dusrfdns,  &
+                            flwa,       diffu,     &
+                            ubas,       vbas,      &
+                            uvel,       vvel,      &
+                            uflx,       vflx,      &
+                            velnorm)
+
+    !> calculate 3D horizontal velocity field and 2D flux field from diffusivity
+    !>
+    !> Integrates upward from the basal velocity (ubas,vbas) at level upn,
+    !> using the precomputed column integral velowk%dintflwa and the
+    !> sigma-layer weights velowk%depth. Also returns the 2D fluxes
+    !> uflx/vflx = diffu*grad(usrf) + ubas*H, and the speed velnorm.
+    implicit none
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+    type(glide_velowk),       intent(inout) :: velowk
+    real(dp),dimension(:,:),  intent(in)    :: stagthck
+    real(dp),dimension(:,:),  intent(in)    :: dusrfdew
+    real(dp),dimension(:,:),  intent(in)    :: dusrfdns
+    real(dp),dimension(:,:,:),intent(in)    :: flwa
+    real(dp),dimension(:,:),  intent(in)    :: diffu
+    real(dp),dimension(:,:),  intent(in)    :: ubas
+    real(dp),dimension(:,:),  intent(in)    :: vbas
+    real(dp),dimension(:,:,:),intent(out)   :: uvel
+    real(dp),dimension(:,:,:),intent(out)   :: vvel
+    real(dp),dimension(:,:),  intent(out)   :: uflx
+    real(dp),dimension(:,:),  intent(out)   :: vflx
+    real(dp),dimension(:,:,:), intent(out)  :: velnorm
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+    real(dp),dimension(size(flwa,1)) :: hrzflwa
+    real(dp) :: factor
+    real(dp),dimension(3) :: const
+    integer :: ew,ns,up,ewn,nsn,upn
+
+    upn=size(flwa,1) ; ewn=size(stagthck,1) ; nsn=size(stagthck,2)
+
+    ! Note: Here (confusingly), nsn = size(stagthck,2) = model%general%nsn-1
+    !                           ewn = size(stagthck,1) = model%general%ewn-1
+
+    do ns = 1,nsn
+       do ew = 1,ewn
+
+          if (stagthck(ew,ns) /= 0.0d0) then
+
+             vflx(ew,ns) = diffu(ew,ns) * dusrfdns(ew,ns) + vbas(ew,ns) * stagthck(ew,ns)
+             uflx(ew,ns) = diffu(ew,ns) * dusrfdew(ew,ns) + ubas(ew,ns) * stagthck(ew,ns)
+
+             ! Basal boundary condition: velocity at the lowest level is the
+             ! prescribed sliding velocity.
+             uvel(upn,ew,ns) = ubas(ew,ns)
+             vvel(upn,ew,ns) = vbas(ew,ns)
+
+             hrzflwa = flwa(:,ew,ns) + flwa(:,ew,ns+1) + flwa(:,ew+1,ns) + flwa(:,ew+1,ns+1)
+
+             ! Guard against division by zero where dintflwa (or H) vanishes.
+             factor = velowk%dintflwa(ew,ns)*stagthck(ew,ns)
+             if (factor /= 0.0d0) then
+
+                const(2) = cflow * diffu(ew,ns) / factor
+
+                const(3) = const(2) * dusrfdns(ew,ns)
+                const(2) = const(2) * dusrfdew(ew,ns)
+             else
+                const(2:3) = 0.0d0
+             end if
+
+             ! Integrate the shear upward from the bed (eq. 8, Payne and Dongelmans).
+             do up = upn-1, 1, -1
+                const(1) = velowk%depth(up) * (hrzflwa(up)+hrzflwa(up+1))
+                uvel(up,ew,ns) = uvel(up+1,ew,ns) + const(1) * const(2)
+                vvel(up,ew,ns) = vvel(up+1,ew,ns) + const(1) * const(3)
+             end do
+
+          else
+
+             uvel(:,ew,ns) = 0.0d0
+             vvel(:,ew,ns) = 0.0d0
+             uflx(ew,ns) = 0.0d0
+             vflx(ew,ns) = 0.0d0
+
+          end if
+       end do
+    end do
+
+    ! horizontal ice speed (mainly for diagnostic purposes)
+
+    velnorm(:,:,:) = sqrt((uvel(:,:,:)**2 + vvel(:,:,:)**2))
+
+  end subroutine velo_calc_velo
+
+ !*****************************************************************************
+ ! old velo functions come here
+ !*****************************************************************************
+
+  subroutine slipvelo(model,flag1,btrc,ubas,vbas)
+
+    !> Calculate the basal slip velocity and the value of $B$, the free parameter
+    !> in the basal velocity equation (though I'm not sure that $B$ is used anywhere
+    !> else).
+    !>
+    !> flag1 selects one of four stages/methods (see the case comments below);
+    !> cases 1-3 are the pieces of a Picard iteration for the thickness solver.
+
+    implicit none
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+
+    type(glide_global_type) :: model                  !> model instance
+    integer, intent(in)                :: flag1       !> \texttt{flag1} sets the calculation
+                                                      !> method to use for the basal velocity
+                                                      !> (corresponded to \texttt{whichslip} in the
+                                                      !> old model.
+    real(dp),dimension(:,:),intent(in) :: btrc        !> The basal slip coefficient.
+    real(dp),dimension(:,:),intent(out) :: ubas       !> The $x$ basal velocity (scaled)
+    real(dp),dimension(:,:),intent(out) :: vbas       !> The $y$ basal velocity (scaled)
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+
+    real(dp), parameter :: rhograv = - rhoi * grav
+    integer :: nsn,ewn
+
+    ! Get array sizes -------------------------------------------------------------------
+
+    ewn=size(btrc,1) ; nsn=size(btrc,2)
+
+    !------------------------------------------------------------------------------------
+    ! Main calculation starts here
+    !------------------------------------------------------------------------------------
+
+    select case(flag1)
+    case(0)
+
+       ! Linear function of gravitational driving stress ---------------------------------
+
+       where (model%numerics%thklim < model%geomderv%stagthck)
+          ubas = btrc * rhograv * model%geomderv%stagthck * model%geomderv%dusrfdew
+          vbas = btrc * rhograv * model%geomderv%stagthck * model%geomderv%dusrfdns
+       elsewhere
+          ubas = 0.0d0
+          vbas = 0.0d0
+       end where
+
+    case(1)
+
+       ! *tp* option to be used in picard iteration for thck
+       ! *tp* start by find constants which dont vary in iteration
+       ! (only fslip is set here; ubas/vbas are left untouched)
+
+       model%velowk%fslip = rhograv * btrc
+
+    case(2)
+
+       ! *tp* option to be used in picard iteration for thck
+       ! *tp* called once per non-linear iteration, set uvel to ub * H /(ds/dx) which is
+       ! *tp* a diffusivity for the slip term (note same in x and y)
+
+       where (model%numerics%thklim < model%geomderv%stagthck)
+          ubas = model%velowk%fslip * model%geomderv%stagthck**2
+       elsewhere
+          ubas = 0.0d0
+       end where
+
+    case(3)
+
+       ! *tp* option to be used in picard iteration for thck
+       ! *tp* finally calc ub and vb from diffusivities
+       ! NOTE(review): this case reads ubas (declared intent(out)), relying on
+       ! the value set by a prior case(2) call -- confirm callers always do this.
+
+       where (model%numerics%thklim < model%geomderv%stagthck)
+          vbas = ubas * model%geomderv%dusrfdns / model%geomderv%stagthck
+          ubas = ubas * model%geomderv%dusrfdew / model%geomderv%stagthck
+       elsewhere
+          ubas = 0.0d0
+          vbas = 0.0d0
+       end where
+
+    case default
+       ubas = 0.0d0
+       vbas = 0.0d0
+    end select
+
+  end subroutine slipvelo
+
+!------------------------------------------------------------------------------------------
+
+  subroutine zerovelo(velowk,sigma,flag,stagthck,dusrfdew,dusrfdns,flwa,ubas,vbas,uvel,vvel,uflx,vflx,diffu)
+
+    !> Performs the velocity calculation. This subroutine is called with
+    !> different values of \texttt{flag}, depending on exactly what we want to calculate.
+    !>
+    !> flag = 0 : full one-shot computation of uvel/vvel/uflx/vflx/diffu
+    !> flag = 1 : only the temperature-dependent column integral (velowk%dintflwa),
+    !>            equivalent to velo_integrate_flwa
+    !> flag = 2 : only diffu, equivalent to velo_calc_diffu
+    !> flag = 3 : velocities and fluxes from a previously computed diffu,
+    !>            equivalent to velo_calc_velo (without velnorm)
+
+    implicit none
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+
+    type(glide_velowk),       intent(inout) :: velowk
+    real(dp),dimension(:),    intent(in)    :: sigma
+    integer,                  intent(in)    :: flag
+    real(dp),dimension(:,:),  intent(in)    :: stagthck
+    real(dp),dimension(:,:),  intent(in)    :: dusrfdew
+    real(dp),dimension(:,:),  intent(in)    :: dusrfdns
+    real(dp),dimension(:,:,:),intent(in)    :: flwa
+    real(dp),dimension(:,:),  intent(in)    :: ubas
+    real(dp),dimension(:,:),  intent(in)    :: vbas
+    real(dp),dimension(:,:,:),intent(out)   :: uvel
+    real(dp),dimension(:,:,:),intent(out)   :: vvel
+    real(dp),dimension(:,:),  intent(out)   :: uflx
+    real(dp),dimension(:,:),  intent(out)   :: vflx
+    real(dp),dimension(:,:),  intent(out)   :: diffu
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+
+
+    real(dp),dimension(size(sigma)) :: hrzflwa, intflwa
+    real(dp),dimension(3)           :: const
+
+    integer :: ew,ns,up,ewn,nsn,upn
+
+    !------------------------------------------------------------------------------------
+
+    upn=size(sigma) ; ewn=size(ubas,1) ; nsn=size(ubas,2)
+
+
+    !------------------------------------------------------------------------------------
+
+    select case(flag)
+    case(0)
+
+       do ns = 1,nsn
+          do ew = 1,ewn
+
+             if (stagthck(ew,ns) /= 0.0d0) then
+
+                ! Set velocity to zero at base of column
+
+                uvel(upn,ew,ns) = 0.0d0
+                vvel(upn,ew,ns) = 0.0d0
+
+                ! Get column profile of Glen's A
+
+                hrzflwa = flwa(:,ew,ns) + flwa(:,ew,ns+1) + flwa(:,ew+1,ns) + flwa(:,ew+1,ns+1)
+
+                ! Calculate coefficient for integration
+
+                const(1) = cflow * stagthck(ew,ns)**p1 * sqrt(dusrfdew(ew,ns)**2 + dusrfdns(ew,ns)**2)**p2
+
+                ! Do first step of finding u according to (8) in Payne and Dongelmans
+
+                do up = upn-1, 1, -1
+                   uvel(up,ew,ns) = uvel(up+1,ew,ns) + const(1) * velowk%depth(up) * sum(hrzflwa(up:up+1))
+                end do
+
+                ! Calculate u diffusivity (?)
+
+                diffu(ew,ns) = vertintg(velowk,uvel(:,ew,ns)) * stagthck(ew,ns)
+
+                ! Complete calculation of u and v
+                ! (project the scalar column profile onto the two surface-gradient
+                ! components, then add the basal sliding velocity)
+
+                vvel(:,ew,ns) = uvel(:,ew,ns) * dusrfdns(ew,ns) + vbas(ew,ns)
+                uvel(:,ew,ns) = uvel(:,ew,ns) * dusrfdew(ew,ns) + ubas(ew,ns)
+
+                ! Calculate ice fluxes
+
+                uflx(ew,ns) = diffu(ew,ns) * dusrfdew(ew,ns) + ubas(ew,ns) * stagthck(ew,ns)
+                vflx(ew,ns) = diffu(ew,ns) * dusrfdns(ew,ns) + vbas(ew,ns) * stagthck(ew,ns)
+
+             else
+
+                ! Where there is no ice, set everything to zero.
+
+                uvel(:,ew,ns) = 0.0d0
+                vvel(:,ew,ns) = 0.0d0
+                uflx(ew,ns) = 0.0d0
+                vflx(ew,ns) = 0.0d0
+                diffu(ew,ns) = 0.0d0
+
+             end if
+
+          end do
+       end do
+
+    case(1)
+
+       do ns = 1,nsn
+          do ew = 1,ewn
+             if (stagthck(ew,ns) /= 0.0d0) then
+
+                hrzflwa = flwa(:,ew,ns) + flwa(:,ew,ns+1) + flwa(:,ew+1,ns) + flwa(:,ew+1,ns+1)
+                intflwa(upn) = 0.0d0
+
+                do up = upn-1, 1, -1
+                   intflwa(up) = intflwa(up+1) + velowk%depth(up) * sum(hrzflwa(up:up+1))
+                end do
+
+                velowk%dintflwa(ew,ns) = cflow * vertintg(velowk,intflwa)
+
+             else
+
+                velowk%dintflwa(ew,ns) = 0.0d0
+
+             end if
+          end do
+       end do
+
+    case(2)
+
+       where (stagthck /= 0.0d0)
+          diffu = velowk%dintflwa * stagthck**p4 * sqrt(dusrfdew**2 + dusrfdns**2)**p2
+       elsewhere
+          diffu = 0.0d0
+       end where
+
+    case(3)
+
+       do ns = 1,nsn
+          do ew = 1,ewn
+             if (stagthck(ew,ns) /= 0.0d0) then
+
+                vflx(ew,ns) = diffu(ew,ns) * dusrfdns(ew,ns) + vbas(ew,ns) * stagthck(ew,ns)
+                uflx(ew,ns) = diffu(ew,ns) * dusrfdew(ew,ns) + ubas(ew,ns) * stagthck(ew,ns)
+
+                uvel(upn,ew,ns) = ubas(ew,ns)
+                vvel(upn,ew,ns) = vbas(ew,ns)
+
+                hrzflwa = flwa(:,ew,ns) + flwa(:,ew,ns+1) + flwa(:,ew+1,ns) + flwa(:,ew+1,ns+1)
+
+                ! Guard against division by zero where dintflwa vanishes.
+                if (velowk%dintflwa(ew,ns) /= 0.0d0) then
+
+                   const(2) = cflow * diffu(ew,ns) / velowk%dintflwa(ew,ns)/stagthck(ew,ns)
+
+                   const(3) = const(2) * dusrfdns(ew,ns)
+                   const(2) = const(2) * dusrfdew(ew,ns)
+                else
+                   const(2:3) = 0.0d0
+                end if
+
+                do up = upn-1, 1, -1
+                   const(1) = velowk%depth(up) * sum(hrzflwa(up:up+1))
+                   uvel(up,ew,ns) = uvel(up+1,ew,ns) + const(1) * const(2)
+                   vvel(up,ew,ns) = vvel(up+1,ew,ns) + const(1) * const(3)
+                end do
+
+             else
+
+                uvel(:,ew,ns) = 0.0d0
+                vvel(:,ew,ns) = 0.0d0
+                uflx(ew,ns) = 0.0d0
+                vflx(ew,ns) = 0.0d0
+
+             end if
+          end do
+       end do
+
+    end select
+
+  end subroutine zerovelo
+
+!------------------------------------------------------------------------------------------
+
+  subroutine glide_velo_vertical(model)
+
+    type(glide_global_type), intent(inout) :: model   ! model instance
+
+    ! Compute the ice vertical velocity
+
+    ! This is a new subroutine created by combining calls to several existing subroutines.
+
+    ! Note: It is now called at the end of glide_tstep_p3, so that exact restart is easier.
+    !       In older versions of Glimmer the vertical velocity was computed at the start of
+    !       the temperature calculation in glide_tstep_p1.
+
+    ! Sequence: (1) time derivatives of thck and usrf, (2) grid vertical
+    ! velocity wgrd, (3) ice vertical velocity wvel (optionally corrected to
+    ! satisfy the kinematic upper BC), (4) optional periodic EW BC.
+
+    ! Calculate time-derivatives of thickness and upper surface elevation ------------
+
+    call timeders(model%thckwk,            &
+                  model%geometry%thck,     &
+                  model%geomderv%dthckdtm, &
+                  model%geometry%thck_index, &
+                  model%numerics%time,     &
+                  1)
+
+    call timeders(model%thckwk,            &
+                  model%geometry%usrf,     &
+                  model%geomderv%dusrfdtm, &
+                  model%geometry%thck_index, &
+                  model%numerics%time,     &
+                  2)
+
+    ! Calculate the vertical velocity of the grid ------------------------------------
+
+    call gridwvel(model%numerics%sigma,  &
+                  model%numerics%thklim, &
+                  model%velocity%uvel,   &
+                  model%velocity%vvel,   &
+                  model%geomderv,        &
+                  model%geometry%thck,   &
+                  model%velocity%wgrd)
+
+    ! Calculate the actual vertical velocity; method depends on whichwvel ------------
+
+    select case(model%options%whichwvel)
+
+    case(VERTINT_STANDARD)         ! Usual vertical integration
+
+       call wvelintg(model%velocity%uvel,                        &
+                     model%velocity%vvel,                        &
+                     model%geomderv,                             &
+                     model%numerics,                             &
+                     model%velowk,                               &
+                     model%velocity%wgrd(model%general%upn,:,:), &
+                     model%geometry%thck,                        &
+                     model%temper%bmlt,                          &
+                     model%velocity%wvel)
+
+    case(VERTINT_KINEMATIC_BC)    ! Vertical integration constrained so kinematic upper BC obeyed.
+
+       call wvelintg(model%velocity%uvel,                        &
+                     model%velocity%vvel,                        &
+                     model%geomderv,                             &
+                     model%numerics,                             &
+                     model%velowk,                               &
+                     model%velocity%wgrd(model%general%upn,:,:), &
+                     model%geometry%thck,                        &
+                     model%temper% bmlt,                         &
+                     model%velocity%wvel)
+
+       call chckwvel(model%numerics,                             &
+                     model%geomderv,                             &
+                     model%velocity%uvel(1,:,:),                 &
+                     model%velocity%vvel(1,:,:),                 &
+                     model%velocity%wvel,                        &
+                     model%geometry%thck,                        &
+                     model%climate% acab)
+
+    end select
+
+    ! apply periodic ew BC
+
+    if (model%options%periodic_ew) then
+       call wvel_ew(model)
+    end if
+
+  end subroutine glide_velo_vertical
+
+!---------------------------------------------------------------
+
+  subroutine timeders(thckwk,ipvr,opvr,mask,time,which)
+
+    !> Calculates the time-derivative of a field. This subroutine is used by
+    !> the Glimmer temperature solver only.
+    !>
+    !> The previous value of the field is kept in thckwk%olds(:,:,which), so
+    !> each distinct field must use its own 'which' slot. On the first call
+    !> (or when time has not advanced) the derivative is returned as zero.
+
+    use glimmer_paramets, only : tim0
+    use glimmer_physcon, only: scyr
+
+    implicit none
+
+    type(glide_thckwk) :: thckwk                  !> Derived-type containing work data
+    real(dp), intent(out), dimension(:,:) :: opvr !> output (time derivative) field
+    real(dp), intent(in),  dimension(:,:) :: ipvr !> input field
+    real(dp), intent(in)                  :: time !> current time
+    integer,  intent(in),  dimension(:,:) :: mask !> mask for calculation
+    integer,  intent(in)                  :: which !> selector for stored field
+
+    real(dp) :: factor
+
+    factor = (time - thckwk%oldtime)
+    if (factor == 0.d0) then
+       opvr = 0.0d0
+    else
+       factor = 1.d0/factor
+       ! (tim0/scyr) rescales the scaled model time to years
+       where (mask /= 0)
+          opvr = (tim0/scyr) * (ipvr - thckwk%olds(:,:,which)) * factor
+       elsewhere
+          opvr = 0.0d0
+       end where
+    end if
+
+    ! Store the current field for the next call.
+    thckwk%olds(:,:,which) = ipvr
+
+    ! Only advance oldtime after the final field (which == nwhich) has been
+    ! processed, so all fields in one timestep share the same time interval.
+    if (which == thckwk%nwhich) then
+       thckwk%oldtime = time
+    end if
+
+  end subroutine timeders
+
+!------------------------------------------------------------------------------------------
+
+  subroutine gridwvel(sigma,thklim,uvel,vvel,geomderv,thck,wgrd)
+
+    !> Calculates the vertical velocity of the grid, and returns it in \texttt{wgrd}. This
+    !> is necessary because the model uses a sigma coordinate system.
+    !> The equation for grid velocity is:
+    !> \[
+    !> \mathtt{wgrd}(x,y,\sigma)=\frac{\partial s}{\partial t}+\mathbf{U}\cdot\nabla s
+    !> -\sigma\left(\frac{\partial H}{\partial t}+\mathbf{U}\cdot\nabla H\right)
+    !> \]
+    !> Compare this with equation A1 in {\em Payne and Dongelmans}.
+
+    !TODO Change the name of subroutine gridwvel?  It computes wgrd but not wvel.
+
+!!    use parallel
+    implicit none
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+
+    real(dp),dimension(:),    intent(in)  :: sigma    !> Array holding values of sigma
+                                                      !> at each vertical level
+    real(dp),                 intent(in)  :: thklim   !> Minimum thickness to be considered
+                                                      !> when calculating the grid velocity.
+                                                      !> This is in m, divided by \texttt{thk0}.
+    real(dp),dimension(:,:,:),intent(in)  :: uvel     !> The $x$-velocity field (scaled). Velocity
+                                                      !> is on the staggered grid
+    real(dp),dimension(:,:,:),intent(in)  :: vvel     !> The $y$-velocity field (scaled). Velocity
+                                                      !> is on the staggered grid
+    type(glide_geomderv),     intent(in)  :: geomderv !> Derived type holding temporal
+                                                      !> and horizontal derivatives of
+                                                      !> ice-sheet thickness and upper
+                                                      !> surface elevation
+    real(dp),dimension(:,:),  intent(in)  :: thck     !> Ice-sheet thickness (divided by
+                                                      !> \texttt{thk0})
+    real(dp),dimension(:,:,:),intent(out) :: wgrd     !> The grid velocity at each point. This
+                                                      !> is the output.
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+
+    integer :: ns,ew,nsn,ewn
+
+    !------------------------------------------------------------------------------------
+
+    ewn=size(wgrd,2) ; nsn=size(wgrd,3)
+
+    ! Interior points only; the velocity average uses the four staggered-grid
+    ! neighbours (ew-1:ew, ns-1:ns), hence the factor 1/16 = 1/(4*4).
+    do ns = 2,nsn-1
+       do ew = 2,ewn-1
+          if (thck(ew,ns) > thklim) then
+             wgrd(:,ew,ns) = geomderv%dusrfdtm(ew,ns) - sigma * geomderv%dthckdtm(ew,ns) + &
+                  ((uvel(:,ew-1,ns-1) + uvel(:,ew-1,ns) + uvel(:,ew,ns-1) + uvel(:,ew,ns)) * &
+                  (sum(geomderv%dusrfdew(ew-1:ew,ns-1:ns)) - sigma * &
+                  sum(geomderv%dthckdew(ew-1:ew,ns-1:ns))) + &
+                  (vvel(:,ew-1,ns-1) + vvel(:,ew-1,ns) + vvel(:,ew,ns-1) + vvel(:,ew,ns)) * &
+                  (sum(geomderv%dusrfdns(ew-1:ew,ns-1:ns)) - sigma * &
+                  sum(geomderv%dthckdns(ew-1:ew,ns-1:ns)))) / 16.0d0
+          else
+             wgrd(:,ew,ns) = 0.0d0
+          end if
+       end do
+    end do
+
+    !WHL - Removed halo call.  wgrd is needed only for the old temperature code, which is not supported in parallel.
+!!    call parallel_halo(wgrd)
+
+  end subroutine gridwvel
+
+!------------------------------------------------------------------------------------------
+
+  subroutine wvelintg(uvel,vvel,geomderv,numerics,velowk,wgrd,thck,bmlt,wvel)
+
+    !> Calculates the vertical velocity field, which is returned in \texttt{wvel}.
+    !> This is found by doing this integration:
+    !> \[
+    !> w(\sigma)=-\int_{1}^{\sigma}\left[\frac{\partial \mathbf{U}}{\partial \sigma}
+    !> (\sigma) \cdot (\nabla s - \sigma \nabla H) +H\nabla \cdot \mathbf{U}(\sigma)\right]d\sigma
+    !> + w(1)
+    !> \]
+    !> (This is equation 13 in {\em Payne and Dongelmans}.) Note that this is only
+    !> done if the thickness is greater than the threshold given by \texttt{numerics\%thklim}.
+
+!!    use parallel
+    implicit none
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+
+    real(dp),dimension(:,:,:), intent(in)    :: uvel     !> The $x$-velocity on the
+                                                         !> staggered grid (scaled)
+    real(dp),dimension(:,:,:), intent(in)    :: vvel     !> The $y$-velocity on the
+                                                         !> staggered grid (scaled)
+    real(dp),dimension(:,:),   intent(in)    :: thck     !> The ice thickness, divided
+                                                         !> by \texttt{thk0}
+    type(glide_geomderv),      intent(in)    :: geomderv !> Derived type holding the
+                                                         !> horizontal and temporal derivatives
+                                                         !> of the thickness and upper surface
+                                                         !> elevation.
+    type(glide_numerics),      intent(in)    :: numerics !> Derived type holding numerical
+                                                         !> parameters, including sigma values.
+    type(glide_velowk),        intent(inout) :: velowk   !> Derived type holding working arrays
+                                                         !> used by the subroutine
+    real(dp),dimension(:,:),   intent(in)    :: wgrd     !> The grid vertical velocity at
+                                                         !> the lowest model level.
+    real(dp),dimension(:,:),   intent(in)    :: bmlt     !> Basal melt-rate (scaled?) This
+                                                         !> is required in the basal boundary
+                                                         !> condition. See {\em Payne and Dongelmans}
+                                                         !> equation 14.
+    real(dp),dimension(:,:,:), intent(out)   :: wvel     !> The vertical velocity field.
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+
+    real(dp) :: dew16, dns16            ! Reciprocals of 16 times the grid-spacings
+    real(dp),dimension(6) :: cons       ! Holds temporary local values of derivatives
+    integer :: ns,ew,up                 ! Loop indicies
+    integer :: nsn,ewn,upn              ! Domain sizes
+
+    !------------------------------------------------------------------------------------
+    ! Get some values for the domain size by checking sizes of input arrays
+    !------------------------------------------------------------------------------------
+
+    upn=size(uvel,1) ; ewn=size(uvel,2) ; nsn=size(uvel,3)
+
+
+    ! Compute 1/(16*dx) and 1/(16*dy); the 16 accounts for the 4-point
+    ! staggered-grid averages multiplied together below. ------------------------------
+
+    dew16 = 1.d0/(16.0d0 * numerics%dew)
+    dns16 = 1.d0/(16.0d0 * numerics%dns)
+
+    ! ----------------------------------------------------------------------------------
+    ! Main loop over each grid-box
+    ! ----------------------------------------------------------------------------------
+
+    do ns = 2,nsn
+       do ew = 2,ewn
+          if (thck(ew,ns) > numerics%thklim) then
+
+             ! Set the bottom boundary condition ------------------------------------------
+
+             wvel(upn,ew,ns) = wgrd(ew,ns) - bmlt(ew,ns)
+
+             ! Calculate temporary local values of thickness and surface ------------------
+             ! elevation derivatives.
+
+             cons(1) = sum(geomderv%dusrfdew(ew-1:ew,ns-1:ns)) / 16.0d0
+             cons(2) = sum(geomderv%dthckdew(ew-1:ew,ns-1:ns)) / 16.0d0
+             cons(3) = sum(geomderv%dusrfdns(ew-1:ew,ns-1:ns)) / 16.0d0
+             cons(4) = sum(geomderv%dthckdns(ew-1:ew,ns-1:ns)) / 16.0d0
+             cons(5) = sum(geomderv%stagthck(ew-1:ew,ns-1:ns))
+             cons(6) = cons(5)*dns16
+             cons(5) = cons(5)*dew16
+             ! * better? (an alternative from TP's original code)
+             !cons(5) = (thck(ew-1,ns)+2.0d0*thck(ew,ns)+thck(ew+1,ns)) * dew16
+             !cons(6) = (thck(ew,ns-1)+2.0d0*thck(ew,ns)+thck(ew,ns+1)) * dns16
+
+             ! Four-point sums of the horizontal velocities in this column.
+             velowk%suvel(:) = uvel(:,ew-1,ns-1) + uvel(:,ew-1,ns) + uvel(:,ew,ns-1) + uvel(:,ew,ns)
+             velowk%svvel(:) = vvel(:,ew-1,ns-1) + vvel(:,ew-1,ns) + vvel(:,ew,ns-1) + vvel(:,ew,ns)
+
+             ! Loop over each model level, starting from the bottom ----------------------
+
+             do up = upn-1, 1, -1
+                wvel(up,ew,ns) = wvel(up+1,ew,ns) &
+                     - velowk%dupsw(up) * cons(5) * (sum(uvel(up:up+1,ew,ns-1:ns)) - sum(uvel(up:up+1,ew-1,ns-1:ns))) &
+                     - velowk%dupsw(up) * cons(6) * (sum(vvel(up:up+1,ew-1:ew,ns)) - sum(vvel(up:up+1,ew-1:ew,ns-1))) &
+                     - (velowk%suvel(up+1) - velowk%suvel(up)) * (cons(1) - velowk%depthw(up) * cons(2)) &
+                     - (velowk%svvel(up+1) - velowk%svvel(up)) * (cons(3) - velowk%depthw(up) * cons(4))
+             end do
+          else
+
+             ! If there isn't enough ice, set velocities to zero ----------------------------
+
+             wvel(:,ew,ns) = 0.0d0
+
+          end if
+       end do
+    end do
+
+    !WHL - Removed halo call, since wvel is needed only for the old temperature code, which is not supported in parallel.
+!!    call parallel_halo(wvel)
+
+  end subroutine wvelintg
+
+  subroutine wvel_ew(model)
+
+    !> set periodic EW boundary conditions
+    !>
+    !> Copies the interior columns next to each EW edge (ewn-1 and 2) into the
+    !> opposite boundary columns (1 and ewn) of wgrd and wvel.
+    implicit none
+    type(glide_global_type),intent(inout) :: model    !> Ice model parameters.
+
+    model%velocity%wgrd(:,1,:)                  = model%velocity%wgrd(:,model%general%ewn-1,:)
+    model%velocity%wgrd(:,model%general%ewn,:)  = model%velocity%wgrd(:,2,:)
+    model%velocity%wvel(:,1,:)                  = model%velocity%wvel(:,model%general%ewn-1,:)
+    model%velocity%wvel(:,model%general%ewn,:)  = model%velocity%wvel(:,2,:)
+
+  end subroutine wvel_ew
+
+!------------------------------------------------------------------------------------------
+
+  subroutine chckwvel(numerics,geomderv,uvel,vvel,wvel,thck,acab)
+
+    !> Constrain the vertical velocity field to obey a kinematic upper boundary
+    !> condition.
+    !>
+    !> The surface value implied by the kinematic BC is computed from
+    !> dusrfdtm, acab and the surface velocities; the difference from the
+    !> integrated wvel(1,:,:) is then distributed linearly over the column
+    !> (full correction at the surface, zero at the bed).
+
+!!    use parallel
+    implicit none
+
+    !------------------------------------------------------------------------------------
+    ! Subroutine arguments
+    !------------------------------------------------------------------------------------
+
+    type(glide_numerics),     intent(in)    :: numerics !> Numerical parameters of model
+    type(glide_geomderv),     intent(in)    :: geomderv !> Temporal and horizontal derivatives
+                                                        !> of thickness and upper ice surface
+                                                        !> elevation.
+    real(dp),dimension(:,:),  intent(in)    :: uvel     !> $x$ velocity field at top model
+                                                        !> level (scaled, on staggered grid).
+    real(dp),dimension(:,:),  intent(in)    :: vvel     !> $y$ velocity field at top model
+                                                        !> level (scaled, on staggered grid).
+    real(dp),dimension(:,:,:),intent(inout) :: wvel     !> Vertical velocity field,
+    real(dp),dimension(:,:),  intent(in)    :: thck     !> Ice thickness (scaled)
+    real(dp),dimension(:,:),  intent(in)    :: acab     !> Mass-balance (scaled)
+
+    !------------------------------------------------------------------------------------
+    ! Internal variables
+    !------------------------------------------------------------------------------------
+
+    real(dp) :: wchk
+    real(dp) :: tempcoef
+    integer  :: ns,ew,nsn,ewn
+
+    ! Get array sizes -------------------------------------------------------------------
+
+    ewn=size(thck,1) ; nsn=size(thck,2)
+
+    ! (No temporary work array is needed; comment retained from older code.) ------------
+
+
+    ! Loop over all grid-boxes ----------------------------------------------------------
+
+    do ns = 2,nsn-1
+       do ew = 2,ewn-1
+          if (thck(ew,ns) > numerics%thklim .and. wvel(1,ew,ns) /= 0) then
+
+             ! Surface vertical velocity required by the kinematic BC
+             ! (4-point staggered averages, hence the 1/16 factor).
+             wchk = geomderv%dusrfdtm(ew,ns) &
+                  - acab(ew,ns) &
+                  + (sum(uvel(ew-1:ew,ns-1:ns)) * sum(geomderv%dusrfdew(ew-1:ew,ns-1:ns)) &
+                  +  sum(vvel(ew-1:ew,ns-1:ns)) * sum(geomderv%dusrfdns(ew-1:ew,ns-1:ns))) &
+                  / 16.0d0
+
+
+             tempcoef = wchk - wvel(1,ew,ns)
+
+             ! Linear-in-sigma correction: full at the surface (sigma=0),
+             ! zero at the bed (sigma=1).
+             wvel(:,ew,ns) = wvel(:,ew,ns) + tempcoef * (1.0d0 - numerics%sigma)
+          end if
+       end do
+    end do
+
+    !WHL - Removed halo call, since wvel is needed only for the old temperature code, which is not supported in parallel.
+!!    call parallel_halo(wvel)
+
+  end subroutine chckwvel
+
+!------------------------------------------------------------------------------------------
+! PRIVATE subroutines
+!------------------------------------------------------------------------------------------
+
+!TODO - Remove function vertintg? Not currently used (glam_strs2 has its own version).
+
+ function vertintg(velowk,in)
+
+ !> Performs a depth integral using the trapezium rule.
+ !*RV The value of the input array 'in' integrated over depth.
+
+ implicit none
+
+ !------------------------------------------------------------------------------------
+ ! Subroutine arguments
+ !------------------------------------------------------------------------------------
+
+ type(glide_velowk), intent(inout) :: velowk !> Work arrays and things for this module
+ real(dp),dimension(:),intent(in) :: in !> Input array of vertical velocities (size = upn)
+ real(dp) :: vertintg
+
+ !------------------------------------------------------------------------------------
+ ! Internal variables
+ !------------------------------------------------------------------------------------
+
+ integer :: up, upn
+
+ ! Set up array of sigma intervals, if not done already ------------------------------
+
+ upn=size(in)
+
+ ! Do integration --------------------------------------------------------------------
+
+ vertintg = 0.0d0
+
+ do up = upn-1, 1, -1
+ vertintg = vertintg + (in(up)+in(up+1)) * velowk%dups(up)
+ end do
+
+ vertintg = 0.5d0*vertintg
+
+ end function vertintg
+
+!------------------------------------------------------------------------------------------
+
+ subroutine calc_btrc(model,flag,btrc)
+
+ !> Calculate the value of $B$ used for basal sliding calculations.
+
+ use glimmer_physcon, only : rhoo, rhoi
+ use glimmer_paramets, only : len0, thk0, scyr, vel0
+ implicit none
+
+ type(glide_global_type) :: model !> model instance
+ integer, intent(in) :: flag !> Flag to select method of
+ real(dp),dimension(:,:),intent(out) :: btrc !> Array of values of $B$.
+
+ !------------------------------------------------------------------------------------
+ ! Internal variables
+ !------------------------------------------------------------------------------------
+
+ real(dp) :: stagbwat, stagbmlt
+ integer :: ew,ns,nsn,ewn
+ real(dp) :: Asl = 1.8d-10 !in units N^-3 yr^-1 m^8 for case(5)
+ real(dp) :: Z !accounts for reduced basal traction due to pressure of
+ !subglacial water for case(5)
+ real(dp) :: tau !basal shear stress
+
+ !scaling
+ real(dp) :: tau_factor = 1.d-3*thk0*thk0/len0
+ !real(dp) :: tau_factor = 1.0d0
+ !------------------------------------------------------------------------------------
+
+ ewn=model%general%ewn
+ nsn=model%general%nsn
+
+ !------------------------------------------------------------------------------------
+
+ select case(flag)
+
+ case(BTRC_CONSTANT)
+ ! constant everywhere
+ ! This option is used for EISMINT-2 experiment G
+ btrc = model%velocity%bed_softness
+
+ case(BTRC_CONSTANT_BWAT)
+ ! constant where basal melt water is present, else = 0
+ ! This option can be used for EISMINT-2 experiment H, provided that
+ ! basal water is present where T = Tpmp (e.g., BWATER_LOCAL)
+
+ do ns = 1,nsn-1
+ do ew = 1,ewn-1
+ if (0.0d0 < model%temper%stagbwat(ew,ns)) then
+ btrc(ew,ns) = model%velocity%bed_softness(ew,ns)
+ else
+ btrc(ew,ns) = 0.0d0
+ end if
+ end do
+ end do
+
+ case(BTRC_CONSTANT_TPMP)
+ ! constant where basal temperature equal to pressure melting point, else = 0
+ ! This is the actual condition for EISMINT-2 experiment H, which may not be
+ ! the same as case BTRC_CONSTANT_BWAT above, depending on the hydrology
+
+ do ns = 1,nsn-1
+ do ew = 1,ewn-1
+ if (abs(model%temper%stagbpmp(ew,ns) - model%temper%stagbtemp(ew,ns))<0.001) then
+ btrc(ew,ns) = model%velocity%bed_softness(ew,ns)
+ else
+ btrc(ew,ns) = 0.0d0
+ end if
+ end do
+ end do
+
+ case(BTRC_LINEAR_BMLT)
+ ! linear function of basal melt rate
+
+ do ns = 1,nsn-1
+ do ew = 1,ewn-1
+ stagbmlt = 0.25d0*sum(model%temper%bmlt(ew:ew+1,ns:ns+1))
+
+ if (stagbmlt > 0.0d0) then
+ btrc(ew,ns) = min(model%velowk%btrac_max, &
+ model%velocity%bed_softness(ew,ns) + model%velowk%btrac_slope*stagbmlt)
+ else
+ btrc(ew,ns) = 0.0d0
+ end if
+ end do
+ end do
+
+ case(BTRC_TANH_BWAT)
+ ! tanh function of basal water depth
+ ! The 'velowk%c' parameters are derived above from the 5-part parameter bpar
+
+ do ns = 1,nsn-1
+ do ew = 1,ewn-1
+ if (0.0d0 < model%temper%stagbwat(ew,ns)) then
+
+ btrc(ew,ns) = model%velowk%c(1) + model%velowk%c(2) * tanh(model%velowk%c(3) * &
+ model%temper%stagbwat(ew,ns) - model%velowk%c(4))
+
+ if (0.0d0 > sum(model%isostasy%relx(ew:ew+1,ns:ns+1))) then
+ btrc(ew,ns) = btrc(ew,ns) * model%velowk%marine
+ end if
+ else
+ btrc(ew,ns) = 0.0d0
+ end if
+ end do
+ end do
+
+!WHL - I'm not aware of anyone using this parameterization. Commented out for now.
+!! case(6)
+!! ! increases with the third power of the basal shear stress, from Huybrechts
+
+!! Asl = model%climate%slidconst
+!! do ns = 1, nsn-1
+!! do ew = 1, ewn-1
+!NOTE - Scaling looks wrong here: stagthck and thklim should have the same scaling.
+!! if ((model%geomderv%stagthck(ew,ns)*thk0) > model%numerics%thklim) then
+!! if((model%geomderv%stagtopg(ew,ns)*thk0) > (model%climate%eus*thk0)) then
+!! Z = model%geomderv%stagthck(ew,ns)*thk0
+!! else
+!! Z = model%geomderv%stagthck(ew,ns)*thk0 + rhoi*((model%geomderv%stagtopg(ew,ns) *thk0 &
+!! - model%climate%eus*thk0)/ rhoo)
+!! end if
+
+!! if(Z <= model%numerics%thklim) then !avoid division by zero
+!! Z = model%numerics%thklim
+!! end if
+
+!! tau = ((tau_factor*model%stress%tau_x(ew,ns))**2 +&
+!! (model%stress%tau_y(ew,ns)*tau_factor)**2)**(0.5d0)
+
+!! btrc(ew,ns) = (Asl*(tau)**2)/Z !assuming that btrc is later
+!! !multiplied again by the basal shear stress
+
+!! end if
+!! end do
+!! end do
+
+ case default ! includes BTRC_ZERO
+ ! zero everywhere
+ ! This is used for EISMINT-2 experiments A to F
+ btrc = 0.0d0
+
+ end select
+
+ end subroutine calc_btrc
+
+!TODO - Remove one of the two versions of calc_basal_shear?
+
+#ifdef JEFFORIG
+ subroutine calc_basal_shear(model)
+ !> calculate basal shear stress: tau_{x,y} = -rho_i*g*H*d(H+h)/d{x,y}
+ use glimmer_physcon, only : rhoi,grav
+ implicit none
+ type(glide_global_type) :: model !> model instance
+
+
+ model%velocity%tau_x = -rhoi*grav*model%geomderv%stagthck
+ model%velocity%tau_y = model%velocity%tau_x * model%geomderv%dusrfdns
+ model%velocity%tau_x = model%velocity%tau_x * model%geomderv%dusrfdew
+ end subroutine calc_basal_shear
+#endif
+
+ subroutine calc_basal_shear(stagthck, dusrfdew, dusrfdns, tau_x, tau_y)
+
+ ! calculate basal shear stress: tau_{x,y} = -rho_i*g*H*d(H+h)/d{x,y}
+ use glimmer_physcon, only : rhoi,grav
+
+ implicit none
+ real(dp),dimension(:,:),intent(in) :: stagthck !> Ice thickness (scaled)
+ real(dp),dimension(:,:),intent(in) :: dusrfdew, dusrfdns
+ real(dp),dimension(:,:),intent(out) :: tau_x
+ real(dp),dimension(:,:),intent(out) :: tau_y
+
+ tau_x(:,:) = -rhoi*grav*stagthck(:,:)
+ tau_y(:,:) = tau_x * dusrfdns(:,:)
+ tau_x(:,:) = tau_x * dusrfdew(:,:)
+
+ !JEFF Are these replaced by the three lines above? They are not compiling. 7/28/11
+ ! model%stress%tau_x = -rhoi*grav*model%geomderv%stagthck
+ ! model%stress%tau_y = model%stress%tau_x * model%geomderv%dusrfdns
+ ! model%stress%tau_x = model%stress%tau_x * model%geomderv%dusrfdew
+
+ end subroutine calc_basal_shear
+
+!-------------------------------------------------------------------
+
+end module glide_velo
+
+!-------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/glide_vertint.F90 b/components/cism/glimmer-cism/libglide/glide_vertint.F90
new file mode 100644
index 0000000000..8e413f779e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/glide_vertint.F90
@@ -0,0 +1,164 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glide_vertint.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!TODO - Remove module glide_vertint? Not currently used.
+
+module glide_vertint
+
+ !> This module contains routines to vertically integrate fields
+ !> All 3d fields are assumed to use the (z,x,y) coordinate system,
+ !> where the top is the minimum z and the bottom is the maximum z.
+
+ use glimmer_global , only: dp
+ implicit none
+
+contains
+
+ !> Performs vertical integration, places the result on a 3d field
+ !> where each level in the 3d field is the integral of all levels
+ !> above it
+
+ subroutine vertint_output3d(infield, outfield, levels, topdown, initial_value)
+ real(dp), dimension(:,:,:), intent(in) :: infield
+ real(dp), dimension(:,:,:), intent(out) :: outfield
+ real(dp), dimension(:), intent(in) :: levels
+ logical :: topdown !> Controls the direction of integration. If true,
+ !> outfield(1,:,:) contains zeros and each level
+ !> below it accumulates another part of the
+ !> integral. If false, outfield(upn,:,:) contains
+ !> zeros and each level above it accumulates
+ !> another part of the integral
+ real(dp), dimension(:,:), intent(in), optional :: initial_value
+
+ integer :: upn
+ integer :: i
+ integer :: lower, upper, step !Loop control, parameterized based on
+ !value of topdown
+
+ real(dp) :: deltax
+
+ upn = size(levels)
+ if (topdown) then
+ lower = 2
+ upper = upn
+ step = 1
+ else
+ lower = upn-1
+ upper = 1
+ step = -1
+ end if
+
+ if (present(initial_value)) then
+ outfield(lower - step,:,:) = initial_value
+ else
+ outfield(lower - step,:,:) = 0
+ end if
+
+
+ do i = lower, upper, step
+ deltax = step*(levels(i) - levels(i - step))
+ !Apply trapezoid rule
+ outfield(i,:,:) = outfield(i - step,:,:) + .5 * deltax*(infield(i - step,:,:) + infield(i,:,:))
+ end do
+ end subroutine vertint_output3d
+
+ subroutine vertint_output2d(infield, outfield, levels, initial_value)
+ !> Vertically integrates the 3D field and places the result of the
+ !> integral on a 2D field
+ real(dp), dimension(:,:,:), intent(in) :: infield
+ real(dp), dimension(:,:), intent(out) :: outfield
+ real(dp), dimension(:), intent(in) :: levels
+
+ real(dp), dimension(:,:), intent(in), optional :: initial_value
+
+ integer :: upn
+ integer :: i
+ real(dp) :: deltax
+
+ upn = size(levels)
+
+ if (present(initial_value)) then
+ outfield = initial_value
+ else
+ outfield = 0
+ end if
+
+ do i = 2, upn
+ deltax = levels(i) - levels(i - 1)
+ outfield = outfield + .5 * deltax*(infield(i-1,:,:) + infield(i,:,:))
+ end do
+ end subroutine
+
+
+ !Contained unit test cases
+ !Based around evaluation of the integral of x^2dx from 0 to 1.
+ subroutine test_vertint()
+ real(dp), dimension(11) :: levels
+ real(dp), dimension(11,1,1) :: values
+ real(dp), dimension(1,1) :: answer
+ real(dp), dimension(1,1) :: ival
+
+ integer :: i
+ real(dp) :: val
+
+
+ !Test case where we have evenly spaced levels
+ val = 0
+ do i = 1,11
+ levels(i) = val
+ values(i,1,1) = val ** 2
+ val = val + .1
+ write(*,*) levels(i),values(i,1,1)
+ end do
+
+ ival = 0
+
+ call vertint_output2d(values, answer, levels, ival)
+ write(*,*) answer(1,1)
+
+ !Test case where we do not have evenly spaced levels
+ levels(1) = 0
+ levels(2) = .2
+ levels(3) = .4
+ levels(4) = .5
+ levels(5) = .6
+ levels(6) = .7
+ levels(7) = .8
+ levels(8) = .85
+ levels(9) = .9
+ levels(10) = .95
+ levels(11) = 1
+ do i = 1,11
+ values(i,1,1) = levels(i) ** 2
+ write(*,*) levels(i),values(i,1,1)
+ end do
+ ival = 0
+
+ call vertint_output2d(values, answer, levels, ival)
+ write(*,*) answer(1,1)
+
+ end subroutine
+end module glide_vertint
diff --git a/components/cism/glimmer-cism/libglide/isostasy.F90 b/components/cism/glimmer-cism/libglide/isostasy.F90
new file mode 100644
index 0000000000..f75dc6fb05
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/isostasy.F90
@@ -0,0 +1,213 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! isostasy.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module isostasy
+
+ !TODO - Test the isostasy for parallel simulations.
+ ! Elastic lithosphere will not easily parallelize, but local lithosphere should be OK.
+
+ !> calculate isostatic adjustment due to changing surface loads
+
+ use glimmer_global, only : dp
+ use isostasy_elastic
+
+ implicit none
+
+ private :: relaxing_mantle
+
+!-------------------------------------------------------------------------
+
+contains
+
+!-------------------------------------------------------------------------
+
+ subroutine init_isostasy(model)
+
+ !> initialise isostasy calculations
+ use parallel
+ use glide_types
+ use glimmer_physcon, only: scyr
+ use glimmer_paramets, only: tim0
+ implicit none
+
+ type(glide_global_type) :: model
+
+ if (model%isostasy%lithosphere == LITHOSPHERE_ELASTIC) then
+ call not_parallel(__FILE__,__LINE__)
+ call init_elastic(model%isostasy%rbel,model%numerics%dew)
+ end if
+
+ model%isostasy%next_calc = model%numerics%tstart
+ model%isostasy%relaxed_tau = model%isostasy%relaxed_tau * scyr / tim0
+
+ end subroutine init_isostasy
+
+!-------------------------------------------------------------------------
+
+ subroutine isos_icewaterload(model)
+
+ !> calculate surface load factors due to water and ice distribution
+
+ use glimmer_physcon
+ use glide_types
+ implicit none
+
+ type(glide_global_type) :: model
+
+ real(dp) :: ice_mass, water_depth, water_mass
+ integer :: ew,ns
+
+ do ns=1,model%general%nsn
+ do ew=1,model%general%ewn
+ ice_mass = rhoi * model%geometry%thck(ew,ns)
+
+ if (model%geometry%topg(ew,ns) - model%climate%eus < 0.d0) then ! check if we are below sea level
+
+ water_depth = model%climate%eus - model%geometry%topg(ew,ns)
+ water_mass = rhoo * water_depth
+
+ ! Just the water load due to changes in sea-level
+ model%isostasy%load_factors(ew,ns) = rhoo* model%climate%eus/rhom
+
+ ! Check if ice is not floating
+ if ( ice_mass > water_mass ) then
+ model%isostasy%load_factors(ew,ns) = model%isostasy%load_factors(ew,ns) + (ice_mass - water_mass)/rhom
+ end if
+
+ else ! bedrock is above sea level
+
+ model%isostasy%load_factors(ew,ns) = ice_mass/rhom
+
+ end if
+
+ end do
+ end do
+
+ end subroutine isos_icewaterload
+
+!-------------------------------------------------------------------------
+
+ subroutine isos_compute(model)
+
+ !> calculate isostatic adjustment due to changing surface loads
+
+ use glide_types
+ implicit none
+
+ type(glide_global_type) :: model
+
+ ! update load if necessary
+ if (model%isostasy%new_load) then
+ call isos_lithosphere(model, model%isostasy%load, model%isostasy%load_factors)
+ ! update bedrock with (non-viscous) fluid mantle
+ if (model%isostasy%asthenosphere == ASTHENOSPHERE_FLUID) then
+ model%geometry%topg = model%isostasy%relx - model%isostasy%load
+ end if
+ model%isostasy%new_load = .false.
+ end if
+
+ ! update bedrock with relaxing mantle
+ if (model%isostasy%asthenosphere == ASTHENOSPHERE_RELAXING) then
+ call relaxing_mantle(model)
+ end if
+
+ end subroutine isos_compute
+
+!-------------------------------------------------------------------------
+
+ subroutine isos_lithosphere(model,load,load_factors)
+
+ use glide_types
+ implicit none
+ type(glide_global_type) :: model
+ real(dp), dimension(:,:), intent(out) :: load !> loading effect due to load_factors
+ real(dp), dimension(:,:), intent(in) :: load_factors !> load mass divided by mantle density
+
+ if (model%isostasy%lithosphere == LITHOSPHERE_LOCAL) then
+ load = load_factors
+ else if (model%isostasy%lithosphere == LITHOSPHERE_ELASTIC) then
+ call calc_elastic(model%isostasy%rbel, load, load_factors)
+ end if
+
+ end subroutine isos_lithosphere
+
+!-------------------------------------------------------------------------
+
+ subroutine isos_relaxed(model)
+
+ !> Calculate the relaxed topography, assuming the isostatic depression
+ !> is the equilibrium state for the current topography.
+
+ use glide_types
+ implicit none
+ type(glide_global_type) :: model
+
+ ! Calculate the load
+ call isos_icewaterload(model)
+
+ ! Apply lithosphere model
+ call isos_lithosphere(model, model%isostasy%load, model%isostasy%load_factors)
+
+ ! Add to present topography to get relaxed topography
+ model%isostasy%relx = model%geometry%topg + model%isostasy%load
+
+ end subroutine isos_relaxed
+
+!-------------------------------------------------------------------------
+! private subroutines
+!-------------------------------------------------------------------------
+
+ subroutine relaxing_mantle(model)
+
+ !> approximate mantle with a relaxing half-space: dh/dt=-1/tau*(w-h)
+ use glide_types
+ implicit none
+ type(glide_global_type) :: model
+
+ integer :: ew,ns
+ real(dp) :: ft1, ft2
+
+ ft1 = exp(-model%numerics%dt/model%isostasy%relaxed_tau)
+ ft2 = 1.d0 - ft1
+
+ do ns=1,model%general%nsn
+ do ew=1,model%general%ewn
+ model%geometry%topg(ew,ns) = ft2 * (model%isostasy%relx(ew,ns) - model%isostasy%load(ew,ns)) &
+ + ft1 * model%geometry%topg(ew,ns)
+ end do
+ end do
+
+ end subroutine relaxing_mantle
+
+!-------------------------------------------------------------------------
+
+end module isostasy
+
+!-------------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/isostasy_elastic.F90 b/components/cism/glimmer-cism/libglide/isostasy_elastic.F90
new file mode 100644
index 0000000000..2d356cb761
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/isostasy_elastic.F90
@@ -0,0 +1,218 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! isostasy_elastic.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module isostasy_elastic
+
+ !> handle elastic lithosphere
+
+ !NOTE: This works for serial simulations only.
+
+ use glimmer_global, only : dp
+ use glide_types, only: isos_elastic
+
+ implicit none
+
+ real(dp), private, parameter :: r_lr = 6.d0 ! influence of disk load at (0,0) is felt within a radius of r_lr*rbel%lr
+
+ private :: init_rbel, rbel_ow, rbel_iw
+
+!-------------------------------------------------------------------------
+
+contains
+
+!-------------------------------------------------------------------------
+
+ subroutine init_elastic(rbel, deltax)
+
+ !> initialise elastic lithosphere calculations
+ use glimmer_physcon, only : pi
+ implicit none
+ type(isos_elastic) :: rbel !> structure holding elastic litho data
+ real(dp), intent(in) :: deltax !> grid spacing
+
+ ! local variables
+ real(dp) :: a ! radius of disk
+ real(dp) :: r ! distance from centre
+ integer :: i,j
+
+ ! calculate a so that a circle of radius a is equivalent to a square with size deltax
+ a = deltax/sqrt(pi)
+
+ ! initialise w
+ call init_rbel(rbel, a)
+
+ ! calculate size of operator
+ rbel%wsize = int(r_lr*rbel%lr/deltax)
+
+ ! allocate memory for operator
+ allocate(rbel%w(0:rbel%wsize,0:rbel%wsize))
+
+ ! calculating points within disk
+ rbel%w(0,0) = rbel_iw(rbel,0.d0)
+ r = deltax/rbel%lr
+ rbel%w(0,1) = rbel_iw(rbel,r)
+ rbel%w(1,0) = rbel%w(0,1)
+
+ ! calculating points outside disk
+ do j=0,rbel%wsize
+ do i=2,rbel%wsize
+ r = deltax * sqrt(real(i)**2 + real(j)**2)/rbel%lr
+ rbel%w(i,j) = rbel_ow(rbel,r)
+ end do
+ end do
+
+ do j=2,rbel%wsize
+ do i=0,1
+ r = deltax * sqrt(real(i)**2 + real(j)**2)/rbel%lr
+ rbel%w(i,j) = rbel_ow(rbel,r)
+ end do
+ end do
+
+ i=1
+ j=1
+ r = deltax * sqrt(real(i)**2 + real(j)**2)/rbel%lr
+ rbel%w(i,j) = rbel_ow(rbel,r)
+
+#ifdef DEB_REBOUND
+ open(1,file='w.dat',status='UNKNOWN')
+ do j=0,rbel%wsize
+ do i=0,rbel%wsize
+ write(1,*) i,j,rbel%w(i,j)
+ end do
+ end do
+ close(1)
+#endif
+
+ !rbel%w=rbel%w/len0
+
+ end subroutine init_elastic
+
+!-------------------------------------------------------------------------
+
+ subroutine calc_elastic(rbel,load,load_factors)
+
+ !> calculate surface loading effect using elastic lithosphere approximation
+ implicit none
+ type(isos_elastic) :: rbel !> structure holding elastic litho data
+ real(dp), dimension(:,:), intent(out) :: load !> loading effect due to load_factors
+ real(dp), dimension(:,:), intent(in) :: load_factors !> load mass divided by mantle density
+
+ integer ewn,nsn
+ integer i,j,n,m
+
+ ewn = size(load,1)
+ nsn = size(load,2)
+
+ load = 0.d0
+ do j=1,nsn
+ do i=1,ewn
+ do n=max(1,j-rbel%wsize),min(nsn,j+rbel%wsize)
+ do m=max(1,i-rbel%wsize),min(ewn,i+rbel%wsize)
+ load(i,j) = load(i,j) + load_factors(m,n) * rbel%w(abs(m-i),abs(n-j))
+ end do
+ end do
+ end do
+ end do
+
+ end subroutine calc_elastic
+
+!-------------------------------------------------------------------------
+
+ subroutine finalise_elastic(rbel)
+ !> clean-up data structure
+ implicit none
+ type(isos_elastic) :: rbel !> structure holding elastic litho data
+
+ deallocate(rbel%w)
+ end subroutine finalise_elastic
+
+!-------------------------------------------------------------------------
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! private subroutines
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ subroutine init_rbel(rbel, a)
+
+ !> initialise elastic lithosphere calculations
+ use glimmer_paramets, only: len0
+ use glimmer_physcon, only: rhom,grav
+ use isostasy_kelvin
+ implicit none
+ type(isos_elastic) :: rbel !> structure holding elastic litho data
+ real(dp), intent(in) :: a !> radius of disk
+
+ real(dp) :: dummy_a
+
+ call set_kelvin(1.d-10,40)
+
+ rbel%lr = ((rbel%d/(rhom*grav))**0.25d0)/len0
+ rbel%a = a
+
+ dummy_a = rbel%a/rbel%lr
+
+ rbel%c1 = dummy_a * dker(dummy_a)
+ rbel%c2 = -dummy_a * dkei(dummy_a)
+ rbel%cd3 = dummy_a * dber(dummy_a)
+ rbel%cd4 = -dummy_a * dbei(dummy_a)
+
+ end subroutine init_rbel
+
+!-------------------------------------------------------------------------
+
+ function rbel_ow(rbel,r)
+ use isostasy_kelvin
+ !> calculating deflection outside disk
+ implicit none
+ real(dp) :: rbel_ow
+ real(dp), intent(in) :: r !> radius, r should be scaled with lr
+ type(isos_elastic) :: rbel !> structure holding elastic litho data
+
+ rbel_ow = rbel%cd3*ker(r) + rbel%cd4*kei(r)
+ end function rbel_ow
+
+!-------------------------------------------------------------------------
+
+ function rbel_iw(rbel,r)
+ use isostasy_kelvin
+ !> calculating deflection inside disk
+ implicit none
+ real(dp) :: rbel_iw
+ real(dp), intent(in) :: r !> radius, r should be scaled with lr
+ type(isos_elastic) :: rbel !> structure holding elastic litho data
+
+ rbel_iw = 1.d0 + rbel%c1*ber(r) + rbel%c2*bei(r)
+ end function rbel_iw
+
+!-------------------------------------------------------------------------
+
+end module isostasy_elastic
+
+!-------------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglide/isostasy_kelvin.F90 b/components/cism/glimmer-cism/libglide/isostasy_kelvin.F90
new file mode 100644
index 0000000000..7f7b147a92
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/isostasy_kelvin.F90
@@ -0,0 +1,414 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! isostasy_kelvin.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> module for calculating zeroth order Kelvin functions and their derivatives.
+!! Both single and double precision versions are provided
+!!
+!! \author Magnus Hagdorn
+!! \date June 2000
+
+module isostasy_kelvin
+
+ use glimmer_global, only: sp, dp
+ use glimmer_physcon, only: pi
+ implicit none
+
+ real(kind=dp), private, parameter :: gamma=0.577215664901532860606512d0 !< Euler's constant
+ integer, private :: j_max = 40 !< maximum number of iterations
+ real(kind=dp), private :: tolerance = 1.d-10 !< the tolerance
+
+ interface ber
+ module procedure d_ber, s_ber
+ end interface
+ interface bei
+ module procedure d_bei, s_bei
+ end interface
+ interface ker
+ module procedure d_ker, s_ker
+ end interface
+ interface kei
+ module procedure d_kei, s_kei
+ end interface
+
+ interface dber
+ module procedure d_dber, s_dber
+ end interface
+ interface dbei
+ module procedure d_dbei, s_dbei
+ end interface
+ interface dker
+ module procedure d_dker, s_dker
+ end interface
+ interface dkei
+ module procedure d_dkei, s_dkei
+ end interface
+
+contains
+ !> set tolerance and maximum number of iterations
+ subroutine set_kelvin(tol, jmax)
+ implicit none
+ real(kind=dp), intent(in) :: tol
+ integer, intent(in) :: jmax
+ j_max = jmax
+ tolerance = tol
+ end subroutine set_kelvin
+
+ function d_ber(x)
+ implicit none
+ real(kind=dp) :: d_ber
+ real(kind=dp), intent(in) :: x
+
+ real(kind=dp) :: arg, arg_d
+ real(kind=dp) :: p_d_ber
+ real(kind=dp) :: factorial
+ real(kind=dp) :: sign
+ integer :: j
+
+ p_d_ber = 0.d0
+ factorial = 1.d0
+
+ d_ber = 1.d0
+ arg = (x/2.d0)**4
+ arg_d = arg
+ sign = -1.d0
+
+ j=1
+ do while (j < j_max)
+ p_d_ber = d_ber
+ factorial = factorial*2*j*(2*j-1.d0)
+ d_ber = d_ber + sign*arg_d/(factorial*factorial)
+ if (abs(d_ber-p_d_ber) < tolerance) exit
+ arg_d = arg_d*arg
+ sign = -sign
+ j = j+1
+ end do
+ end function d_ber
+
+ function d_bei(x)
+ implicit none
+ real(kind=dp) :: d_bei
+ real(kind=dp), intent(in) :: x
+
+ real(kind=dp) :: arg, arg_d
+ real(kind=dp) :: p_d_bei
+ real(kind=dp) :: factorial
+ real(kind=dp) :: sign
+ integer :: j
+
+ p_d_bei = 1.d12
+ factorial = 1.d0
+
+ arg = (x/2.d0)**2
+ d_bei = arg
+ arg_d = arg*arg*arg
+ arg = arg*arg
+ sign = -1.d0
+
+ j=1
+ do while (j < j_max)
+ p_d_bei = d_bei
+ factorial = factorial*2*j*(2*j+1.d0)
+ d_bei = d_bei + sign*arg_d/(factorial*factorial)
+ if (abs(d_bei-p_d_bei) < tolerance) exit
+ arg_d = arg_d*arg
+ sign = -sign
+ j = j+1
+ end do
+ end function d_bei
+
+ function d_ker(x)
+ implicit none
+ real(kind=dp) :: d_ker
+ real(kind=dp), intent(in) :: x
+
+ real(kind=dp) :: arg, arg_d
+ real(kind=dp) :: p_d_ker
+ real(kind=dp) :: factorial
+ real(kind=dp) :: phi
+ real(kind=dp) :: sign
+ integer :: j
+
+ p_d_ker = 0.d0
+ factorial = 1.d0
+
+ arg = (x/2.d0)**4
+ arg_d = arg
+ sign = -1.d0
+ phi = 0.d0
+ d_ker = -(log(x/2.d0)+gamma)*d_ber(x)+(pi/4.d0)*d_bei(x)
+
+ j=1
+ do while (j < j_max)
+ p_d_ker = d_ker
+ factorial = factorial*2*j*(2*j-1.d0)
+ phi = phi + 1.d0/(2.d0*j-1.d0) + 1.d0/(2.d0*j)
+ d_ker = d_ker + sign*phi*arg_d/(factorial*factorial)
+ if (abs(d_ker-p_d_ker) < tolerance) exit
+ arg_d = arg_d*arg
+ sign = -sign
+ j = j+1
+ end do
+ end function d_ker
+
+ function d_kei(x)
+ implicit none
+ real(kind=dp) :: d_kei
+ real(kind=dp), intent(in) :: x
+
+ real(kind=dp) :: arg, arg_d
+ real(kind=dp) :: p_d_kei
+ real(kind=dp) :: factorial
+ real(kind=dp) :: phi
+ real(kind=dp) :: sign
+ integer :: j
+
+ p_d_kei = 0.d0
+ factorial = 1.d0
+
+ arg = (x/2.d0)**2
+ sign = -1.d0
+ phi = 1.d0
+ d_kei = -(log(x/2.d0)+gamma)*d_bei(x)-(pi/4.d0)*d_ber(x)+arg
+ arg_d = arg
+ arg = arg*arg
+ arg_d = arg_d*arg
+
+ j=1
+ do while (j < j_max)
+ p_d_kei = d_kei
+ factorial = factorial*2*j*(2*j+1.d0)
+ phi = phi + 1.d0/(2.d0*j+1.d0) + 1.d0/(2.d0*j)
+ d_kei = d_kei + sign*phi*arg_d/(factorial*factorial)
+ if (abs(d_kei-p_d_kei) < tolerance) exit
+ arg_d = arg_d*arg
+ sign = -sign
+ j = j+1
+ end do
+ end function d_kei
+
+ function s_ber(x)
+ implicit none
+ real(kind=sp) :: s_ber
+ real(kind=sp), intent(in) :: x
+
+ s_ber = real(d_ber(real(x,kind=dp)),kind=sp)
+ end function s_ber
+
+ function s_bei(x)
+ implicit none
+ real(kind=sp) :: s_bei
+ real(kind=sp), intent(in) :: x
+
+ s_bei = real(d_bei(real(x,kind=dp)),kind=sp)
+ end function s_bei
+
+ function s_ker(x)
+ implicit none
+ real(kind=sp) :: s_ker
+ real(kind=sp), intent(in) :: x
+
+ s_ker = real(d_ker(real(x,kind=dp)),kind=sp)
+ end function s_ker
+
+ function s_kei(x)
+ implicit none
+ real(kind=sp) :: s_kei
+ real(kind=sp), intent(in) :: x
+
+ s_kei = real(d_kei(real(x,kind=dp)),kind=sp)
+ end function s_kei
+
+ function d_dber(x)
+ implicit none
+ real(kind=dp) :: d_dber
+ real(kind=dp), intent(in) :: x
+
+ real(kind=dp) :: arg, arg_d
+ real(kind=dp) :: p_d_dber
+ real(kind=dp) :: factorial
+ real(kind=dp) :: sign
+ integer :: j
+
+ p_d_dber = 0.d0
+ factorial = 1.d0
+
+ d_dber = 0.d0
+ arg = (x/2.d0)**4
+ arg_d = (x/2.d0)**3
+ sign = -1.d0
+
+ j=1
+ do while (j < j_max)
+ p_d_dber = d_dber
+ factorial = factorial*2*j*(2*j-1.d0)
+ d_dber = d_dber + sign*2.d0*j*arg_d/(factorial*factorial)
+ if (abs(d_dber-p_d_dber) < tolerance) exit
+ arg_d = arg_d*arg
+ sign = -sign
+ j = j+1
+ end do
+ end function d_dber
+
+ function d_dbei(x)
+ implicit none
+ real(kind=dp) :: d_dbei
+ real(kind=dp), intent(in) :: x
+
+ real(kind=dp) :: arg, arg_d
+ real(kind=dp) :: p_d_dbei
+ real(kind=dp) :: factorial
+ real(kind=dp) :: sign
+ integer :: j
+
+ p_d_dbei = 1.d12
+ factorial = 1.d0
+
+ arg = (x/2.d0)**4
+ arg_d = arg*(x/2.d0)
+ d_dbei = (x/2.d0)
+ sign = -1.d0
+
+ j=1
+ do while (j < j_max)
+ p_d_dbei = d_dbei
+ factorial = factorial*2*j*(2*j+1.d0)
+ d_dbei = d_dbei + sign*(2.d0*j+1.d0)*arg_d/(factorial*factorial)
+ if (abs(d_dbei-p_d_dbei) < tolerance) exit
+ arg_d = arg_d*arg
+ sign = -sign
+ j = j+1
+ end do
+ end function d_dbei
+
+ function d_dker(x)
+ implicit none
+ real(kind=dp) :: d_dker
+ real(kind=dp), intent(in) :: x
+
+ real(kind=dp) :: arg, arg_d
+ real(kind=dp) :: p_d_dker
+ real(kind=dp) :: factorial
+ real(kind=dp) :: phi
+ real(kind=dp) :: sign
+ integer :: j
+
+ p_d_dker = 0.d0
+ factorial = 1.d0
+
+ arg = (x/2.d0)**4
+ arg_d = (x/2.d0)**3
+ sign = -1.d0
+ phi = 0.d0
+ d_dker = -(log(x/2.d0)+gamma)*d_dber(x)-d_ber(x)/x+(pi/4.d0)*d_dbei(x)
+
+ j=1
+ do while (j < j_max)
+ p_d_dker = d_dker
+ factorial = factorial*2*j*(2*j-1.d0)
+ phi = phi + 1.d0/(2.d0*j-1.d0) + 1.d0/(2.d0*j)
+ d_dker = d_dker + sign*phi*2.d0*j*arg_d/(factorial*factorial)
+ if (abs(d_dker-p_d_dker) < tolerance) exit
+ arg_d = arg_d*arg
+ sign = -sign
+ j = j+1
+ end do
+ end function d_dker
+
+ function d_dkei(x)
+ implicit none
+ real(kind=dp) :: d_dkei
+ real(kind=dp), intent(in) :: x
+
+ real(kind=dp) :: arg, arg_d
+ real(kind=dp) :: p_d_dkei
+ real(kind=dp) :: factorial
+ real(kind=dp) :: phi
+ real(kind=dp) :: sign
+ integer :: j
+
+ p_d_dkei = 0.d0
+ factorial = 1.d0
+
+ arg = (x/2.d0)
+ sign = -1.d0
+ phi = 1.d0
+ d_dkei = -(log(x/2.d0)+gamma)*d_dbei(x)-d_bei(x)/x-(pi/4.d0)*d_dber(x)+arg
+ arg_d = arg**5
+ arg = arg**4
+
+ j=1
+ do while (j < j_max)
+ p_d_dkei = d_dkei
+ factorial = factorial*2*j*(2*j+1.d0)
+ phi = phi + 1.d0/(2.d0*j+1.d0) + 1.d0/(2.d0*j)
+ d_dkei = d_dkei + sign*phi*(2.d0*j+1.d0)*arg_d/(factorial*factorial)
+ if (abs(d_dkei-p_d_dkei) < tolerance) exit
+ arg_d = arg_d*arg
+ sign = -sign
+ j = j+1
+ end do
+ end function d_dkei
+
+ function s_dber(x)
+ implicit none
+ real(kind=sp) :: s_dber
+ real(kind=sp), intent(in) :: x
+
+ s_dber = real(d_dber(real(x,kind=dp)),kind=sp)
+ end function s_dber
+
+ function s_dbei(x)
+ implicit none
+ real(kind=sp) :: s_dbei
+ real(kind=sp), intent(in) :: x
+
+ s_dbei = real(d_dbei(real(x,kind=dp)),kind=sp)
+ end function s_dbei
+
+ function s_dker(x)
+ implicit none
+ real(kind=sp) :: s_dker
+ real(kind=sp), intent(in) :: x
+
+ s_dker = real(d_dker(real(x,kind=dp)),kind=sp)
+ end function s_dker
+
+ function s_dkei(x)
+ implicit none
+ real(kind=sp) :: s_dkei
+ real(kind=sp), intent(in) :: x
+
+ s_dkei = real(d_dkei(real(x,kind=dp)),kind=sp)
+ end function s_dkei
+
+end module isostasy_kelvin
+
+
+
diff --git a/components/cism/glimmer-cism/libglide/time_vars.def b/components/cism/glimmer-cism/libglide/time_vars.def
new file mode 100644
index 0000000000..ad729e4b07
--- /dev/null
+++ b/components/cism/glimmer-cism/libglide/time_vars.def
@@ -0,0 +1,7 @@
+[time]
+dimensions: time
+units: year since 1-1-1 0:0:0
+long_name: Model time
+standard_name: time
+calendar: none
+
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/READ.ME b/components/cism/glimmer-cism/libglimmer-solve/SLAP/READ.ME
new file mode 100644
index 0000000000..5ca4fb57a2
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/READ.ME
@@ -0,0 +1,189 @@
+----------------------------------------------------------------------
+ The
+ Sparse Linear Algebra Package
+
+ @@@@@@@ @ @@@ @@@@@@@@
+ @ @ @ @ @ @ @
+ @ @ @ @ @ @
+ @@@@@@@ @ @ @ @@@@@@@@
+ @ @ @@@@@@@@@ @
+ @ @ @ @ @ @
+ @@@@@@@ @@@@@@@@@ @ @ @
+
+ @ @ @@@@@@@ @@@@@
+ @ @ @ @ @ @@
+ @ @ @@@@@@@ @ @@ @ @ @ @
+ @ @ @ @ @@ @ @@@@@@ @ @ @
+ @ @ @@@@@@@@@ @ @ @ @ @
+ @ @ @ @ @ @@@ @@ @
+ @@@ @@@@@@@ @ @@@@@@@@@ @@@ @@@@@
+
+----------------------------------------------------------------------
+
+SLAP This is the official release version 2.0 of the Sparse Linear
+ Algebra Package: a SLAP for the Masses! It contains "core"
+        routines for the iterative solution of symmetric and non-symmetric
+ positive definite and positive semi-definite linear systems.
+ Included in this package are core routines to do Iterative
+ Refinement iteration, Preconditioned Conjugate Gradient
+ iteration, Preconditioned Conjugate Gradient iteration on the
+ Normal Equations, Preconditioned BiConjugate Gradient iteration,
+ Preconditioned BiConjugate Gradient Squared iteration, Orthomin
+ iteration and Generalized Minimum Residual iteration. Core
+ routines require the user to supply "MATVEC" (Matrix Vector
+ Multiply) and "MSOLVE" (Preconditioning) routines. This allows
+ the core routines to be written in a way that makes them
+ independent of the matrix data structure. For each core routine
+ there are several drivers and support routines that allow the
+ user to utilize Diagonal Scaling and Incomplete
+ Cholesky/Incomplete LU factorization as preconditioners with no
+        coding. The price for this convenience is that one must use
+        a specific matrix data structure: SLAP Column or SLAP Triad
+ format.
+
+Comments and suggestions should be sent to:
+ Dr. Mark K. Seager
+ Lawrence Livermore National Lab.
+ PO Box 808, L-300
+ Livermore, CA 94550
+ (415) 423-3141
+ seager@lll-crg.llnl.gov
+or
+ Dr. Anne Greenbaum
+ Courant Institute of Mathematical Sciences
+ New York University
+ 251 Mercer St.
+ New York, NY 10012
+ (212)998-3145
+ greenbau@nyu.edu
+
+ **********************************************************************
+ GETTING STARTED ON ==> NON UNIX <== SYSTEMS
+ **********************************************************************
+To generate the SLAP test program and library on *NON* Un*x systems
+get the following files:
+ READ.ME This very file.
+ slapqc.f Quick Check driver routine. Read the comments in this
+ file for more information about porting the test code.
+ slap.f Source code for SLAP 2.0. The first "routine" is a
+ "RoadMap" document routine that describes the package
+ in gory detail.
+ dlapqc.f Quick Check driver routine for double precision
+ routines. Read the comments in this file for more
+ information about porting the test code.
+ dlap.f Source code for SLAP 2.0 double precision routines.
+ The first "routine" is a "RoadMap" document routine
+ that describes the package in gory detail.
+Additional routines required for correct execution that are not
+supplied directly with the package are the BLAS and the SLATEC error
+handling package. These can be obtained from the SLATEC library
+itself.
+
+To make the library simply compile "slap.f" with the highest
+optimization level you have at your disposal. Then look at the listing
+to make sure that the "inner loops" in the routines SSMV, SSMTV,
+SSLI2, SLLTI2, SSLUI2, SSLUI4 and SSMMI2 vectorized. Compiler
+directives have been set up for the Alliant FX/Fortran, Cray CFT and
+LCC Civic compilers, but you may want to verify the vectorization of
+these loops anyway. Now do whatever is necessary to turn the binary
+from "slap.f" into a library.
+
+To make the test program compile the "slapqc.f" and link this with the
+SLAP library made in the last step and the SLATEC library (to get the
+SLATEC error handling package and the BLAS).
+
+**********************************************************************
+ GETTING STARTED ON UNIX SYSTEMS
+**********************************************************************
+To generate the SLAP test program and library on Un*x systems edit the
+makefile included in this distribution and change the make macros:
+1) FFLAGS to whatever your Fortran77 compiler needs to optimize
+ things.
+2) LFLAGS to whatever libraries you need to load in.
+3) OWNER to the owner of the library file to be created with
+ "make install" ( "root" or your login name are two obvious choices
+ here).
+4) GROUP to the group who should have privileges on the library file.
+5) LIBDIR to the directory where to put the library. The choice in
+ the makefile distribution is the standard Un*x place.
+
+To make the SLAP library type "make install" and the makefile will
+construct the libslap.a file and install it in $(LIBDIR) with $(OWNER)
+and $(GROUP) privileges.
+
+Also, the files xersla.f, blas.f and mach.f contain routines that are
+usually contained in the SLATEC library and are included to be used if
+you don't have them in some library somewhere. xersla.f is the error
+handler for SLATEC. Add it to the load line for slapqc:
+
+slapqc : slapqc.o ${OBJS} xersla.o
+ $(FC) ${FFLAGS} slapqc.o ${OBJS} xersla.o -o slapqc ${LFLAGS}
+
+if you don't have the SLATEC library installed and referenced in
+${LFLAGS}. mach.f contains the machine constants for various
+machines. If you get unsatisfied externals R1MACH and I1MACH then you
+need to add mach.o to the load line for slapqc. Uncomment the
+machine constants for your machine (or add them if needed) and run make
+again. blas.f contains the "LINPACK BLAS" and you should use the hand
+coded versions, if your machine vendor supplied them (most do
+now-a-days). If you don't have them then add blas.o to the load line
+and rerun make.
+
+After editing the makefile do "make slapqc" or just "make" to get the
+SLAP Quick Test program made.
+
+**********************************************************************
+ RUNNING THE SLAP 2.0 QUICK CHECK
+**********************************************************************
+The SLAP 2.0 SLATEC quick check test program "slapqc" requires one
+input parameter "KPRINT" from the STANDARD IN (I1MACH(1)) Fortran I/O
+unit (this is all described in the source file "slapqc.f"). KPRINT=2
+gives nice output about the progression of tests. Running the Quick
+Test will generate output to the STANDARD OUT (I1MACH(2)). All
+iterative methods should complete their iteration without error. If
+all went well with the quick checks then the following message is
+printed out at the end of the test:
+****************************************************
+**** All SLAP Quick Checks Passed. No Errors. ****
+****************************************************
+
+**********************************************************************
+
+ Notice
+ This computer code material was prepared as an account of
+ work sponsored by the United States Government. Neither the
+ United States nor the United States Department of Energy,
+ nor any of their employees, nor any of their contractors,
+ subcontractors, or their employees, makes any warranty,
+ express or implied, or assumes any legal liability or
+ responsibility for the accuracy, completeness or usefulness
+ of any information, apparatus, product or process disclosed,
+ or represents that its use would not infringe
+ privately-owned rights.
+
+**********************************************************************
+
+ DISCLAIMER
+ This document was prepared as an account of work sponsored
+ by an agency of the United States Government. Neither the
+ United States Government nor the University of California
+ nor any of their employees, makes any warranty, express or
+ implied, or assumes any legal liability or responsibility
+ for the accuracy, completeness or usefulness of any
+ information, apparatus, product or process disclosed, or
+ represents that its use would not infringe privately owned
+ rights. Reference herein to any specific commercial
+ products, process, or service by trade name, trademark,
+ manufacturer, or otherwise, does not necessarily constitute
+ or imply its endorsement, recommendation, or favoring by the
+ United States Government or the University of California.
+ The views and opinions of authors expressed herein do not
+ necessarily state or reflect those of the United States
+ Government thereof, and shall not be used for advertising or
+ product endorsement purposes.
+
+ Work performed under the auspices of the U.S. Department of
+ Energy by Lawrence Livermore National Laboratory under
+ contract number W-7405-Eng-48.
+
+**********************************************************************
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dbcg.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dbcg.f
new file mode 100644
index 0000000000..45c8bce782
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dbcg.f
@@ -0,0 +1,1083 @@
+*DECK DBCG
+ SUBROUTINE DBCG(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MTTVEC,
+ $ MSOLVE, MTSOLV, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ R, Z, P, RR, ZZ, PP, DZ, RWORK, IWORK)
+C***BEGIN PROLOGUE DBCG
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DBCG-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition, BiConjugate Gradient
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Preconditioned BiConjugate Gradient Sparse Ax=b solver.
+C Routine to solve a Non-Symmetric linear system Ax = b
+C using the Preconditioned BiConjugate Gradient method.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, IWORK(USER DEFINABLE)
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N), P(N)
+C DOUBLE PRECISION RR(N), ZZ(N), PP(N), DZ(N)
+C DOUBLE PRECISION RWORK(USER DEFINABLE)
+C EXTERNAL MATVEC, MTTVEC, MSOLVE, MTSOLV
+C
+C CALL DBCG(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MTTVEC,
+C $ MSOLVE, MTSOLV, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+C $ R, Z, P, RR, ZZ, PP, DZ, RWORK, IWORK)
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays contain the matrix data structure for A.
+C It could take any form. See "Description", below for more
+C late breaking details...
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MATVEC :EXT External.
+C Name of a routine which performs the matrix vector multiply
+C operation Y = A*X given A and X. The name of the MATVEC
+C routine must be declared external in the calling program.
+C The calling sequence of MATVEC is:
+C CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C Where N is the number of unknowns, Y is the product A*X upon
+C return, X is an input vector. NELT, IA, JA, A and ISYM
+C define the SLAP matrix data structure: see Description,below.
+C MTTVEC :EXT External.
+C Name of a routine which performs the matrix transpose vector
+C multiply y = A'*X given A and X (where ' denotes transpose).
+C The name of the MTTVEC routine must be declared external in
+C the calling program. The calling sequence to MTTVEC is the
+C         same as that for MATVEC, viz.:
+C CALL MTTVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C Where N is the number of unknowns, Y is the product A'*X
+C upon return, X is an input vector. NELT, IA, JA, A and ISYM
+C define the SLAP matrix data structure: see Description,below.
+C MSOLVE :EXT External.
+C Name of a routine which solves a linear system MZ = R for Z
+C given R with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays). The name of the MSOLVE routine
+C must be declared external in the calling program. The
+C         calling sequence of MSOLVE is:
+C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and Z is the solution upon return. NELT, IA, JA, A
+C and ISYM define the SLAP matrix data structure: see
+C Description, below. RWORK is a double precision array that
+C can be used
+C to pass necessary preconditioning information and/or
+C workspace to MSOLVE. IWORK is an integer work array for the
+C same purpose as RWORK.
+C MTSOLV :EXT External. T
+C Name of a routine which solves a linear system M ZZ = RR for
+C ZZ given RR with the preconditioning matrix M (M is supplied
+C via RWORK and IWORK arrays). The name of the MTSOLV routine
+C must be declared external in the calling program. The call-
+C ing sequence to MTSOLV is:
+C CALL MTSOLV(N, RR, ZZ, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C Where N is the number of unknowns, RR is the right-hand side
+C vector, and ZZ is the solution upon return. NELT, IA, JA, A
+C and ISYM define the SLAP matrix data structure: see
+C Description, below. RWORK is a double precision array that
+C can be used
+C to pass necessary preconditioning information and/or
+C workspace to MTSOLV. IWORK is an integer work array for the
+C same purpose as RWORK.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C R :WORK Double Precision R(N).
+C Z :WORK Double Precision Z(N).
+C P :WORK Double Precision P(N).
+C RR :WORK Double Precision RR(N).
+C ZZ :WORK Double Precision ZZ(N).
+C PP :WORK Double Precision PP(N).
+C DZ :WORK Double Precision DZ(N).
+C RWORK :WORK Double Precision RWORK(USER DEFINED).
+C Double Precision array that can be used for workspace in
+C MSOLVE and MTSOLV.
+C IWORK :WORK Integer IWORK(USER DEFINED).
+C Integer array that can be used for workspace in MSOLVE
+C and MTSOLV.
+C
+C *Description
+C This routine does not care what matrix data structure is
+C used for A and M. It simply calls the MATVEC and MSOLVE
+C routines, with the arguments as described above. The user
+C could write any type of structure and the appropriate MATVEC
+C and MSOLVE routines. It is assumed that A is stored in the
+C IA, JA, A arrays in some fashion and that M (or INV(M)) is
+C stored in IWORK and RWORK in some fashion. The SLAP
+C         routines DSDBCG and DSLUBC are examples of this procedure.
+C
+C Two examples of matrix data structures are the: 1) SLAP
+C Triad format and 2) SLAP Column format.
+C
+C =================== S L A P Triad format ===================
+C In this format only the non-zeros are stored. They may
+C appear in *ANY* order. The user supplies three arrays of
+C length NELT, where NELT is the number of non-zeros in the
+C matrix: (IA(NELT), JA(NELT), A(NELT)). For each non-zero
+C the user puts the row and column index of that matrix
+C element in the IA and JA arrays. The value of the non-zero
+C matrix element is placed in the corresponding location of
+C the A array. This is an extremely easy data structure to
+C generate. On the other hand it is not too efficient on
+C vector computers for the iterative solution of linear
+C systems. Hence, SLAP changes this input data structure to
+C the SLAP Column format for the iteration (but does not
+C change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *See Also:
+C SDBCG, DSLUBC
+C***REFERENCES (NONE)
+C***ROUTINES CALLED MATVEC, MTTVEC, MSOLVE, MTSOLV, ISDBCG,
+C DCOPY, DDOT, DAXPY, D1MACH
+C***END PROLOGUE DBCG
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+ INTEGER ITER, IERR, IUNIT, IWORK(*)
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N), P(N)
+ DOUBLE PRECISION RR(N), ZZ(N), PP(N), DZ(N), RWORK(*)
+ EXTERNAL MATVEC, MTTVEC, MSOLVE, MTSOLV
+C
+C Check some of the input data.
+C***FIRST EXECUTABLE STATEMENT DBCG
+ ITER = 0
+ IERR = 0
+ IF( N.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ FUZZ = D1MACH(3)
+ TOLMIN = 500.0*FUZZ
+ FUZZ = FUZZ*FUZZ
+ IF( TOL.LT.TOLMIN ) THEN
+ TOL = TOLMIN
+ IERR = 4
+ ENDIF
+C
+C Calculate initial residual and pseudo-residual, and check
+C stopping criterion.
+ CALL MATVEC(N, X, R, NELT, IA, JA, A, ISYM)
+ DO 10 I = 1, N
+ R(I) = B(I) - R(I)
+ RR(I) = R(I)
+ 10 CONTINUE
+ CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+ CALL MTSOLV(N, RR, ZZ, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+ IF( ISDBCG(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, RR, ZZ, PP,
+ $ DZ, RWORK, IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 )
+ $ GO TO 200
+ IF( IERR.NE.0 ) RETURN
+C
+C ***** iteration loop *****
+C
+ DO 100 K=1,ITMAX
+ ITER = K
+C
+C Calculate coefficient BK and direction vectors P and PP.
+ BKNUM = DDOT(N, Z, 1, RR, 1)
+ IF( ABS(BKNUM).LE.FUZZ ) THEN
+ IERR = 6
+ RETURN
+ ENDIF
+ IF(ITER .EQ. 1) THEN
+ CALL DCOPY(N, Z, 1, P, 1)
+ CALL DCOPY(N, ZZ, 1, PP, 1)
+ ELSE
+ BK = BKNUM/BKDEN
+ DO 20 I = 1, N
+ P(I) = Z(I) + BK*P(I)
+ PP(I) = ZZ(I) + BK*PP(I)
+ 20 CONTINUE
+ ENDIF
+ BKDEN = BKNUM
+C
+C Calculate coefficient AK, new iterate X, new resids R and RR,
+C and new pseudo-resids Z and ZZ.
+ CALL MATVEC(N, P, Z, NELT, IA, JA, A, ISYM)
+ AKDEN = DDOT(N, PP, 1, Z, 1)
+ AK = BKNUM/AKDEN
+ IF( ABS(AKDEN).LE.FUZZ ) THEN
+ IERR = 6
+ RETURN
+ ENDIF
+ CALL DAXPY(N, AK, P, 1, X, 1)
+ CALL DAXPY(N, -AK, Z, 1, R, 1)
+ CALL MTTVEC(N, PP, ZZ, NELT, IA, JA, A, ISYM)
+ CALL DAXPY(N, -AK, ZZ, 1, RR, 1)
+ CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+ CALL MTSOLV(N, RR, ZZ, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+C check stopping criterion.
+ IF( ISDBCG(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, RR, ZZ,
+ $ PP, DZ, RWORK, IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 )
+ $ GO TO 200
+C
+ 100 CONTINUE
+C
+C ***** end of loop *****
+C
+C stopping criterion not satisfied.
+ ITER = ITMAX + 1
+ IERR = 2
+C
+ 200 RETURN
+C------------- LAST LINE OF DBCG FOLLOWS ----------------------------
+ END
+*DECK DSDBCG
+ SUBROUTINE DSDBCG(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSDBCG
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(SSDBCG-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Diagonally Scaled BiConjugate Gradient Sparse Ax=b solver.
+C Routine to solve a linear system Ax = b using the
+C BiConjugate Gradient method with diagonal scaling.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(10), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(8*N)
+C
+C CALL DSDBCG(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "Description",
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace.
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= 8*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Used to hold pointers into the RWORK array.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >= 10.
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C
+C *Description:
+C This routine performs preconditioned BiConjugate gradient
+C method on the Non-Symmetric positive definite linear system
+C Ax=b. The preconditioner is M = DIAG(A), the diagonal of the
+C matrix A. This is the simplest of preconditioners and
+C vectorizes very well.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C         Column format. The user can hand this routine either
+C         of these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to
+C be the SLAP Column format. See above.
+C
+C *See Also:
+C         DBCG, DSLUBC
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DCHKW, DSDS, DBCG
+C***END PROLOGUE DSDBCG
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+ INTEGER IERR, LENW, IWORK(LENIW), LENIW
+ DOUBLE PRECISION B(N), X(N), A(N), TOL, ERR, RWORK(LENW)
+ EXTERNAL DSMV, DSMTV, DSDI
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSDBCG
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Set up the workspace. Compute the inverse of the
+C diagonal of the matrix.
+ LOCIW = LOCIB
+C
+ LOCDIN = LOCRB
+ LOCR = LOCDIN + N
+ LOCZ = LOCR + N
+ LOCP = LOCZ + N
+ LOCRR = LOCP + N
+ LOCZZ = LOCRR + N
+ LOCPP = LOCZZ + N
+ LOCDZ = LOCPP + N
+ LOCW = LOCDZ + N
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSDBCG', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+ IWORK(4) = LOCDIN
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+ CALL DSDS(N, NELT, IA, JA, A, ISYM, RWORK(LOCDIN))
+C
+C Perform the Diagonally Scaled BiConjugate gradient algorithm.
+ CALL DBCG(N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSMTV,
+ $ DSDI, DSDI, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK(LOCR), RWORK(LOCZ), RWORK(LOCP),
+ $ RWORK(LOCRR), RWORK(LOCZZ), RWORK(LOCPP),
+ $ RWORK(LOCDZ), RWORK(1), IWORK(1))
+ RETURN
+C------------- LAST LINE OF DSDBCG FOLLOWS ----------------------------
+ END
+*DECK DSLUBC
+      SUBROUTINE DSLUBC(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+     $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSLUBC
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(SSLUBC-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative incomplete LU Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Incomplete LU BiConjugate Gradient Sparse Ax=b solver.
+C Routine to solve a linear system Ax = b using the
+C BiConjugate Gradient method with Incomplete LU
+C decomposition preconditioning.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(NEL+NU+4*N+12), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(NEL+NU+8*N)
+C
+C CALL DSLUBC(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW)
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "Description",
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN( )
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IERR = 7 => Incomplete factorization broke down
+C and was fudged. Resulting preconditioning may
+C be less than the best.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace. NEL is the
+C number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal). NU is the number of nonzeros in the upper
+C triangle of the matrix (including the diagonal).
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= NEL+NU+8*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Integer array used for workspace. NEL is the number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal). NU is the number of nonzeros in the upper
+C triangle of the matrix (including the diagonal).
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK.
+C LENIW >= NEL+NU+4*N+12.
+C
+C *Description:
+C This routine is simply a driver for the DBCG routine. It
+C calls the DSILUS routine to set up the preconditioning and
+C then calls DBCG with the appropriate MATVEC, MTTVEC and
+C MSOLVE, MTSOLV routines.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either one
+C of these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to
+C be the SLAP Column format. See above.
+C
+C *See Also:
+C DBCG, DSDBCG
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DCHKW, DSILUS, DBCG, DSMV, DSMTV
+C***END PROLOGUE DSLUBC
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+      INTEGER IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+      EXTERNAL DSMV, DSMTV, DSLUI, DSLUTI
+C         LOCRB/LOCIB are the first free locations in RWORK/IWORK;
+C         IWORK(1:10) is reserved for the pointer table set up below.
+      PARAMETER (LOCRB=1, LOCIB=11)
+C
+C         Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSLUBC
+      IERR = 0
+      IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+         IERR = 3
+         RETURN
+      ENDIF
+      CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C         Count number of Non-Zero elements preconditioner ILU matrix.
+C         Then set up the work arrays.
+      NL = 0
+      NU = 0
+      DO 20 ICOL = 1, N
+C         Don't count diagonal.
+         JBGN = JA(ICOL)+1
+         JEND = JA(ICOL+1)-1
+         IF( JBGN.LE.JEND ) THEN
+CVD$ NOVECTOR
+            DO 10 J = JBGN, JEND
+               IF( IA(J).GT.ICOL ) THEN
+                  NL = NL + 1
+                  IF( ISYM.NE.0 ) NU = NU + 1
+               ELSE
+                  NU = NU + 1
+               ENDIF
+ 10         CONTINUE
+         ENDIF
+ 20   CONTINUE
+C
+C         Lay out the integer workspace: row/column structure of the
+C         L and U factors, then two N-length scratch arrays for DSILUS.
+      LOCIL = LOCIB
+      LOCJL = LOCIL + N+1
+      LOCIU = LOCJL + NL
+      LOCJU = LOCIU + NU
+      LOCNR = LOCJU + N+1
+      LOCNC = LOCNR + N
+      LOCIW = LOCNC + N
+C
+C         Lay out the real workspace: L, inverse diagonal and U of the
+C         ILU factors, then the seven N-vectors needed by DBCG.
+      LOCL = LOCRB
+      LOCDIN = LOCL + NL
+      LOCU = LOCDIN + N
+      LOCR = LOCU + NU
+      LOCZ = LOCR + N
+      LOCP = LOCZ + N
+      LOCRR = LOCP + N
+      LOCZZ = LOCRR + N
+      LOCPP = LOCZZ + N
+      LOCDZ = LOCPP + N
+      LOCW = LOCDZ + N
+C
+C         Check the workspace allocations.
+      CALL DCHKW( 'DSLUBC', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+      IF( IERR.NE.0 ) RETURN
+C
+C         Record the ILU factorization pointers in IWORK so that the
+C         solve routines (DSLUI, DSLUTI) can locate L, DIN and U.
+      IWORK(1) = LOCIL
+      IWORK(2) = LOCJL
+      IWORK(3) = LOCIU
+      IWORK(4) = LOCJU
+      IWORK(5) = LOCL
+      IWORK(6) = LOCDIN
+      IWORK(7) = LOCU
+      IWORK(9) = LOCIW
+      IWORK(10) = LOCW
+C
+C         Compute the Incomplete LU decomposition.
+      CALL DSILUS( N, NELT, IA, JA, A, ISYM, NL, IWORK(LOCIL),
+     $ IWORK(LOCJL), RWORK(LOCL), RWORK(LOCDIN), NU, IWORK(LOCIU),
+     $ IWORK(LOCJU), RWORK(LOCU), IWORK(LOCNR), IWORK(LOCNC) )
+C
+C         Perform the incomplete LU preconditioned
+C         BiConjugate Gradient algorithm.
+      CALL DBCG(N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSMTV,
+     $ DSLUI, DSLUTI, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+     $ RWORK(LOCR), RWORK(LOCZ), RWORK(LOCP),
+     $ RWORK(LOCRR), RWORK(LOCZZ), RWORK(LOCPP),
+     $ RWORK(LOCDZ), RWORK, IWORK )
+      RETURN
+C------------- LAST LINE OF DSLUBC FOLLOWS ----------------------------
+      END
+*DECK ISDBCG
+      FUNCTION ISDBCG(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL,
+     $ TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, RR, ZZ, PP, DZ,
+     $ RWORK, IWORK, AK, BK, BNRM, SOLNRM)
+C***BEGIN PROLOGUE ISDBCG
+C***REFER TO DBCG, DSDBCG, DSLUBC
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(ISDBCG-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition, Stop Test
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Preconditioned BiConjugate Gradient Stop Test.
+C This routine calculates the stop test for the BiConjugate
+C Gradient iteration scheme. It returns a nonzero if the
+C error estimate (the type of which is determined by ITOL)
+C is less than the user specified tolerance TOL.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+C INTEGER IERR, IUNIT, IWORK(USER DEFINED)
+C DOUBLE PRECISION B(N), X(N), A(N), TOL, ERR, R(N), Z(N), P(N)
+C DOUBLE PRECISION RR(N), ZZ(N), PP(N), DZ(N)
+C DOUBLE PRECISION RWORK(USER DEFINED), AK, BK, BNRM, SOLNRM
+C EXTERNAL MSOLVE
+C
+C IF( ISDBCG(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, RR, ZZ, PP, DZ,
+C $ RWORK, IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 )
+C $ THEN ITERATION DONE
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays contain the matrix data structure for A.
+C It could take any form. See "Description", in the SLAP
+C routine DBCG for more late breaking details...
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MSOLVE :EXT External.
+C Name of a routine which solves a linear system MZ = R for Z
+C given R with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays). The name of the MSOLVE routine
+C must be declared external in the calling program. The
+C calling sequence of MSOLVE is:
+C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and Z is the solution upon return. NELT, IA, JA, A
+C and ISYM define the SLAP matrix data structure: see
+C Description, below. RWORK is a double precision array that
+C can be used
+C to pass necessary preconditioning information and/or
+C workspace to MSOLVE. IWORK is an integer work array for the
+C same purpose as RWORK.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than tol, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than tol) through a common block,
+C COMMON /SOLBLK/ SOLN( )
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than tol.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Error flag. IERR is set to 3 if ITOL is not one of the
+C acceptable values, see above.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C R :IN Double Precision R(N).
+C The residual r = b - Ax.
+C Z :WORK Double Precision Z(N).
+C P :DUMMY Double Precision P(N).
+C RR :DUMMY Double Precision RR(N).
+C ZZ :DUMMY Double Precision ZZ(N).
+C PP :DUMMY Double Precision PP(N).
+C DZ :WORK Double Precision DZ(N).
+C If ITOL.eq.2 then DZ is used to hold M-inv * B on the first
+C call. If ITOL.eq.11 then DZ is used to hold X-SOLN.
+C RWORK :WORK Double Precision RWORK(USER DEFINED).
+C Double Precision array that can be used for workspace in
+C MSOLVE and MTSOLV.
+C IWORK :WORK Integer IWORK(USER DEFINED).
+C Integer array that can be used for workspace in MSOLVE
+C and MTSOLV.
+C AK :IN Double Precision.
+C Current iterate BiConjugate Gradient iteration parameter.
+C BK :IN Double Precision.
+C Current iterate BiConjugate Gradient iteration parameter.
+C BNRM :INOUT Double Precision.
+C Norm of the right hand side. Type of norm depends on ITOL.
+C Calculated only on the first call.
+C SOLNRM :INOUT Double Precision.
+C 2-Norm of the true solution, SOLN. Only computed and used
+C if ITOL = 11.
+C
+C *Function Return Values:
+C 0 : Error estimate (determined by ITOL) is *NOT* less than the
+C specified tolerance, TOL. The iteration must continue.
+C 1 : Error estimate (determined by ITOL) is less than the
+C specified tolerance, TOL. The iteration can be considered
+C complete.
+C
+C *Precision: Double Precision
+C***REFERENCES (NONE)
+C***ROUTINES CALLED MSOLVE, DNRM2
+C***COMMON BLOCKS SOLBLK
+C***END PROLOGUE ISDBCG
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+      INTEGER ITER, IERR, IUNIT, IWORK(1)
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N), P(N)
+      DOUBLE PRECISION RR(N), ZZ(N), PP(N), DZ(N), RWORK(*)
+      DOUBLE PRECISION AK, BK, BNRM, SOLNRM
+C         SOLN is the user-supplied "exact" solution, needed only for
+C         the ITOL=11 convergence test (see prologue).
+      COMMON /SOLBLK/ SOLN(1)
+      EXTERNAL MSOLVE
+C
+C***FIRST EXECUTABLE STATEMENT ISDBCG
+      ISDBCG = 0
+C
+      IF( ITOL.EQ.1 ) THEN
+C         err = ||Residual||/||RightHandSide|| (2-Norms).
+         IF(ITER .EQ. 0) BNRM = DNRM2(N, B, 1)
+         ERR = DNRM2(N, R, 1)/BNRM
+      ELSE IF( ITOL.EQ.2 ) THEN
+C                  -1              -1
+C         err = ||M  Residual||/||M  RightHandSide|| (2-Norms).
+         IF(ITER .EQ. 0) THEN
+            CALL MSOLVE(N, B, DZ, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+            BNRM = DNRM2(N, DZ, 1)
+         ENDIF
+         ERR = DNRM2(N, Z, 1)/BNRM
+      ELSE IF( ITOL.EQ.11 ) THEN
+C         err = ||x-TrueSolution||/||TrueSolution|| (2-Norms).
+         IF(ITER .EQ. 0) SOLNRM = DNRM2(N, SOLN, 1)
+         DO 10 I = 1, N
+            DZ(I) = X(I) - SOLN(I)
+ 10      CONTINUE
+         ERR = DNRM2(N, DZ, 1)/SOLNRM
+      ELSE
+C
+C         If we get here ITOL is not one of the acceptable values.
+C         Return a sentinel "huge" error so the caller cannot converge.
+         ERR = 1.0E10
+         IERR = 3
+      ENDIF
+C
+C         Optionally report the iteration history on unit IUNIT.
+      IF(IUNIT .NE. 0) THEN
+         IF( ITER.EQ.0 ) THEN
+            WRITE(IUNIT,1000) N, ITOL
+         ENDIF
+         WRITE(IUNIT,1010) ITER, ERR, AK, BK
+      ENDIF
+      IF(ERR .LE. TOL) ISDBCG = 1
+C
+      RETURN
+ 1000 FORMAT(' Preconditioned BiConjugate Gradient for N, ITOL = ',
+     $ I5,I5,/' ITER',' Error Estimate',' Alpha',
+     $ ' Beta')
+ 1010 FORMAT(1X,I4,1X,E16.7,1X,E16.7,1X,E16.7)
+C------------- LAST LINE OF ISDBCG FOLLOWS ----------------------------
+      END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcg.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcg.f
new file mode 100644
index 0000000000..00c3423617
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcg.f
@@ -0,0 +1,1053 @@
+*DECK DCG
+      SUBROUTINE DCG(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+     $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, DZ,
+     $ RWORK, IWORK )
+C***BEGIN PROLOGUE DCG
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DCG-D),
+C Symmetric Linear system, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Preconditioned Conjugate Gradient iterative Ax=b solver.
+C Routine to solve a symmetric positive definite linear
+C system Ax = b using the Preconditioned Conjugate
+C Gradient method.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, IWORK(USER DEFINABLE)
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N)
+C DOUBLE PRECISION P(N), DZ(N), RWORK(USER DEFINABLE)
+C EXTERNAL MATVEC, MSOLVE
+C
+C CALL DCG(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+C $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, DZ,
+C $ RWORK, IWORK )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays contain the matrix data structure for A.
+C It could take any form. See ``Description'', below
+C for more late breaking details...
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MATVEC :EXT External.
+C Name of a routine which performs the matrix vector multiply
+C Y = A*X given A and X. The name of the MATVEC routine must
+C be declared external in the calling program. The calling
+C sequence to MATVEC is:
+C
+C CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C
+C Where N is the number of unknowns, Y is the product A*X
+C upon return X is an input vector, NELT is the number of
+C non-zeros in the SLAP IA, JA, A storage for the matrix A.
+C ISYM is a flag which, if non-zero, denotes that A is
+C symmetric and only the lower or upper triangle is stored.
+C MSOLVE :EXT External.
+C Name of a routine which solves a linear system MZ = R for
+C Z given R with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays). The name of the MSOLVE routine must
+C be declared external in the calling program. The calling
+C sequence to MSOLVE is:
+C
+C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and Z is the solution upon return. RWORK is a double
+C precision
+C array that can be used to pass necessary preconditioning
+C information and/or workspace to MSOLVE. IWORK is an integer
+C work array for the same purpose as RWORK.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C R :WORK Double Precision R(N).
+C Z :WORK Double Precision Z(N).
+C P :WORK Double Precision P(N).
+C DZ :WORK Double Precision DZ(N).
+C RWORK :WORK Double Precision RWORK(USER DEFINABLE).
+C Double Precision array that can be used by MSOLVE.
+C IWORK :WORK Integer IWORK(USER DEFINABLE).
+C Integer array that can be used by MSOLVE.
+C
+C *Description
+C This routine does not care what matrix data structure is
+C used for A and M. It simply calls the MATVEC and MSOLVE
+C routines, with the arguments as described above. The user
+C could write any type of structure and the appropriate MATVEC
+C and MSOLVE routines. It is assumed that A is stored in the
+C IA, JA, A arrays in some fashion and that M (or INV(M)) is
+C stored in IWORK and RWORK in some fashion. The SLAP
+C routines DSDCG and DSICCG are examples of this procedure.
+C
+C Two examples of matrix data structures are the: 1) SLAP
+C Triad format and 2) SLAP Column format.
+C
+C =================== S L A P Triad format ===================
+C
+C In this format only the non-zeros are stored. They may
+C appear in *ANY* order. The user supplies three arrays of
+C length NELT, where NELT is the number of non-zeros in the
+C matrix: (IA(NELT), JA(NELT), A(NELT)). For each non-zero
+C the user puts the row and column index of that matrix
+C element in the IA and JA arrays. The value of the non-zero
+C matrix element is placed in the corresponding location of
+C the A array. This is an extremely easy data structure to
+C generate. On the other hand it is not too efficient on
+C vector computers for the iterative solution of linear
+C systems. Hence, SLAP changes this input data structure to
+C the SLAP Column format for the iteration (but does not
+C change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *See Also:
+C DSDCG, DSICCG
+C***REFERENCES 1. Louis Hageman \& David Young, ``Applied Iterative
+C Methods'', Academic Press, New York (1981) ISBN
+C 0-12-313340-8.
+C
+C 2. Concus, Golub \& O'Leary, ``A Generalized Conjugate
+C Gradient Method for the Numerical Solution of
+C Elliptic Partial Differential Equations,'' in Sparse
+C Matrix Computations (Bunch \& Rose, Eds.), Academic
+C Press, New York (1979).
+C***ROUTINES CALLED MATVEC, MSOLVE, ISDCG, DCOPY, DDOT, DAXPY, D1MACH
+C***END PROLOGUE DCG
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+      INTEGER IUNIT, IERR, IWORK(*)
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N), P(N)
+      DOUBLE PRECISION DZ(N), RWORK(*)
+      EXTERNAL MATVEC, MSOLVE
+C
+C         Check some of the input data.
+C***FIRST EXECUTABLE STATEMENT DCG
+      ITER = 0
+      IERR = 0
+      IF( N.LT.1 ) THEN
+         IERR = 3
+         RETURN
+      ENDIF
+C         Tolerances tighter than the machine precision are clamped;
+C         D1MACH(3) is the smallest relative spacing.
+      TOLMIN = 500.0*D1MACH(3)
+      IF( TOL.LT.TOLMIN ) THEN
+         TOL = TOLMIN
+         IERR = 4
+      ENDIF
+C
+C         Calculate initial residual and pseudo-residual, and check
+C         stopping criterion.
+      CALL MATVEC(N, X, R, NELT, IA, JA, A, ISYM)
+      DO 10 I = 1, N
+         R(I) = B(I) - R(I)
+ 10   CONTINUE
+      CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+      IF( ISDCG(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+     $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, DZ,
+     $ RWORK, IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 ) GO TO 200
+      IF( IERR.NE.0 ) RETURN
+C
+C         ***** Iteration loop *****
+C
+      DO 100 K=1,ITMAX
+         ITER = K
+C
+C         Calculate coefficient bk and direction vector p.
+C         BKNUM = (r,z); a non-positive value means the
+C         preconditioner M is not positive definite.
+         BKNUM = DDOT(N, Z, 1, R, 1)
+         IF( BKNUM.LE.0.0D0 ) THEN
+            IERR = 5
+            RETURN
+         ENDIF
+         IF(ITER .EQ. 1) THEN
+            CALL DCOPY(N, Z, 1, P, 1)
+         ELSE
+            BK = BKNUM/BKDEN
+            DO 20 I = 1, N
+               P(I) = Z(I) + BK*P(I)
+ 20         CONTINUE
+         ENDIF
+C         Save this numerator as next iteration's denominator.
+         BKDEN = BKNUM
+C
+C         Calculate coefficient ak, new iterate x, new residual r,
+C         and new pseudo-residual z.  AKDEN = (p,Ap); a non-positive
+C         value means A is not positive definite.
+         CALL MATVEC(N, P, Z, NELT, IA, JA, A, ISYM)
+         AKDEN = DDOT(N, P, 1, Z, 1)
+         IF( AKDEN.LE.0.0D0 ) THEN
+            IERR = 6
+            RETURN
+         ENDIF
+         AK = BKNUM/AKDEN
+         CALL DAXPY(N, AK, P, 1, X, 1)
+         CALL DAXPY(N, -AK, Z, 1, R, 1)
+         CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+C         check stopping criterion.
+         IF( ISDCG(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+     $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, DZ, RWORK,
+     $ IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 ) GO TO 200
+C
+ 100  CONTINUE
+C
+C         ***** end of loop *****
+C
+C         stopping criterion not satisfied.
+      ITER = ITMAX + 1
+      IERR = 2
+C
+ 200  RETURN
+      END
+*DECK DSDCG
+ SUBROUTINE DSDCG(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSDCG
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSDCG-D),
+C Symmetric Linear system, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Diagonally Scaled Conjugate Gradient Sparse Ax=b Solver.
+C Routine to solve a symmetric positive definite linear
+C system Ax = b using the Preconditioned Conjugate
+C Gradient method. The preconditioner is diagonal
+C scaling.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(10), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(5*N)
+C
+C CALL DSDCG(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See ``Description'',
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace.
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK. LENW >= 5*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Used to hold pointers into the double precision workspace,
+C RWORK. Upon return the following locations of IWORK hold
+C information which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >= 10.
+C
+C *Description:
+C This routine performs preconditioned conjugate gradient
+C method on the symmetric positive definite linear system
+C Ax=b. The preconditioner is M = DIAG(A), the diagonal of
+C the matrix A. This is the simplest of preconditioners and
+C vectorizes very well. This routine is simply a driver for
+C the DCG routine. It calls the DSDS routine to set up the
+C preconditioning and then calls DCG with the appropriate
+C MATVEC and MSOLVE routines.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either of
+C these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to
+C be the SLAP Column format. See above.
+C
+C *See Also:
+C DCG, DSICCG
+C***REFERENCES 1. Louis Hageman \& David Young, ``Applied Iterative
+C Methods'', Academic Press, New York (1981) ISBN
+C 0-12-313340-8.
+C 2. Concus, Golub \& O'Leary, ``A Generalized Conjugate
+C Gradient Method for the Numerical Solution of
+C Elliptic Partial Differential Equations,'' in Sparse
+C Matrix Computations (Bunch \& Rose, Eds.), Academic
+C Press, New York (1979).
+C***ROUTINES CALLED DS2Y, DCHKW, DSDS, DCG
+C***END PROLOGUE DSDCG
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL
+ INTEGER ITMAX, ITER, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+ EXTERNAL DSMV, DSDI
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+C Modify the SLAP matrix data structure to YSMP-Column.
+C***FIRST EXECUTABLE STATEMENT DSDCG
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Set up the work arrays.
+C Compute the inverse of the diagonal of the matrix. This
+C will be used as the preconditioner.
+ LOCIW = LOCIB
+C
+ LOCD = LOCRB
+ LOCR = LOCD + N
+ LOCZ = LOCR + N
+ LOCP = LOCZ + N
+ LOCDZ = LOCP + N
+ LOCW = LOCDZ + N
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSDCG', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+ IWORK(4) = LOCD
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+ CALL DSDS(N, NELT, IA, JA, A, ISYM, RWORK(LOCD))
+C
+C Do the Preconditioned Conjugate Gradient.
+ CALL DCG(N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSDI,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK(LOCR),
+ $ RWORK(LOCZ), RWORK(LOCP), RWORK(LOCDZ), RWORK, IWORK)
+ RETURN
+C------------- LAST LINE OF DSDCG FOLLOWS -----------------------------
+ END
+*DECK DSICCG
+ SUBROUTINE DSICCG(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSICCG
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSICCG-D),
+C Symmetric Linear system, Sparse,
+C Iterative Precondition, Incomplete Cholesky
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Incomplete Cholesky Conjugate Gradient Sparse Ax=b Solver.
+C Routine to solve a symmetric positive definite linear
+C system Ax = b using the incomplete Cholesky
+C Preconditioned Conjugate Gradient method.
+C
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(NEL+N+11), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(NEL+5*N)
+C
+C CALL DSICCG(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See ``Description'',
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IERR = 7 => Incomplete factorization broke down
+C and was fudged. Resulting preconditioning may
+C be less than the best.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace. NEL is the
+C number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal)
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= NEL+5*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Integer array used for workspace. NEL is the number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal).
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >= NEL+N+11.
+C
+C *Description:
+C This routine performs preconditioned conjugate gradient
+C method on the symmetric positive definite linear system
+C Ax=b. The preconditioner is the incomplete Cholesky (IC)
+C factorization of the matrix A. See DSICS for details about
+C the incomplete factorization algorithm. One should note
+C here however, that the IC factorization is a slow process
+C and that one should save factorizations for reuse, if
+C possible. The MSOLVE operation (handled in DSLLTI) does
+C vectorize on machines with hardware gather/scatter and is
+C quite fast.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either of
+C these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to be
+C the SLAP Column format. See above.
+C
+C *See Also:
+C DCG, DSLLTI
+C***REFERENCES 1. Louis Hageman \& David Young, ``Applied Iterative
+C Methods'', Academic Press, New York (1981) ISBN
+C 0-12-313340-8.
+C 2. Concus, Golub \& O'Leary, ``A Generalized Conjugate
+C Gradient Method for the Numerical Solution of
+C Elliptic Partial Differential Equations,'' in Sparse
+C Matrix Computations (Bunch \& Rose, Eds.), Academic
+C Press, New York (1979).
+C***ROUTINES CALLED DS2Y, DCHKW, DSICS, XERRWV, DCG
+C***END PROLOGUE DSICCG
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL
+ INTEGER ITMAX, ITER, IUNIT, LENW, IWORK(LENIW), LENIW
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+ EXTERNAL DSMV, DSLLTI
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSICCG
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Count number of elements in lower triangle of the matrix.
+C Then set up the work arrays.
+ IF( ISYM.EQ.0 ) THEN
+ NEL = (NELT + N)/2
+ ELSE
+ NEL = NELT
+ ENDIF
+C
+ LOCJEL = LOCIB
+ LOCIEL = LOCJEL + NEL
+ LOCIW = LOCIEL + N + 1
+C
+ LOCEL = LOCRB
+ LOCDIN = LOCEL + NEL
+ LOCR = LOCDIN + N
+ LOCZ = LOCR + N
+ LOCP = LOCZ + N
+ LOCDZ = LOCP + N
+ LOCW = LOCDZ + N
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSICCG', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+ IWORK(1) = NEL
+ IWORK(2) = LOCJEL
+ IWORK(3) = LOCIEL
+ IWORK(4) = LOCEL
+ IWORK(5) = LOCDIN
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+C Compute the Incomplete Cholesky decomposition.
+C
+ CALL DSICS(N, NELT, IA, JA, A, ISYM, NEL, IWORK(LOCIEL),
+ $ IWORK(LOCJEL), RWORK(LOCEL), RWORK(LOCDIN),
+ $ RWORK(LOCR), IERR )
+ IF( IERR.NE.0 ) THEN
+ CALL XERRWV('DSICCG: Warning...IC factorization broke down '//
+ $ 'on step i1. Diagonal was set to unity and '//
+ $ 'factorization proceeded.', 113, 1, 1, 1, IERR, 0,
+ $ 0, 0.0, 0.0 )
+ IERR = 7
+ ENDIF
+C
+C Do the Preconditioned Conjugate Gradient.
+ CALL DCG(N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSLLTI,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK(LOCR),
+ $ RWORK(LOCZ), RWORK(LOCP), RWORK(LOCDZ), RWORK(1),
+ $ IWORK(1))
+ RETURN
+C------------- LAST LINE OF DSICCG FOLLOWS ----------------------------
+ END
+*DECK ISDCG
+ FUNCTION ISDCG(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL,
+ $ TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, DZ,
+ $ RWORK, IWORK, AK, BK, BNRM, SOLNRM)
+C***BEGIN PROLOGUE ISDCG
+C***REFER TO DCG, DSDCG, DSICCG
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(ISDCG-D),
+C Linear system, Sparse, Stop Test
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Preconditioned Conjugate Gradient Stop Test.
+C This routine calculates the stop test for the Conjugate
+C Gradient iteration scheme. It returns a nonzero if the
+C error estimate (the type of which is determined by ITOL)
+C is less than the user specified tolerance TOL.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+C INTEGER IERR, IUNIT, IWORK(USER DEFINED)
+C DOUBLE PRECISION B(N), X(N), A(N), TOL, ERR, R(N), Z(N)
+C DOUBLE PRECISION P(N), DZ(N), RWORK(USER DEFINED), AK, BK
+C DOUBLE PRECISION BNRM, SOLNRM
+C EXTERNAL MSOLVE
+C
+C IF( ISDCG(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, DZ, RWORK, IWORK,
+C $ AK, BK, BNRM, SOLNRM) .NE. 0 ) THEN ITERATION DONE
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :IN Double Precision X(N).
+C The current approximate solution vector.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See ``Description''
+C in the DCG, DSDCG or DSICCG routines.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MSOLVE :EXT External.
+C Name of a routine which solves a linear system MZ = R for
+C Z given R with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays). The name of the MSOLVE routine must
+C be declared external in the calling program. The calling
+C sequence to MSOLVE is:
+C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and Z is the solution upon return. RWORK is a double
+C precision
+C array that can be used to pass necessary preconditioning
+C information and/or workspace to MSOLVE. IWORK is an integer
+C work array for the same purpose as RWORK.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than tol, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the ``exact''
+C solution or a very accurate approximation (one with an error
+C much less than tol) through a common block,
+C COMMON /SOLBLK/ SOLN( )
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than tol.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :IN Integer.
+C The iteration for which to check for convergence.
+C ERR :OUT Double Precision.
+C Error estimate of error in the X(N) approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Error flag. IERR is set to 3 if ITOL is not one of the
+C acceptable values, see above.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C R :IN Double Precision R(N).
+C The residual R = B-AX.
+C Z :WORK Double Precision Z(N).
+C Workspace used to hold the pseudo-residual M Z = R.
+C P :IN Double Precision P(N).
+C The conjugate direction vector.
+C DZ :WORK Double Precision DZ(N).
+C Workspace used to hold temporary vector(s).
+C RWORK :WORK Double Precision RWORK(USER DEFINABLE).
+C Double Precision array that can be used by MSOLVE.
+C IWORK :WORK Integer IWORK(USER DEFINABLE).
+C Integer array that can be used by MSOLVE.
+C BNRM :INOUT Double Precision.
+C Norm of the right hand side. Type of norm depends on ITOL.
+C Calculated only on the first call.
+C SOLNRM :INOUT Double Precision.
+C 2-Norm of the true solution, SOLN. Only computed and used
+C if ITOL = 11.
+C
+C *Function Return Values:
+C 0 : Error estimate (determined by ITOL) is *NOT* less than the
+C specified tolerance, TOL. The iteration must continue.
+C 1 : Error estimate (determined by ITOL) is less than the
+C specified tolerance, TOL. The iteration can be considered
+C complete.
+C
+C *Precision: Double Precision
+C *See Also:
+C DCG, DSDCG, DSICCG
+C
+C *Cautions:
+C This routine will attempt to write to the fortran logical output
+C unit IUNIT, if IUNIT .ne. 0. Thus, the user must make sure that
+C this logical unit must be attached to a file or terminal
+C before calling this routine with a non-zero value for IUNIT.
+C This routine does not check for the validity of a non-zero IUNIT
+C unit number.
+C***REFERENCES (NONE)
+C***ROUTINES CALLED MSOLVE, DNRM2
+C***COMMON BLOCKS SOLBLK
+C***END PROLOGUE ISDCG
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+ INTEGER ITER, IERR, IUNIT, IWORK(*)
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N)
+ DOUBLE PRECISION Z(N), P(N), DZ(N), RWORK(*)
+ EXTERNAL MSOLVE
+ COMMON /SOLBLK/ SOLN(1)
+C
+C***FIRST EXECUTABLE STATEMENT ISDCG
+ ISDCG = 0
+C
+ IF( ITOL.EQ.1 ) THEN
+C err = ||Residual||/||RightHandSide|| (2-Norms).
+ IF(ITER .EQ. 0) BNRM = DNRM2(N, B, 1)
+ ERR = DNRM2(N, R, 1)/BNRM
+ ELSE IF( ITOL.EQ.2 ) THEN
+C -1 -1
+C err = ||M Residual||/||M RightHandSide|| (2-Norms).
+ IF(ITER .EQ. 0) THEN
+ CALL MSOLVE(N, B, DZ, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+ BNRM = DNRM2(N, DZ, 1)
+ ENDIF
+ ERR = DNRM2(N, Z, 1)/BNRM
+ ELSE IF( ITOL.EQ.11 ) THEN
+C err = ||x-TrueSolution||/||TrueSolution|| (2-Norms).
+ IF(ITER .EQ. 0) SOLNRM = DNRM2(N, SOLN, 1)
+ DO 10 I = 1, N
+ DZ(I) = X(I) - SOLN(I)
+ 10 CONTINUE
+ ERR = DNRM2(N, DZ, 1)/SOLNRM
+ ELSE
+C
+C If we get here ITOL is not one of the acceptable values.
+ ERR = 1.0E10
+ IERR = 3
+ ENDIF
+C
+ IF(IUNIT .NE. 0) THEN
+ IF( ITER.EQ.0 ) THEN
+ WRITE(IUNIT,1000) N, ITOL
+ ENDIF
+ WRITE(IUNIT,1010) ITER, ERR, AK, BK
+ ENDIF
+ IF(ERR .LE. TOL) ISDCG = 1
+ RETURN
+ 1000 FORMAT(' Preconditioned Conjugate Gradient for ',
+ $ 'N, ITOL = ',I5, I5,
+ $ /' ITER',' Error Estimate',' Alpha',
+ $ ' Beta')
+ 1010 FORMAT(1X,I4,1X,E16.7,1X,E16.7,1X,E16.7)
+C------------- LAST LINE OF ISDCG FOLLOWS ------------------------------
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcgn.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcgn.f
new file mode 100644
index 0000000000..fde7dd3f1f
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcgn.f
@@ -0,0 +1,1125 @@
+*DECK DCGN
+      SUBROUTINE DCGN(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MTTVEC,
+     $     MSOLVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P,
+     $     ATP, ATZ, DZ, ATDZ, RWORK, IWORK)
+C***BEGIN PROLOGUE  DCGN
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DCGN-D),
+C             Non-Symmetric Linear system solve, Sparse,
+C             Iterative Precondition, Normal Equations.
+C***AUTHOR  Greenbaum, Anne, Courant Institute
+C           Seager, Mark K., (LLNL)
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C             seager@lll-crg.llnl.gov
+C***PURPOSE  Preconditioned CG Sparse Ax=b Solver for Normal Equations.
+C            Routine to solve a general linear system Ax = b using the
+C            Preconditioned Conjugate Gradient method applied to the
+C            normal equations AA'y = b, x=A'y.
+C***DESCRIPTION
+C *Usage:
+C     INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C     INTEGER ITER, IERR, IUNIT, IWORK(USER DEFINABLE)
+C     DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N)
+C     DOUBLE PRECISION P(N), ATP(N), ATZ(N), DZ(N), ATDZ(N)
+C     DOUBLE PRECISION RWORK(USER DEFINABLE)
+C     EXTERNAL MATVEC, MTTVEC, MSOLVE
+C
+C     CALL DCGN(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MTTVEC,
+C    $     MSOLVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R,
+C    $     Z, P, ATP, ATZ, DZ, ATDZ, RWORK, IWORK)
+C
+C *Arguments:
+C N      :IN       Integer
+C         Order of the Matrix.
+C B      :IN       Double Precision B(N).
+C         Right-hand side vector.
+C X      :INOUT    Double Precision X(N).
+C         On input X is your initial guess for solution vector.
+C         On output X is the final approximate solution.
+C NELT   :IN       Integer.
+C         Number of Non-Zeros stored in A.
+C IA     :IN       Integer IA(NELT).
+C JA     :IN       Integer JA(NELT).
+C A      :IN       Double Precision A(NELT).
+C         These arrays contain the matrix data structure for A.
+C         It could take any form.  See "Description", below
+C         for more late breaking details...
+C ISYM   :IN       Integer.
+C         Flag to indicate symmetric storage format.
+C         If ISYM=0, all nonzero entries of the matrix are stored.
+C         If ISYM=1, the matrix is symmetric, and only the upper
+C         or lower triangle of the matrix is stored.
+C MATVEC :EXT      External.
+C         Name of a routine which performs the matrix vector multiply
+C         y = A*X given A and X.  The name of the MATVEC routine must
+C         be declared external in the calling program.  The calling
+C         sequence to MATVEC is:
+C             CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C         Where N is the number of unknowns, Y is the product A*X
+C         upon return X is an input vector, NELT is the number of
+C         non-zeros in the SLAP-Column IA, JA, A storage for the matrix
+C         A.  ISYM is a flag which, if non-zero, denotes that A is
+C         symmetric and only the lower or upper triangle is stored.
+C MTTVEC :EXT      External.
+C         Name of a routine which performs the matrix transpose vector
+C         multiply y = A'*X given A and X (where ' denotes transpose).
+C         The name of the MTTVEC routine must be declared external in
+C         the calling program.  The calling sequence to MTTVEC is the
+C         same as that for MATVEC, viz.:
+C             CALL MTTVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C         Where N is the number of unknowns, Y is the product A'*X
+C         upon return X is an input vector, NELT is the number of
+C         non-zeros in the SLAP-Column IA, JA, A storage for the matrix
+C         A.  ISYM is a flag which, if non-zero, denotes that A is
+C         symmetric and only the lower or upper triangle is stored.
+C MSOLVE :EXT      External.
+C         Name of a routine which solves a linear system MZ = R for
+C         Z given R with the preconditioning matrix M (M is supplied via
+C         RWORK and IWORK arrays).  The name of the MSOLVE routine must
+C         be declared external in the calling program.  The calling
+C         sequence to MSOLVE is:
+C             CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C         Where N is the number of unknowns, R is the right-hand side
+C         vector, and Z is the solution upon return.  RWORK is a
+C         double precision
+C         array that can be used to pass necessary preconditioning
+C         information and/or workspace to MSOLVE.  IWORK is an integer
+C         work array for the same purpose as RWORK.
+C ITOL   :IN       Integer.
+C         Flag to indicate type of convergence criterion.
+C         If ITOL=1, iteration stops when the 2-norm of the residual
+C         divided by the 2-norm of the right-hand side is less than TOL.
+C         If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C         residual divided by the 2-norm of M-inv times the right hand
+C         side is less than TOL, where M-inv is the inverse of the
+C         diagonal of A.
+C         ITOL=11 is often useful for checking and comparing different
+C         routines.  For this case, the user must supply the "exact"
+C         solution or a very accurate approximation (one with an error
+C         much less than TOL) through a common block,
+C                     COMMON /SOLBLK/ SOLN(1)
+C         if ITOL=11, iteration stops when the 2-norm of the difference
+C         between the iterative approximation and the user-supplied
+C         solution divided by the 2-norm of the user-supplied solution
+C         is less than TOL.  Note that this requires the user to set up
+C         the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C         The routine with this declaration should be loaded before the
+C         stop test so that the correct length is used by the loader.
+C         This procedure is not standard Fortran and may not work
+C         correctly on your system (although it has worked on every
+C         system the authors have tried).  If ITOL is not 11 then this
+C         common block is indeed standard Fortran.
+C TOL    :IN       Double Precision.
+C         Convergence criterion, as described above.
+C ITMAX  :IN       Integer.
+C         Maximum number of iterations.
+C ITER   :OUT      Integer.
+C         Number of iterations required to reach convergence, or
+C         ITMAX+1 if convergence criterion could not be achieved in
+C         ITMAX iterations.
+C ERR    :OUT      Double Precision.
+C         Error estimate of error in final approximate solution, as
+C         defined by ITOL.
+C IERR   :OUT      Integer.
+C         Return error flag.
+C           IERR = 0 => All went well.
+C           IERR = 1 => Insufficient storage allocated
+C                       for WORK or IWORK.
+C           IERR = 2 => Method failed to converge in
+C                       ITMAX steps.
+C           IERR = 3 => Error in user input.  Check input
+C                       value of N, ITOL.
+C           IERR = 4 => User error tolerance set too tight.
+C                       Reset to 500.0*R1MACH(3).  Iteration proceeded.
+C           IERR = 5 => Preconditioning matrix, M, is not
+C                       Positive Definite.  $(r,z) < 0.0$.
+C           IERR = 6 => Matrix A is not Positive Definite.
+C                       $(p,Ap) < 0.0$.
+C IUNIT  :IN       Integer.
+C         Unit number on which to write the error at each iteration,
+C         if this is desired for monitoring convergence.  If unit
+C         number is 0, no writing will occur.
+C R      :WORK     Double Precision R(N).
+C Z      :WORK     Double Precision Z(N).
+C P      :WORK     Double Precision P(N).
+C ATP    :WORK     Double Precision ATP(N).
+C ATZ    :WORK     Double Precision ATZ(N).
+C DZ     :WORK     Double Precision DZ(N).
+C ATDZ   :WORK     Double Precision ATDZ(N).
+C RWORK  :WORK     Double Precision RWORK(USER DEFINABLE).
+C         Double Precision array that can be used by MSOLVE.
+C IWORK  :WORK     Integer IWORK(USER DEFINABLE).
+C         Integer array that can be used by MSOLVE.
+C
+C *Description:
+C       This routine applies the preconditioned conjugate gradient
+C       (PCG) method to a non-symmetric system of equations Ax=b.  To
+C       do this the normal equations are solved:
+C               AA' y  = b, where  x  = A'y.
+C       In PCG method the iteration count is determined by condition
+C                               -1
+C       number of the  matrix (M  A).   In the  situation where  the
+C       normal equations are  used  to solve a  non-symmetric system
+C       the condition number depends on  AA' and should therefore be
+C       much worse than that of A.  This is the conventional wisdom.
+C       When one has a good preconditioner for AA' this may not hold.
+C       The latter is the situation when DCGN should be tried.
+C
+C       If one is trying to solve  a symmetric system, SCG should be
+C       used instead.
+C
+C       This routine does not care what matrix data structure is
+C       used for A and M.  It simply calls the MATVEC and MSOLVE
+C       routines, with the arguments as described above.  The user
+C       could write any type of structure and the appropriate MATVEC
+C       and MSOLVE routines.  It is assumed that A is stored in the
+C       IA, JA, A arrays in some fashion and that M (or INV(M)) is
+C       stored in IWORK and RWORK) in some fashion.  The SLAP
+C       routines DSDCGN and DSLUCN are examples of this procedure.
+C
+C       Two examples of matrix data structures are the: 1) SLAP
+C       Triad format and 2) SLAP Column format.
+C
+C       =================== S L A P Triad format ===================
+C
+C       In this format only the non-zeros are stored.  They may
+C       appear in *ANY* order.  The user supplies three arrays of
+C       length NELT, where NELT is the number of non-zeros in the
+C       matrix: (IA(NELT), JA(NELT), A(NELT)).  For each non-zero
+C       the user puts the row and column index of that matrix
+C       element in the IA and JA arrays.  The value of the non-zero
+C       matrix element is placed in the corresponding location of
+C       the A array.  This is an extremely easy data structure to
+C       generate.  On the other hand it is not too efficient on
+C       vector computers for the iterative solution of linear
+C       systems.  Hence, SLAP changes this input data structure to
+C       the SLAP Column format for the iteration (but does not
+C       change it back).
+C
+C       Here is an example of the SLAP Triad storage format for a
+C       5x5 Matrix.  Recall that the entries may appear in any order.
+C
+C           5x5 Matrix      SLAP Triad format for 5x5 matrix on left.
+C                              1  2  3  4  5  6  7  8  9 10 11
+C       |11 12  0  0 15|   A: 51 12 11 33 15 53 55 22 35 44 21
+C       |21 22  0  0  0|  IA:  5  1  1  3  1  5  5  2  3  4  2
+C       | 0  0 33  0 35|  JA:  1  2  1  3  5  3  5  2  5  4  1
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C       =================== S L A P Column format ==================
+C       This routine requires that the matrix A be stored in the
+C       SLAP Column format.  In this format the non-zeros are stored
+C       counting down columns (except for the diagonal entry, which
+C       must appear first in each "column") and are stored in the
+C       double precision array A.  In other words, for each column
+C       in the matrix put the diagonal entry in A.  Then put in the
+C       other non-zero elements going down the column (except the
+C       diagonal) in order.  The IA array holds the row index for
+C       each non-zero.  The JA array holds the offsets into the IA,
+C       A arrays for the beginning of each column.  That is,
+C       IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C       ICOL-th column in IA and A.  IA(JA(ICOL+1)-1),
+C       A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C       Note that we always have JA(N+1) = NELT+1, where N is the
+C       number of columns in the matrix and NELT is the number of
+C       non-zeros in the matrix.
+C
+C       Here is an example of the SLAP Column storage format for a
+C       5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C       column):
+C
+C           5x5 Matrix      SLAP Column format for 5x5 matrix on left.
+C                              1  2  3    4  5    6  7    8    9 10 11
+C       |11 12  0  0 15|   A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C       |21 22  0  0  0|  IA:  1  2  5 |  2  1 |  3  5 |  4 |  5  1  3
+C       | 0  0 33  0 35|  JA:  1  4  6    8  9   12
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C *Precision:           Double Precision
+C *See Also:
+C       DSDCGN, DSLUCN, ISDCGN
+C***REFERENCES  (NONE)
+C***ROUTINES CALLED  MATVEC, MTTVEC, MSOLVE, ISDCGN,
+C                    DCOPY, DDOT, DAXPY, D1MACH
+C***END PROLOGUE  DCGN
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+      INTEGER IUNIT, IWORK(*)
+C         A holds the NELT matrix non-zeros, so it is dimensioned
+C         NELT (not N) to agree with the calling sequence above.
+      DOUBLE PRECISION B(N), X(N), A(NELT), R(N), Z(N), P(N)
+      DOUBLE PRECISION ATP(N), ATZ(N), DZ(N), ATDZ(N), RWORK(*)
+      EXTERNAL MATVEC, MTTVEC, MSOLVE
+C
+C         Check user input.
+C***FIRST EXECUTABLE STATEMENT  DCGN
+      ITER = 0
+      IERR = 0
+      IF( N.LT.1 ) THEN
+         IERR = 3
+         RETURN
+      ENDIF
+C         Guard against a tolerance tighter than machine precision
+C         allows; note that TOL is overwritten in that case (IERR=4).
+      TOLMIN = 500.0*D1MACH(3)
+      IF( TOL.LT.TOLMIN ) THEN
+         TOL = TOLMIN
+         IERR = 4
+      ENDIF
+C         Calculate initial residual and pseudo-residual, and check
+C         stopping criterion.
+      CALL MATVEC(N, X, R, NELT, IA, JA, A, ISYM)
+      DO 10 I = 1, N
+         R(I) = B(I) - R(I)
+ 10   CONTINUE
+      CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+      CALL MTTVEC(N, Z, ATZ, NELT, IA, JA, A, ISYM)
+C
+      IF( ISDCGN(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MTTVEC, MSOLVE,
+     $     ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, ATP, ATZ,
+     $     DZ, ATDZ, RWORK, IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 )
+     $     GO TO 200
+      IF( IERR.NE.0 ) RETURN
+C
+C         ***** iteration loop *****
+C
+      DO 100 K=1,ITMAX
+         ITER = K
+C
+C         Calculate coefficient BK and direction vector P.
+         BKNUM = DDOT(N, Z, 1, R, 1)
+         IF( BKNUM.LE.0.0D0 ) THEN
+            IERR = 6
+            RETURN
+         ENDIF
+         IF(ITER .EQ. 1) THEN
+            CALL DCOPY(N, Z, 1, P, 1)
+         ELSE
+            BK = BKNUM/BKDEN
+            DO 20 I = 1, N
+               P(I) = Z(I) + BK*P(I)
+ 20         CONTINUE
+         ENDIF
+C         Save (z,r) to serve as the BK denominator next iteration.
+         BKDEN = BKNUM
+C
+C         Calculate coefficient AK, new iterate X, new residual R,
+C         and new pseudo-residual ATZ.
+         IF(ITER .NE. 1) CALL DAXPY(N, BK, ATP, 1, ATZ, 1)
+         CALL DCOPY(N, ATZ, 1, ATP, 1)
+         AKDEN = DDOT(N, ATP, 1, ATP, 1)
+         IF( AKDEN.LE.0.0D0 ) THEN
+            IERR = 6
+            RETURN
+         ENDIF
+         AK = BKNUM/AKDEN
+         CALL DAXPY(N, AK, ATP, 1, X, 1)
+         CALL MATVEC(N, ATP, Z, NELT, IA, JA, A, ISYM)
+         CALL DAXPY(N, -AK, Z, 1, R, 1)
+         CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+         CALL MTTVEC(N, Z, ATZ, NELT, IA, JA, A, ISYM)
+C
+C         check stopping criterion.
+         IF( ISDCGN(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MTTVEC,
+     $        MSOLVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R,
+     $        Z, P, ATP, ATZ, DZ, ATDZ, RWORK, IWORK, AK, BK, BNRM,
+     $        SOLNRM) .NE. 0) GOTO 200
+C
+ 100  CONTINUE
+C
+C         ***** end of loop *****
+C
+C         stopping criterion not satisfied.
+      ITER = ITMAX + 1
+C
+ 200  RETURN
+C------------- LAST LINE OF DCGN FOLLOWS ----------------------------
+      END
+*DECK DSDCGN
+      SUBROUTINE DSDCGN(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+     $     ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE  DSDCGN
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(SSDCGN-D),
+C             Non-Symmetric Linear system solve, Sparse,
+C             Iterative Precondition
+C***AUTHOR  Greenbaum, Anne, Courant Institute
+C           Seager, Mark K., (LLNL)
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C             seager@lll-crg.llnl.gov
+C***PURPOSE  Diagonally Scaled CG Sparse Ax=b Solver for Normal Eqn's.
+C            Routine to solve a general linear system Ax = b using
+C            diagonal scaling with the Conjugate Gradient method
+C            applied to the normal equations, viz., AA'y = b,
+C            where x = A'y.
+C***DESCRIPTION
+C *Usage:
+C     INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C     INTEGER ITER, IERR, IUNIT, LENW, IWORK, LENIW
+C     DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(8*N)
+C
+C     CALL DSDCGN(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C    $     ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW)
+C
+C *Arguments:
+C N      :IN       Integer
+C         Order of the Matrix.
+C B      :IN       Double Precision B(N).
+C         Right-hand side vector.
+C X      :INOUT    Double Precision X(N).
+C         On input X is your initial guess for solution vector.
+C         On output X is the final approximate solution.
+C NELT   :IN       Integer.
+C         Number of Non-Zeros stored in A.
+C IA     :INOUT    Integer IA(NELT).
+C JA     :INOUT    Integer JA(NELT).
+C A      :INOUT    Double Precision A(NELT).
+C         These arrays should hold the matrix A in either the SLAP
+C         Triad format or the SLAP Column format.  See "Description",
+C         below.  If the SLAP Triad format is chosen it is changed
+C         internally to the SLAP Column format.
+C ISYM   :IN       Integer.
+C         Flag to indicate symmetric storage format.
+C         If ISYM=0, all nonzero entries of the matrix are stored.
+C         If ISYM=1, the matrix is symmetric, and only the upper
+C         or lower triangle of the matrix is stored.
+C ITOL   :IN       Integer.
+C         Flag to indicate type of convergence criterion.
+C         If ITOL=1, iteration stops when the 2-norm of the residual
+C         divided by the 2-norm of the right-hand side is less than TOL.
+C         If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C         residual divided by the 2-norm of M-inv times the right hand
+C         side is less than TOL, where M-inv is the inverse of the
+C         diagonal of A.
+C         ITOL=11 is often useful for checking and comparing different
+C         routines.  For this case, the user must supply the "exact"
+C         solution or a very accurate approximation (one with an error
+C         much less than TOL) through a common block,
+C                     COMMON /SOLBLK/ SOLN(1)
+C         if ITOL=11, iteration stops when the 2-norm of the difference
+C         between the iterative approximation and the user-supplied
+C         solution divided by the 2-norm of the user-supplied solution
+C         is less than TOL.  Note that this requires the user to set up
+C         the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C         The routine with this declaration should be loaded before the
+C         stop test so that the correct length is used by the loader.
+C         This procedure is not standard Fortran and may not work
+C         correctly on your system (although it has worked on every
+C         system the authors have tried).  If ITOL is not 11 then this
+C         common block is indeed standard Fortran.
+C TOL    :IN       Double Precision.
+C         Convergence criterion, as described above.
+C ITMAX  :IN       Integer.
+C         Maximum number of iterations.
+C ITER   :OUT      Integer.
+C         Number of iterations required to reach convergence, or
+C         ITMAX+1 if convergence criterion could not be achieved in
+C         ITMAX iterations.
+C ERR    :OUT      Double Precision.
+C         Error estimate of error in final approximate solution, as
+C         defined by ITOL.
+C IERR   :OUT      Integer.
+C         Return error flag.
+C           IERR = 0 => All went well.
+C           IERR = 1 => Insufficient storage allocated
+C                       for WORK or IWORK.
+C           IERR = 2 => Method failed to converge in
+C                       ITMAX steps.
+C           IERR = 3 => Error in user input.  Check input
+C                       value of N, ITOL.
+C           IERR = 4 => User error tolerance set too tight.
+C                       Reset to 500.0*D1MACH(3).  Iteration proceeded.
+C           IERR = 5 => Preconditioning matrix, M, is not
+C                       Positive Definite.  $(r,z) < 0.0$.
+C           IERR = 6 => Matrix A is not Positive Definite.
+C                       $(p,Ap) < 0.0$.
+C IUNIT  :IN       Integer.
+C         Unit number on which to write the error at each iteration,
+C         if this is desired for monitoring convergence.  If unit
+C         number is 0, no writing will occur.
+C RWORK  :WORK     Double Precision RWORK(LENW).
+C         Double Precision array used for workspace.
+C LENW   :IN       Integer.
+C         Length of the double precision workspace, RWORK.
+C         LENW >= 8*N.
+C IWORK  :WORK     Integer IWORK(LENIW).
+C         Used to hold pointers into the RWORK array.
+C         Upon return the following locations of IWORK hold information
+C         which may be of use to the user:
+C         IWORK(9)  Amount of Integer workspace actually used.
+C         IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW  :IN       Integer.
+C         Length of the integer workspace, IWORK.  LENIW >= 10.
+C
+C *Description:
+C       This routine is simply a driver for the DCGN routine.  It
+C       calls the DSD2S routine to set up the preconditioning and
+C       then calls DCGN with the appropriate MATVEC and MSOLVE
+C       routines.
+C
+C       The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C       data structures: 1) the SLAP Triad format or 2) the SLAP
+C       Column format.  The user can hand this routine either one
+C       of these data structures and SLAP will figure out which one
+C       is being used and act accordingly.
+C
+C       =================== S L A P Triad format ===================
+C
+C       This routine requires that the matrix A be stored in the
+C       SLAP Triad format.  In this format only the non-zeros are
+C       stored.  They may appear in *ANY* order.  The user supplies
+C       three arrays of length NELT, where NELT is the number of
+C       non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)).  For
+C       each non-zero the user puts the row and column index of that
+C       matrix element in the IA and JA arrays.  The value of the
+C       non-zero matrix element is placed in the corresponding
+C       location of the A array.  This is an extremely easy data
+C       structure to generate.  On the other hand it is not too
+C       efficient on vector computers for the iterative solution of
+C       linear systems.  Hence, SLAP changes this input data
+C       structure to the SLAP Column format for the iteration (but
+C       does not change it back).
+C
+C       Here is an example of the SLAP Triad storage format for a
+C       5x5 Matrix.  Recall that the entries may appear in any order.
+C
+C           5x5 Matrix      SLAP Triad format for 5x5 matrix on left.
+C                              1  2  3  4  5  6  7  8  9 10 11
+C       |11 12  0  0 15|   A: 51 12 11 33 15 53 55 22 35 44 21
+C       |21 22  0  0  0|  IA:  5  1  1  3  1  5  5  2  3  4  2
+C       | 0  0 33  0 35|  JA:  1  2  1  3  5  3  5  2  5  4  1
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C       =================== S L A P Column format ==================
+C       This routine requires that the matrix A be stored in the
+C       SLAP Column format.  In this format the non-zeros are stored
+C       counting down columns (except for the diagonal entry, which
+C       must appear first in each "column") and are stored in the
+C       double precision array A.  In other words, for each column
+C       in the matrix put the diagonal entry in A.  Then put in the
+C       other non-zero elements going down the column (except the
+C       diagonal) in order.  The IA array holds the row index for
+C       each non-zero.  The JA array holds the offsets into the IA,
+C       A arrays for the beginning of each column.  That is,
+C       IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C       ICOL-th column in IA and A.  IA(JA(ICOL+1)-1),
+C       A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C       Note that we always have JA(N+1) = NELT+1, where N is the
+C       number of columns in the matrix and NELT is the number of
+C       non-zeros in the matrix.
+C
+C       Here is an example of the SLAP Column storage format for a
+C       5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C       column):
+C
+C           5x5 Matrix      SLAP Column format for 5x5 matrix on left.
+C                              1  2  3    4  5    6  7    8    9 10 11
+C       |11 12  0  0 15|   A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C       |21 22  0  0  0|  IA:  1  2  5 |  2  1 |  3  5 |  4 |  5  1  3
+C       | 0  0 33  0 35|  JA:  1  4  6    8  9   12
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C *Precision:           Double Precision
+C *Side Effects:
+C       The SLAP Triad format (IA, JA, A) is modified internally to be
+C       the SLAP Column format.  See above.
+C
+C *See Also:
+C       DCGN, DSD2S, DSMV, DSMTV, DSDI
+C***REFERENCES  (NONE)
+C***ROUTINES CALLED  DS2Y, DCHKW, DSD2S, DCGN, DSMV, DSMTV, DSDI
+C***END PROLOGUE  DSDCGN
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL
+      INTEGER ITMAX, ITER, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+      EXTERNAL DSMV, DSMTV, DSDI
+C         LOCRB/LOCIB are the first free slots in RWORK/IWORK; the
+C         IWORK slots below LOCIB carry pointers for the MSOLVE routine.
+      PARAMETER (LOCRB=1, LOCIB=11)
+C
+C         Modify the SLAP matrix data structure to YSMP-Column.
+C***FIRST EXECUTABLE STATEMENT  DSDCGN
+      IERR = 0
+      IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+         IERR = 3
+         RETURN
+      ENDIF
+      CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C         Set up the work arrays.
+C         Compute the inverse of the diagonal of AA'.  This will be
+C         used as the preconditioner.
+      LOCIW = LOCIB
+C
+C         Carve RWORK into D (the inverse diagonal, length N) followed
+C         by the seven length-N work vectors DCGN needs:
+C         R, Z, P, ATP, ATZ, DZ, ATDZ -- 8*N words in all.
+      LOCD = LOCRB
+      LOCR = LOCD + N
+      LOCZ = LOCR + N
+      LOCP = LOCZ + N
+      LOCATP = LOCP + N
+      LOCATZ = LOCATP + N
+      LOCDZ = LOCATZ + N
+      LOCATD = LOCDZ + N
+      LOCW = LOCATD + N
+C
+C         Check the workspace allocations.
+C NOTE(review): LOCIW = LOCIB = 11 here, while the prologue states
+C LENIW >= 10 -- confirm which bound DCHKW actually enforces.
+      CALL DCHKW( 'DSDCGN', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+      IF( IERR.NE.0 ) RETURN
+C
+C         Record where the inverse-diagonal vector starts in RWORK --
+C         presumably read by the DSDI preconditioner routine that is
+C         passed to DCGN as MSOLVE below; verify against DSDI.
+      IWORK(4) = LOCD
+      IWORK(9) = LOCIW
+      IWORK(10) = LOCW
+C
+C         Fill RWORK(LOCD..LOCD+N-1) with the inverse diagonal of AA'.
+      CALL DSD2S(N, NELT, IA, JA, A, ISYM, RWORK(1))
+C
+C         Perform Conjugate Gradient algorithm on the normal equations.
+      CALL DCGN( N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSMTV, DSDI,
+     $     ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK(LOCR),
+     $     RWORK(LOCZ), RWORK(LOCP), RWORK(LOCATP), RWORK(LOCATZ),
+     $     RWORK(LOCDZ), RWORK(LOCATD), RWORK, IWORK )
+C
+C         Flag failure to converge within the iteration limit.
+      IF( ITER.GT.ITMAX ) IERR = 2
+      RETURN
+C------------- LAST LINE OF DSDCGN FOLLOWS ----------------------------
+      END
+*DECK DSLUCN
+ SUBROUTINE DSLUCN(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSLUCN
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(SSLUCN-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Incomplete LU Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Incomplete LU CG Sparse Ax=b Solver for Normal Equations.
+C Routine to solve a general linear system Ax = b using the
+C incomplete LU decomposition with the Conjugate Gradient
+C method applied to the normal equations, viz., AA'y = b,
+C x=A'y.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(NEL+NU+4*N+2), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(NEL+NU+8*N)
+C
+C CALL DSLUCN(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "Description",
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IERR = 7 => Incomplete factorization broke down
+C and was fudged. Resulting preconditioning may
+C be less than the best.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace. NEL is the number
+C of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal). NU is the number of nonzeros in the upper
+C triangle of the matrix (including the diagonal).
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= NEL+NU+8*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Integer array used for workspace. NEL is the number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal). NU is the number of nonzeros in the upper
+C triangle of the matrix (including the diagonal).
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >=
+C NEL+NU+4*N+12.
+C
+C *Description:
+C This routine is simply a driver for the DCGN routine. It
+C calls the DSILUS routine to set up the preconditioning and then
+C calls DCGN with the appropriate MATVEC and MSOLVE routines.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either of the
+C of these data structures and SLAP will figure out which on
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to be
+C the SLAP Column format. See above.
+C
+C *See Also:
+C DCGN, SDCGN, DSILUS
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DSILUS, DCHKW, DSMV, DSMTV, DSMMTI, DCGN
+C***END PROLOGUE DSLUCN
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+ INTEGER IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+ EXTERNAL DSMV, DSMTV, DSMMTI
+C
+C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSLUCN
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Count number of Non-Zero elements preconditioner ILU matrix.
+C Then set up the work arrays.
+ NL = 0
+ NU = 0
+ DO 20 ICOL = 1, N
+C Don't count diagional.
+ JBGN = JA(ICOL)+1
+ JEND = JA(ICOL+1)-1
+ IF( JBGN.LE.JEND ) THEN
+CVD$ NOVECTOR
+ DO 10 J = JBGN, JEND
+ IF( IA(J).GT.ICOL ) THEN
+ NL = NL + 1
+ IF( ISYM.NE.0 ) NU = NU + 1
+ ELSE
+ NU = NU + 1
+ ENDIF
+ 10 CONTINUE
+ ENDIF
+ 20 CONTINUE
+C
+ LOCIL = LOCIB
+ LOCJL = LOCIL + N+1
+ LOCIU = LOCJL + NL
+ LOCJU = LOCIU + NU
+ LOCNR = LOCJU + N+1
+ LOCNC = LOCNR + N
+ LOCIW = LOCNC + N
+C
+ LOCL = LOCRB
+ LOCDIN = LOCL + NL
+ LOCU = LOCDIN + N
+ LOCR = LOCU + NU
+ LOCZ = LOCR + N
+ LOCP = LOCZ + N
+ LOCATP = LOCP + N
+ LOCATZ = LOCATP + N
+ LOCDZ = LOCATZ + N
+ LOCATD = LOCDZ + N
+ LOCW = LOCATD + N
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSLUCN', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+ IWORK(1) = LOCIL
+ IWORK(2) = LOCJL
+ IWORK(3) = LOCIU
+ IWORK(4) = LOCJU
+ IWORK(5) = LOCL
+ IWORK(6) = LOCDIN
+ IWORK(7) = LOCU
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+C Compute the Incomplete LU decomposition.
+ CALL DSILUS( N, NELT, IA, JA, A, ISYM, NL, IWORK(LOCIL),
+ $ IWORK(LOCJL), RWORK(LOCL), RWORK(LOCDIN), NU, IWORK(LOCIU),
+ $ IWORK(LOCJU), RWORK(LOCU), IWORK(LOCNR), IWORK(LOCNC) )
+C
+C Perform Conjugate Gradient algorithm on the normal equations.
+ CALL DCGN(N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSMTV, DSMMTI,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK(LOCR),
+ $ RWORK(LOCZ), RWORK(LOCP), RWORK(LOCATP), RWORK(LOCATZ),
+ $ RWORK(LOCDZ), RWORK(LOCATD), RWORK, IWORK )
+C
+ IF( ITER.GT.ITMAX ) IERR = 2
+ RETURN
+C------------- LAST LINE OF DSLUCN FOLLOWS ----------------------------
+ END
+*DECK ISDCGN
+      FUNCTION ISDCGN(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MTTVEC,
+     $     MSOLVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z,
+     $     P, ATP, ATZ, DZ, ATDZ, RWORK, IWORK, AK, BK, BNRM, SOLNRM)
+C***BEGIN PROLOGUE  ISDCGN
+C***REFER TO  DCGN, DSDCGN, DSLUCN
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(ISDCGN-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, Normal Equations
+C***AUTHOR  Greenbaum, Anne, Courant Institute
+C           Seager, Mark K., (LLNL)
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C             seager@lll-crg.llnl.gov
+C***PURPOSE  Preconditioned CG on Normal Equations Stop Test.
+C            This routine calculates the stop test for the Conjugate
+C            Gradient iteration scheme applied to the normal
+C            equations.  It returns a nonzero if the error estimate
+C            (the type of which is determined by ITOL) is less than
+C            the user specified tolerance TOL.
+C***DESCRIPTION
+C *Usage:
+C     INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+C     INTEGER IERR, IUNIT, IWORK(USER DEFINED)
+C     DOUBLE PRECISION B(N), X(N), A(N), TOL, ERR, R(N), Z(N), P(N)
+C     DOUBLE PRECISION ATP(N), ATZ(N), DZ(N), ATDZ(N)
+C     DOUBLE PRECISION RWORK(USER DEFINED), AK, BK, BNRM, SOLNRM
+C     EXTERNAL MATVEC, MTTVEC, MSOLVE
+C
+C     IF( ISDCGN(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MTTVEC,
+C    $     MSOLVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P,
+C    $     ATP, ATZ, DZ, ATDZ, RWORK, IWORK, AK, BK, BNRM, SOLNRM)
+C    $     .NE. 0 ) THEN ITERATION DONE
+C
+C *Arguments:
+C N      :IN       Integer
+C         Order of the Matrix.
+C B      :IN       Double Precision B(N).
+C         Right-hand side vector.
+C X      :IN       Double Precision X(N).
+C         The current approximate solution vector.
+C NELT   :IN       Integer.
+C         Number of Non-Zeros stored in A.
+C IA     :IN       Integer IA(NELT).
+C JA     :IN       Integer JA(NELT).
+C A      :IN       Double Precision A(NELT).
+C         These arrays contain the matrix data structure for A.
+C         It could take any form.  See "Description" in the
+C         SDCGN routine.
+C ISYM   :IN       Integer.
+C         Flag to indicate symmetric storage format.
+C         If ISYM=0, all nonzero entries of the matrix are stored.
+C         If ISYM=1, the matrix is symmetric, and only the upper
+C         or lower triangle of the matrix is stored.
+C MATVEC :EXT      External.
+C         Name of a routine which performs the matrix vector multiply
+C         Y = A*X given A and X.  The name of the MATVEC routine must
+C         be declared external in the calling program.  The calling
+C         sequence to MATVEC is:
+C             CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C         Where N is the number of unknowns, Y is the product A*X
+C         upon return X is an input vector, NELT is the number of
+C         non-zeros in the SLAP-Column IA, JA, A storage for the matrix
+C         A.  ISYM is a flag which, if non-zero, denotes that A is
+C         symmetric and only the lower or upper triangle is stored.
+C MTTVEC :EXT      External.
+C         Name of a routine which performs the matrix transpose vector
+C         multiply y = A'*X given A and X (where ' denotes transpose).
+C         The name of the MTTVEC routine must be declared external in
+C         the calling program.  The calling sequence to MTTVEC is the
+C         same as that for MATVEC, viz.:
+C             CALL MTTVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C         Where N is the number of unknowns, Y is the product A'*X
+C         upon return X is an input vector, NELT is the number of
+C         non-zeros in the SLAP-Column IA, JA, A storage for the matrix
+C         A.  ISYM is a flag which, if non-zero, denotes that A is
+C         symmetric and only the lower or upper triangle is stored.
+C MSOLVE :EXT      External.
+C         Name of a routine which solves a linear system MZ = R for
+C         Z given R with the preconditioning matrix M (M is supplied via
+C         RWORK and IWORK arrays).  The name of the MSOLVE routine must
+C         be declared external in the calling program.  The calling
+C         sequence to MSOLVE is:
+C             CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C         Where N is the number of unknowns, R is the right-hand side
+C         vector, and Z is the solution upon return.  RWORK is a
+C         double precision
+C         array that can be used to pass necessary preconditioning
+C         information and/or workspace to MSOLVE.  IWORK is an integer
+C         work array for the same purpose as RWORK.
+C ITOL   :IN       Integer.
+C         Flag to indicate type of convergence criterion.
+C         If ITOL=1, iteration stops when the 2-norm of the residual
+C         divided by the 2-norm of the right-hand side is less than TOL.
+C         If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C         residual divided by the 2-norm of M-inv times the right hand
+C         side is less than tol, where M-inv is the inverse of the
+C         diagonal of A.
+C         ITOL=11 is often useful for checking and comparing different
+C         routines.  For this case, the user must supply the "exact"
+C         solution or a very accurate approximation (one with an error
+C         much less than TOL) through a common block,
+C             COMMON /SOLBLK/ SOLN(1)
+C         if ITOL=11, iteration stops when the 2-norm of the difference
+C         between the iterative approximation and the user-supplied
+C         solution divided by the 2-norm of the user-supplied solution
+C         is less than TOL.  Note that this requires the user to set up
+C         the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C         The routine with this declaration should be loaded before the
+C         stop test so that the correct length is used by the loader.
+C         This procedure is not standard Fortran and may not work
+C         correctly on your system (although it has worked on every
+C         system the authors have tried).  If ITOL is not 11 then this
+C         common block is indeed standard Fortran.
+C TOL    :IN       Double Precision.
+C         Convergence criterion, as described above.
+C ITMAX  :IN       Integer.
+C         Maximum number of iterations.
+C ITER   :IN       Integer.
+C         The iteration for which to check for convergence.
+C ERR    :OUT      Double Precision.
+C         Error estimate of error in the X(N) approximate solution, as
+C         defined by ITOL.
+C IERR   :OUT      Integer.
+C         Error flag.  IERR is set to 3 if ITOL is not one of the
+C         acceptable values, see above.
+C IUNIT  :IN       Integer.
+C         Unit number on which to write the error at each iteration,
+C         if this is desired for monitoring convergence.  If unit
+C         number is 0, no writing will occur.
+C R      :IN       Double Precision R(N).
+C         The residual R = B-AX.
+C Z      :WORK     Double Precision Z(N).
+C P      :IN       Double Precision P(N).
+C         The conjugate direction vector.
+C ATP    :IN       Double Precision ATP(N).
+C         A-transpose times the conjugate direction vector.
+C ATZ    :IN       Double Precision ATZ(N).
+C         A-transpose times the pseudo-residual.
+C DZ     :IN       Double Precision DZ(N).
+C         Workspace used to hold temporary vector(s).
+C ATDZ   :WORK     Double Precision ATDZ(N).
+C         Workspace.
+C RWORK  :WORK     Double Precision RWORK(USER DEFINABLE).
+C         Double Precision array that can be used by MSOLVE.
+C IWORK  :WORK     Integer IWORK(USER DEFINABLE).
+C         Integer array that can be used by MSOLVE.
+C AK     :IN       Double Precision.
+C         Current iteration parameter Alpha; used here only for the
+C         convergence report written to IUNIT.
+C BK     :IN       Double Precision.
+C         Current iteration parameter Beta; used here only for the
+C         convergence report written to IUNIT.
+C BNRM   :INOUT    Double Precision.
+C         Norm of the right hand side.  Type of norm depends on ITOL.
+C         Calculated only on the first call.
+C SOLNRM :INOUT    Double Precision.
+C         2-Norm of the true solution, SOLN.  Only computed and used
+C         if ITOL = 11.
+C
+C *Function Return Values:
+C       0 : Error estimate (determined by ITOL) is *NOT* less than the
+C           specified tolerance, TOL.  The iteration must continue.
+C       1 : Error estimate (determined by ITOL) is less than the
+C           specified tolerance, TOL.  The iteration can be considered
+C           complete.
+C
+C *Precision:           Double Precision
+C *See Also:
+C       SDCGN
+C
+C *Cautions:
+C     This routine will attempt to write to the fortran logical output
+C     unit IUNIT, if IUNIT .ne. 0.  Thus, the user must make sure that
+C     this logical unit is attached to a file or terminal
+C     before calling this routine with a non-zero value for IUNIT.
+C     This routine does not check for the validity of a non-zero IUNIT
+C     unit number.
+C***REFERENCES  (NONE)
+C***ROUTINES CALLED  MATVEC, MTTVEC, MSOLVE and the BLAS
+C***COMMON BLOCKS    SOLBLK
+C***END PROLOGUE  ISDCGN
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+      INTEGER IUNIT, IWORK(*)
+      DOUBLE PRECISION B(N), X(N), A(N), TOL, ERR, R(N), Z(N), P(N)
+      DOUBLE PRECISION ATP(N), ATZ(N), DZ(N), ATDZ(N), RWORK(*)
+      DOUBLE PRECISION AK, BK, BNRM, SOLNRM
+      EXTERNAL MATVEC, MTTVEC, MSOLVE
+      COMMON /SOLBLK/ SOLN(1)
+C
+C***FIRST EXECUTABLE STATEMENT  ISDCGN
+      ISDCGN = 0
+C
+      IF( ITOL.EQ.1 ) THEN
+C         err = ||Residual||/||RightHandSide|| (2-Norms).
+         IF(ITER .EQ. 0) BNRM = DNRM2(N, B, 1)
+         ERR = DNRM2(N, R, 1)/BNRM
+      ELSE IF( ITOL.EQ.2 ) THEN
+C                  -1              -1
+C         err = ||M  Residual||/||M  RightHandSide|| (2-Norms).
+         IF(ITER .EQ. 0) THEN
+            CALL MSOLVE(N, B, DZ, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+            CALL MTTVEC(N, DZ, ATDZ, NELT, IA, JA, A, ISYM)
+            BNRM = DNRM2(N, ATDZ, 1)
+         ENDIF
+         ERR = DNRM2(N, ATZ, 1)/BNRM
+      ELSE IF( ITOL.EQ.11 ) THEN
+C         err = ||x-TrueSolution||/||TrueSolution|| (2-Norms).
+         IF(ITER .EQ. 0) SOLNRM = DNRM2(N, SOLN, 1)
+         DO 10 I = 1, N
+            DZ(I) = X(I) - SOLN(I)
+ 10      CONTINUE
+         ERR = DNRM2(N, DZ, 1)/SOLNRM
+      ELSE
+C
+C         If we get here ITOL is not one of the acceptable values.
+C         Use a double precision sentinel so no precision is lost in
+C         this DOUBLE PRECISION routine (was the single precision
+C         constant 1.0E10).
+         ERR = 1.0D10
+         IERR = 3
+      ENDIF
+C
+      IF( IUNIT.NE.0 ) THEN
+         IF( ITER.EQ.0 ) THEN
+            WRITE(IUNIT,1000) N, ITOL
+         ENDIF
+         WRITE(IUNIT,1010) ITER, ERR, AK, BK
+      ENDIF
+      IF( ERR.LE.TOL ) ISDCGN = 1
+C
+      RETURN
+ 1000 FORMAT(' PCG Applied to the Normal Equations for ',
+     $     'N, ITOL = ',I5, I5,
+     $     /' ITER','   Error Estimate','            Alpha',
+     $     '             Beta')
+ 1010 FORMAT(1X,I4,1X,E16.7,1X,E16.7,1X,E16.7)
+C------------- LAST LINE OF ISDCGN FOLLOWS ----------------------------
+      END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcgs.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcgs.f
new file mode 100644
index 0000000000..10685c0b10
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dcgs.f
@@ -0,0 +1,1132 @@
+*DECK DCGS
+      SUBROUTINE DCGS(N, B, X, NELT, IA, JA, A, ISYM, MATVEC,
+     $     MSOLVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+     $     R, R0, P, Q, U, V1, V2, RWORK, IWORK)
+C***BEGIN PROLOGUE  DCGS
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DCGS-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, BiConjugate Gradient Squared
+C***AUTHOR  Greenbaum, Anne, Courant Institute
+C           Seager, Mark K., (LLNL)
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C             seager@lll-crg.llnl.gov
+C***PURPOSE  Preconditioned BiConjugate Gradient Squared Ax=b solver.
+C            Routine to solve a Non-Symmetric linear system Ax = b
+C            using the Preconditioned BiConjugate Gradient Squared
+C            (CGS) method.
+C***DESCRIPTION
+C *Usage:
+C     INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C     INTEGER ITER, IERR, IUNIT, IWORK(USER DEFINABLE)
+C     DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), R0(N), P(N)
+C     DOUBLE PRECISION Q(N), U(N), V1(N), V2(N), RWORK(USER DEFINABLE)
+C     EXTERNAL MATVEC, MSOLVE
+C
+C     CALL DCGS(N, B, X, NELT, IA, JA, A, ISYM, MATVEC,
+C    $     MSOLVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+C    $     R, R0, P, Q, U, V1, V2, RWORK, IWORK)
+C
+C *Arguments:
+C N      :IN       Integer
+C         Order of the Matrix.
+C B      :IN       Double Precision B(N).
+C         Right-hand side vector.
+C X      :INOUT    Double Precision X(N).
+C         On input X is your initial guess for solution vector.
+C         On output X is the final approximate solution.
+C NELT   :IN       Integer.
+C         Number of Non-Zeros stored in A.
+C IA     :IN       Integer IA(NELT).
+C JA     :IN       Integer JA(NELT).
+C A      :IN       Double Precision A(NELT).
+C         These arrays contain the matrix data structure for A.
+C         It could take any form.  See "Description", below
+C         for more late breaking details...
+C ISYM   :IN       Integer.
+C         Flag to indicate symmetric storage format.
+C         If ISYM=0, all nonzero entries of the matrix are stored.
+C         If ISYM=1, the matrix is symmetric, and only the upper
+C         or lower triangle of the matrix is stored.
+C MATVEC :EXT      External.
+C         Name of a routine which performs the matrix vector multiply
+C         operation Y = A*X given A and X.  The name of the MATVEC
+C         routine must be declared external in the calling program.
+C         The calling sequence of MATVEC is:
+C             CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C         Where N is the number of unknowns, Y is the product A*X upon
+C         return, X is an input vector.  NELT, IA, JA, A and ISYM
+C         define the SLAP matrix data structure: see Description,below.
+C MSOLVE :EXT      External.
+C         Name of a routine which solves a linear system MZ = R for Z
+C         given R with the preconditioning matrix M (M is supplied via
+C         RWORK and IWORK arrays).  The name of the MSOLVE routine
+C         must be declared external in the calling program.  The
+C         calling sequence of MSOLVE is:
+C             CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C         Where N is the number of unknowns, R is the right-hand side
+C         vector, and Z is the solution upon return.  NELT, IA, JA, A
+C         and ISYM define the SLAP matrix data structure: see
+C         Description, below.  RWORK is a double precision array that
+C         can be used
+C         to pass necessary preconditioning information and/or
+C         workspace to MSOLVE.  IWORK is an integer work array for the
+C         same purpose as RWORK.
+C ITOL   :IN       Integer.
+C         Flag to indicate type of convergence criterion.
+C         If ITOL=1, iteration stops when the 2-norm of the residual
+C         divided by the 2-norm of the right-hand side is less than TOL.
+C         This routine must calculate the residual from R = A*X - B.
+C         This is un-natural and hence expensive for this type of iter-
+C         ative method.  ITOL=2 is *STRONGLY* recommended.
+C         If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C         residual divided by the 2-norm of M-inv times the right hand
+C         side is less than tol, where M-inv times a vector is the pre-
+C         conditioning step.  This is the *NATURAL* stopping for this
+C         iterative method and is *STRONGLY* recommended.
+C         ITOL=11 is often useful for checking and comparing different
+C         routines.  For this case, the user must supply the "exact"
+C         solution or a very accurate approximation (one with an error
+C         much less than tol) through a common block,
+C             COMMON /SOLBLK/ SOLN( )
+C         if ITOL=11, iteration stops when the 2-norm of the difference
+C         between the iterative approximation and the user-supplied
+C         solution divided by the 2-norm of the user-supplied solution
+C         is less than tol.
+C TOL    :IN       Double Precision.
+C         Convergence criterion, as described above.
+C         (Note: if TOL is tighter than 500*D1MACH(3) this routine
+C         resets TOL to that value and returns IERR = 4.)
+C ITMAX  :IN       Integer.
+C         Maximum number of iterations.
+C ITER   :OUT      Integer.
+C         Number of iterations required to reach convergence, or
+C         ITMAX+1 if convergence criterion could not be achieved in
+C         ITMAX iterations.
+C ERR    :OUT      Double Precision.
+C         Error estimate of error in final approximate solution, as
+C         defined by ITOL.
+C IERR   :OUT      Integer.
+C         Return error flag.
+C           IERR = 0 => All went well.
+C           IERR = 1 => Insufficient storage allocated
+C                       for WORK or IWORK.
+C           IERR = 2 => Method failed to converge in
+C                       ITMAX steps.
+C           IERR = 3 => Error in user input.  Check input
+C                       value of N, ITOL.
+C           IERR = 4 => User error tolerance set too tight.
+C                       Reset to 500.0*D1MACH(3).  Iteration proceeded.
+C           IERR = 5 => Breakdown of the method detected.
+C                       $(r0,r) approximately 0.0$.
+C           IERR = 6 => Stagnation of the method detected.
+C                       $(r0,v) approximately 0.0$.
+C IUNIT  :IN       Integer.
+C         Unit number on which to write the error at each iteration,
+C         if this is desired for monitoring convergence.  If unit
+C         number is 0, no writing will occur.
+C R      :WORK     Double Precision R(N).
+C R0     :WORK     Double Precision R0(N).
+C P      :WORK     Double Precision P(N).
+C Q      :WORK     Double Precision Q(N).
+C U      :WORK     Double Precision U(N).
+C V1     :WORK     Double Precision V1(N).
+C V2     :WORK     Double Precision V2(N).
+C RWORK  :WORK     Double Precision RWORK(USER DEFINED).
+C         Double Precision array that can be used for workspace in
+C         MSOLVE.
+C IWORK  :WORK     Integer IWORK(USER DEFINED).
+C         Integer array that can be used for workspace in MSOLVE.
+C
+C *Description
+C       This routine does not care what matrix data structure is
+C       used for A and M.  It simply calls the MATVEC and MSOLVE
+C       routines, with the arguments as described above.  The user
+C       could write any type of structure and the appropriate MATVEC
+C       and MSOLVE routines.  It is assumed that A is stored in the
+C       IA, JA, A arrays in some fashion and that M (or INV(M)) is
+C       stored in IWORK and RWORK in some fashion.  The SLAP
+C       routines DSDCGS and DSLUCS are examples of this procedure.
+C
+C       Two examples of matrix data structures are the: 1) SLAP
+C       Triad format and 2) SLAP Column format.
+C
+C       =================== S L A P Triad format ===================
+C
+C       In this format only the non-zeros are stored.  They may
+C       appear in *ANY* order.  The user supplies three arrays of
+C       length NELT, where NELT is the number of non-zeros in the
+C       matrix: (IA(NELT), JA(NELT), A(NELT)).  For each non-zero
+C       the user puts the row and column index of that matrix
+C       element in the IA and JA arrays.  The value of the non-zero
+C       matrix element is placed in the corresponding location of
+C       the A array.  This is an extremely easy data structure to
+C       generate.  On the other hand it is not too efficient on
+C       vector computers for the iterative solution of linear
+C       systems.  Hence, SLAP changes this input data structure to
+C       the SLAP Column format for the iteration (but does not
+C       change it back).
+C
+C       Here is an example of the SLAP Triad storage format for a
+C       5x5 Matrix.  Recall that the entries may appear in any order.
+C
+C           5x5 Matrix       SLAP Triad format for 5x5 matrix on left.
+C                              1  2  3  4  5  6  7  8  9 10 11
+C       |11 12  0  0 15|   A: 51 12 11 33 15 53 55 22 35 44 21
+C       |21 22  0  0  0|  IA:  5  1  1  3  1  5  5  2  3  4  2
+C       | 0  0 33  0 35|  JA:  1  2  1  3  5  3  5  2  5  4  1
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C       =================== S L A P Column format ==================
+C       This routine requires that the matrix A be stored in the
+C       SLAP Column format.  In this format the non-zeros are stored
+C       counting down columns (except for the diagonal entry, which
+C       must appear first in each "column") and are stored in the
+C       double precision array A.  In other words, for each column
+C       in the matrix put the diagonal entry in A.  Then put in the
+C       other non-zero elements going down the column (except the
+C       diagonal) in order.  The IA array holds the row index for
+C       each non-zero.  The JA array holds the offsets into the IA,
+C       A arrays for the beginning of each column.  That is,
+C       IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C       ICOL-th column in IA and A.  IA(JA(ICOL+1)-1),
+C       A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C       Note that we always have JA(N+1) = NELT+1, where N is the
+C       number of columns in the matrix and NELT is the number of
+C       non-zeros in the matrix.
+C
+C       Here is an example of the SLAP Column storage format for a
+C       5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C       column):
+C
+C           5x5 Matrix      SLAP Column format for 5x5 matrix on left.
+C                              1  2  3    4  5    6  7    8    9 10 11
+C       |11 12  0  0 15|   A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C       |21 22  0  0  0|  IA:  1  2  5 |  2  1 |  3  5 |  4 |  5  1  3
+C       | 0  0 33  0 35|  JA:  1  4  6    8  9   12
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C *Precision:           Double Precision
+C *See Also:
+C         DSDCGS, DSLUCS
+C***REFERENCES  1. P. Sonneveld, ``CGS, a fast Lanczos-type solver
+C                 for nonsymmetric linear systems'', Delft University
+C                 of Technology Report 84-16, Department of Math-
+C                 ematics and Informatics, Julianalaan 132, 2628 BL
+C                 Delft, Phone 015-784568.
+C
+C               2. E.F. Kaasschieter, ``The solution of non-symmetric
+C                 linear systems by bi-conjugate gradients or conjugate
+C                 gradients squared,''  Delft University of Tech-
+C                 nology Report 86-21, Department of Mathematics and
+C                 Informatics, Julianalaan 132, 2628 BL Delft,
+C                 Phone 015-784568.
+C***ROUTINES CALLED  MATVEC, MSOLVE, ISDCGS, DDOT, D1MACH
+C***END PROLOGUE  DCGS
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+      INTEGER ITER, IERR, IUNIT, IWORK(*)
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), R0(N), P(N)
+      DOUBLE PRECISION Q(N), U(N), V1(N), V2(N), RWORK(*)
+      EXTERNAL MATVEC, MSOLVE
+C
+C         Check some of the input data.
+C***FIRST EXECUTABLE STATEMENT  DCGS
+      ITER = 0
+      IERR = 0
+      IF( N.LT.1 ) THEN
+         IERR = 3
+         RETURN
+      ENDIF
+      TOLMIN = 500.0*D1MACH(3)
+      IF( TOL.LT.TOLMIN ) THEN
+         TOL = TOLMIN
+         IERR = 4
+      ENDIF
+C
+C         Calculate initial residual and pseudo-residual, and check
+C         stopping criterion.
+C         Note: R is the preconditioned residual M-inv*(A*X - B);
+C         the coefficient updates below are consistent with this sign.
+      CALL MATVEC(N, X, R, NELT, IA, JA, A, ISYM)
+      DO 10 I = 1, N
+         V1(I) = R(I) - B(I)
+ 10   CONTINUE
+      CALL MSOLVE(N, V1, R, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+      IF( ISDCGS(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+     $     ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, R0, P, Q,
+     $     U, V1, V2, RWORK, IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 )
+     $     GO TO 200
+      IF( IERR.NE.0 ) RETURN
+C
+C         Set initial values.  R0 holds the fixed "shadow" residual
+C         used in the CGS inner products; FUZZ guards the divisions
+C         by RHONM1 and SIGMA against breakdown.
+C
+      FUZZ = D1MACH(3)**2
+      DO 20 I = 1, N
+         R0(I) = R(I)
+ 20   CONTINUE
+      RHONM1 = 1.0
+C
+C         ***** ITERATION LOOP *****
+C
+      DO 100 K=1,ITMAX
+         ITER = K
+C
+C         Calculate coefficient BK and direction vectors U, V and P.
+         RHON = DDOT(N, R0, 1, R, 1)
+         IF( ABS(RHONM1).LT.FUZZ ) GOTO 998
+         BK = RHON/RHONM1
+         IF( ITER.EQ.1 ) THEN
+            DO 30 I = 1, N
+               U(I) = R(I)
+               P(I) = R(I)
+ 30         CONTINUE
+         ELSE
+            DO 40 I = 1, N
+               U(I) = R(I) + BK*Q(I)
+               V1(I) = Q(I) + BK*P(I)
+ 40         CONTINUE
+            DO 50 I = 1, N
+               P(I) = U(I) + BK*V1(I)
+ 50         CONTINUE
+         ENDIF
+C
+C         Calculate coefficient AK, new iterate X, Q
+         CALL MATVEC(N, P, V2, NELT, IA, JA, A, ISYM)
+         CALL MSOLVE(N, V2, V1, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+         SIGMA = DDOT(N, R0, 1, V1, 1)
+         IF( ABS(SIGMA).LT.FUZZ ) GOTO 999
+         AK = RHON/SIGMA
+         AKM = -AK
+         DO 60 I = 1, N
+            Q(I) = U(I) + AKM*V1(I)
+ 60      CONTINUE
+         DO 70 I = 1, N
+            V1(I) = U(I) + Q(I)
+ 70      CONTINUE
+C         X = X - ak*V1.
+         CALL DAXPY( N, AKM, V1, 1, X, 1 )
+C                     -1
+C         R = R - ak*M  *A*V1
+         CALL MATVEC(N, V1, V2, NELT, IA, JA, A, ISYM)
+         CALL MSOLVE(N, V2, V1, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+         CALL DAXPY( N, AKM, V1, 1, R, 1 )
+C
+C         check stopping criterion.
+         IF( ISDCGS(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+     $        ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, R0, P, Q,
+     $        U, V1, V2, RWORK, IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 )
+     $        GO TO 200
+C
+C         Update RHO.
+         RHONM1 = RHON
+ 100  CONTINUE
+C
+C         ***** end of loop *****
+C         Stopping criterion not satisfied.
+      ITER = ITMAX + 1
+      IERR = 2
+ 200  RETURN
+C
+C         Breakdown of method detected.
+ 998  IERR = 5
+      RETURN
+C
+C         Stagnation of method detected.
+ 999  IERR = 6
+      RETURN
+C------------- LAST LINE OF DCGS FOLLOWS ----------------------------
+      END
+*DECK DSDCGS
+ SUBROUTINE DSDCGS(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSDCGS
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(SSDCGS-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Diagonally Scaled CGS Sparse Ax=b Solver.
+C Routine to solve a linear system Ax = b using the
+C BiConjugate Gradient Squared method with diagonal scaling.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(10), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(8*N)
+C
+C CALL DSDCGS(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "Description",
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C This routine must calculate the residual from R = A*X - B.
+C This is un-natural and hence expensive for this type of iter-
+C ative method. ITOL=2 is *STRONGLY* recommended.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than tol, where M-inv time a vector is the pre-
+C conditioning step. This is the *NATURAL* stopping for this
+C iterative method and is *STRONGLY* recommended.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Breakdown of the method detected.
+C $(r0,r) approximately 0.0$.
+C IERR = 6 => Stagnation of the method detected.
+C $(r0,v) approximately 0.0$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace.
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK. LENW >= 8*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Used to hold pointers into the RWORK array.
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >= 10.
+C
+C *Description:
+C This routine performs preconditioned BiConjugate gradient
+C method on the Non-Symmetric positive definite linear system
+C Ax=b. The preconditioner is M = DIAG(A), the diagonal of the
+C matrix A. This is the simplest of preconditioners and
+C vectorizes very well.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either of the
+C of these data structures and SLAP will figure out which on
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to
+C be the SLAP Column format. See above.
+C
+C *See Also:
+C DCGS, DLUBCG
+C***REFERENCES 1. P. Sonneveld, ``CGS, a fast Lanczos-type solver
+C for nonsymmetric linear systems'', Delft University
+C of Technology Report 84-16, Department of Math-
+C ematics and Informatics, Julianalaan 132, 2628 BL
+C Delft, Phone 015-784568.
+C
+C 2. E.F. Kaasschieter, ``The solution of non-symmetric
+C linear systems by bi-conjugate gradients or conjugate
+C gradients squared,'' Delft University of Tech-
+C nology Report 86-21, Department of Mathematics and
+C Informatics, Julianalaan 132, 2628 BL Delft,
+C Phone 015-784568.
+C***ROUTINES CALLED DS2Y, DCHKW, DSDS, DCGS
+C***END PROLOGUE DSDCGS
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+ INTEGER IERR, LENW, IWORK(LENIW), LENIW
+ DOUBLE PRECISION B(N), X(N), A(N), TOL, ERR, RWORK(LENW)
+ EXTERNAL DSMV, DSDI
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSDCGS
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Set up the workspace. Compute the inverse of the
+C diagonal of the matrix.
+ LOCIW = LOCIB
+C
+ LOCDIN = LOCRB
+ LOCR = LOCDIN + N
+ LOCR0 = LOCR + N
+ LOCP = LOCR0 + N
+ LOCQ = LOCP + N
+ LOCU = LOCQ + N
+ LOCV1 = LOCU + N
+ LOCV2 = LOCV1 + N
+ LOCW = LOCV2 + N
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSDCGS', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+ IWORK(4) = LOCDIN
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+ CALL DSDS(N, NELT, IA, JA, A, ISYM, RWORK(LOCDIN))
+C
+C Perform the Diagonally Scaled
+C BiConjugate Gradient Squared algorithm.
+ CALL DCGS(N, B, X, NELT, IA, JA, A, ISYM, DSMV,
+ $ DSDI, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK(LOCR), RWORK(LOCR0), RWORK(LOCP),
+ $ RWORK(LOCQ), RWORK(LOCU), RWORK(LOCV1),
+ $ RWORK(LOCV2), RWORK(1), IWORK(1))
+ RETURN
+C------------- LAST LINE OF DSDCGS FOLLOWS ----------------------------
+ END
+*DECK DSLUCS
+      SUBROUTINE DSLUCS(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+     $     ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE  DSLUCS
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(SSLUCS-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative incomplete LU Precondition
+C***AUTHOR  Greenbaum, Anne, Courant Institute
+C           Seager, Mark K., (LLNL)
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C             seager@lll-crg.llnl.gov
+C***PURPOSE  Incomplete LU BiConjugate Gradient Sparse Ax=b solver.
+C            Routine to solve a linear system  Ax = b  using the
+C            BiConjugate Gradient method with Incomplete LU
+C            decomposition preconditioning.
+C***DESCRIPTION
+C *Usage:
+C     INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C     INTEGER ITER, IERR, IUNIT, LENW, IWORK(NEL+NU+4*N+2), LENIW
+C     DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(NEL+NU+8*N)
+C
+C     CALL DSLUCS(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C    $     ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW)
+C
+C *Arguments:
+C N      :IN       Integer.
+C         Order of the Matrix.
+C B      :IN       Double Precision B(N).
+C         Right-hand side vector.
+C X      :INOUT    Double Precision X(N).
+C         On input X is your initial guess for solution vector.
+C         On output X is the final approximate solution.
+C NELT   :IN       Integer.
+C         Number of Non-Zeros stored in A.
+C IA     :INOUT    Integer IA(NELT).
+C JA     :INOUT    Integer JA(NELT).
+C A      :INOUT    Double Precision A(NELT).
+C         These arrays should hold the matrix A in either the SLAP
+C         Triad format or the SLAP Column format.  See "Description",
+C         below.  If the SLAP Triad format is chosen it is changed
+C         internally to the SLAP Column format.
+C ISYM   :IN       Integer.
+C         Flag to indicate symmetric storage format.
+C         If ISYM=0, all nonzero entries of the matrix are stored.
+C         If ISYM=1, the matrix is symmetric, and only the upper
+C         or lower triangle of the matrix is stored.
+C ITOL   :IN       Integer.
+C         Flag to indicate type of convergence criterion.
+C         If ITOL=1, iteration stops when the 2-norm of the residual
+C         divided by the 2-norm of the right-hand side is less than TOL.
+C         This routine must calculate the residual from R = A*X - B.
+C         This is un-natural and hence expensive for this type of iter-
+C         ative method.  ITOL=2 is *STRONGLY* recommended.
+C         If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C         residual divided by the 2-norm of M-inv times the right hand
+C         side is less than tol, where M-inv time a vector is the pre-
+C         conditioning step.  This is the *NATURAL* stopping for this
+C         iterative method and is *STRONGLY* recommended.
+C TOL    :IN       Double Precision.
+C         Convergence criterion, as described above.
+C ITMAX  :IN       Integer.
+C         Maximum number of iterations.
+C ITER   :OUT      Integer.
+C         Number of iterations required to reach convergence, or
+C         ITMAX+1 if convergence criterion could not be achieved in
+C         ITMAX iterations.
+C ERR    :OUT      Double Precision.
+C         Error estimate of error in final approximate solution, as
+C         defined by ITOL.
+C IERR   :OUT      Integer.
+C         Return error flag.
+C           IERR = 0 => All went well.
+C           IERR = 1 => Insufficient storage allocated
+C                       for WORK or IWORK.
+C           IERR = 2 => Method failed to converge in
+C                       ITMAX steps.
+C           IERR = 3 => Error in user input.  Check input
+C                       value of N, ITOL.
+C           IERR = 4 => User error tolerance set too tight.
+C                       Reset to 500.0*D1MACH(3).  Iteration proceeded.
+C           IERR = 5 => Breakdown of the method detected.
+C                       $(r0,r) approximately 0.0$.
+C           IERR = 6 => Stagnation of the method detected.
+C                       $(r0,v) approximately 0.0$.
+C           IERR = 7 => Incomplete factorization broke down
+C                       and was fudged.  Resulting preconditioning may
+C                       be less than the best.
+C IUNIT  :IN       Integer.
+C         Unit number on which to write the error at each iteration,
+C         if this is desired for monitoring convergence.  If unit
+C         number is 0, no writing will occur.
+C RWORK  :WORK     Double Precision RWORK(LENW).
+C         Double Precision array used for workspace.  NEL is the
+C         number of non-
+C         zeros in the lower triangle of the matrix (including the
+C         diagonal).  NU is the number of nonzeros in the upper
+C         triangle of the matrix (including the diagonal).
+C LENW   :IN       Integer.
+C         Length of the double precision workspace, RWORK.
+C         LENW >= NEL+NU+8*N.
+C IWORK  :WORK     Integer IWORK(LENIW).
+C         Integer array used for workspace.  NEL is the number of non-
+C         zeros in the lower triangle of the matrix (including the
+C         diagonal).  NU is the number of nonzeros in the upper
+C         triangle of the matrix (including the diagonal).
+C         Upon return the following locations of IWORK hold information
+C         which may be of use to the user:
+C         IWORK(9)  Amount of Integer workspace actually used.
+C         IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW  :IN       Integer.
+C         Length of the integer workspace, IWORK.
+C         LENIW >= NEL+NU+4*N+12.
+C
+C *Description:
+C       This routine is simply a driver for the DCGS routine.  It
+C       calls the DSILUS routine to set up the preconditioning and
+C       then calls DCGS with the appropriate MATVEC and MSOLVE
+C       routines.
+C
+C       The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C       data structures: 1) the  SLAP Triad  format or  2)  the SLAP
+C       Column format.  The user can hand this routine either one of
+C       these data structures and SLAP will figure out which one
+C       is being used and act accordingly.
+C
+C       =================== S L A P Triad format ===================
+C
+C       This routine requires that the  matrix A be   stored in  the
+C       SLAP  Triad format.  In  this format only the non-zeros  are
+C       stored.  They may appear in  *ANY* order.  The user supplies
+C       three arrays of  length NELT, where  NELT is the number   of
+C       non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)).  For
+C       each non-zero the user puts the row and column index of that
+C       matrix element  in the IA and  JA arrays.  The  value of the
+C       non-zero   matrix  element is  placed  in  the corresponding
+C       location of the A array.   This is  an  extremely  easy data
+C       structure to generate.  On  the  other hand it   is  not too
+C       efficient on vector computers for  the iterative solution of
+C       linear systems.  Hence,   SLAP changes   this  input    data
+C       structure to the SLAP Column format  for  the iteration (but
+C       does not change it back).
+C
+C       Here is an example of the  SLAP Triad   storage format for a
+C       5x5 Matrix.  Recall that the entries may appear in any order.
+C
+C           5x5 Matrix      SLAP Triad format for 5x5 matrix on left.
+C                              1  2  3  4  5  6  7  8  9 10 11
+C       |11 12  0  0 15|   A: 51 12 11 33 15 53 55 22 35 44 21
+C       |21 22  0  0  0|  IA:  5  1  1  3  1  5  5  2  3  4  2
+C       | 0  0 33  0 35|  JA:  1  2  1  3  5  3  5  2  5  4  1
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C       =================== S L A P Column format ==================
+C       This routine requires  that the  matrix  A be  stored in the
+C       SLAP Column format.  In this format the non-zeros are stored
+C       counting down columns (except for  the diagonal entry, which
+C       must appear first in each  "column")  and are stored  in the
+C       double precision array A.   In other words,  for each column
+C       in the matrix put the diagonal entry in  A.  Then put in the
+C       other non-zero  elements going down  the column (except  the
+C       diagonal) in order.   The  IA array holds the  row index for
+C       each non-zero.  The JA array holds the offsets  into the IA,
+C       A arrays for  the beginning of each   column.   That  is,
+C       IA(JA(ICOL)),  A(JA(ICOL)) points   to the beginning  of the
+C       ICOL-th   column    in    IA and   A.   IA(JA(ICOL+1)-1),
+C       A(JA(ICOL+1)-1) points to  the  end of the   ICOL-th column.
+C       Note that we always have  JA(N+1) = NELT+1,  where N is  the
+C       number of columns in  the matrix and NELT  is the number  of
+C       non-zeros in the matrix.
+C
+C       Here is an example of the  SLAP Column  storage format for a
+C       5x5 Matrix (in the A and IA arrays '|'  denotes the end of a
+C       column):
+C
+C           5x5 Matrix      SLAP Column format for 5x5 matrix on left.
+C                              1  2  3    4  5    6  7    8    9 10 11
+C       |11 12  0  0 15|   A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C       |21 22  0  0  0|  IA:  1  2  5 |  2  1 |  3  5 |  4 |  5  1  3
+C       | 0  0 33  0 35|  JA:  1  4  6    8  9   12
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C *Precision:           Double Precision
+C *Side Effects:
+C       The SLAP Triad format (IA, JA, A) is modified internally to
+C       be the SLAP Column format.  See above.
+C
+C *See Also:
+C       DCGS, DSDCGS
+C***REFERENCES  1. P. Sonneveld, ``CGS, a fast Lanczos-type solver
+C                 for nonsymmetric linear systems'', Delft University
+C                 of Technology Report 84-16, Department of Math-
+C                 ematics and Informatics, Julianalaan 132, 2628 BL
+C                 Delft, Phone 015-784568.
+C
+C               2. E.F. Kaasschieter, ``The solution of non-symmetric
+C                 linear systems by bi-conjugate gradients or conjugate
+C                 gradients squared,''  Delft University of Tech-
+C                 nology Report 86-21, Department of Mathematics and
+C                 Informatics, Julianalaan 132, 2628 BL Delft,
+C                 Phone 015-784568.
+C***ROUTINES CALLED  DS2Y, DCHKW, DSILUS, DCGS, DSMV, DSLUI
+C***END PROLOGUE  DSLUCS
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+      INTEGER IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+      EXTERNAL DSMV, DSLUI
+C         LOCRB/LOCIB: first free slots of the real/integer workspace.
+C         The first 10 integer slots are reserved for the pointer table
+C         handed to DSLUI via IWORK (see IWORK(1)-IWORK(10) below).
+      PARAMETER (LOCRB=1, LOCIB=11)
+C
+C         Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT  DSLUCS
+      IERR = 0
+      IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+         IERR = 3
+         RETURN
+      ENDIF
+      CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C         Count number of Non-Zero elements preconditioner ILU matrix.
+C         Then set up the work arrays.
+C         NL counts strictly-lower-triangle non-zeros, NU counts
+C         strictly-upper ones; with symmetric storage (ISYM.NE.0) each
+C         stored off-diagonal entry represents one element in each
+C         triangle.  The diagonal (first entry of each column after
+C         DS2Y) is excluded from both counts.
+      NL = 0
+      NU = 0
+      DO 20 ICOL = 1, N
+C         Don't count diagonal.
+         JBGN = JA(ICOL)+1
+         JEND = JA(ICOL+1)-1
+         IF( JBGN.LE.JEND ) THEN
+CVD$ NOVECTOR
+            DO 10 J = JBGN, JEND
+               IF( IA(J).GT.ICOL ) THEN
+                  NL = NL + 1
+                  IF( ISYM.NE.0 ) NU = NU + 1
+               ELSE
+                  NU = NU + 1
+               ENDIF
+ 10         CONTINUE
+         ENDIF
+ 20   CONTINUE
+C
+C         Integer workspace layout: row pointers/indices for L (IL,JL)
+C         and U (IU,JU), plus two length-N scratch arrays (NR, NC)
+C         used by DSILUS.  LOCIW is the total integer space required.
+      LOCIL = LOCIB
+      LOCJL = LOCIL + N+1
+      LOCIU = LOCJL + NL
+      LOCJU = LOCIU + NU
+      LOCNR = LOCJU + N+1
+      LOCNC = LOCNR + N
+      LOCIW = LOCNC + N
+C
+C         Real workspace layout: L factor, inverse diagonal, U factor,
+C         then the seven length-N vectors required by DCGS.  LOCW is
+C         the total real space required.
+      LOCL   = LOCRB
+      LOCDIN = LOCL + NL
+      LOCUU  = LOCDIN + N
+      LOCR   = LOCUU + NU
+      LOCR0  = LOCR + N
+      LOCP   = LOCR0 + N
+      LOCQ   = LOCP + N
+      LOCU   = LOCQ + N
+      LOCV1  = LOCU + N
+      LOCV2  = LOCV1 + N
+      LOCW   = LOCV2 + N
+C
+C         Check the workspace allocations.
+      CALL DCHKW( 'DSLUCS', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+      IF( IERR.NE.0 ) RETURN
+C
+C         Record the factorization pointers so that DSLUI can find the
+C         ILU factors inside RWORK/IWORK, and report space used.
+      IWORK(1) = LOCIL
+      IWORK(2) = LOCJL
+      IWORK(3) = LOCIU
+      IWORK(4) = LOCJU
+      IWORK(5) = LOCL
+      IWORK(6) = LOCDIN
+      IWORK(7) = LOCUU
+      IWORK(9) = LOCIW
+      IWORK(10) = LOCW
+C
+C         Compute the Incomplete LU decomposition.
+      CALL DSILUS( N, NELT, IA, JA, A, ISYM, NL, IWORK(LOCIL),
+     $     IWORK(LOCJL), RWORK(LOCL), RWORK(LOCDIN), NU, IWORK(LOCIU),
+     $     IWORK(LOCJU), RWORK(LOCUU), IWORK(LOCNR), IWORK(LOCNC) )
+C
+C         Perform the incomplete LU preconditioned
+C         BiConjugate Gradient Squared algorithm.
+      CALL DCGS(N, B, X, NELT, IA, JA, A, ISYM, DSMV,
+     $     DSLUI, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+     $     RWORK(LOCR), RWORK(LOCR0), RWORK(LOCP),
+     $     RWORK(LOCQ), RWORK(LOCU), RWORK(LOCV1),
+     $     RWORK(LOCV2), RWORK, IWORK )
+      RETURN
+C------------- LAST LINE OF DSLUCS FOLLOWS ----------------------------
+      END
+*DECK ISDCGS
+      FUNCTION ISDCGS(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+     $     ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, R0, P, Q, U,
+     $     V1, V2, RWORK, IWORK, AK, BK, BNRM, SOLNRM)
+C***BEGIN PROLOGUE  ISDCGS
+C***REFER TO  DCGS, DSDCGS, DSLUCS
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(ISDCGS-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, Stop Test
+C***AUTHOR  Greenbaum, Anne, Courant Institute
+C           Seager, Mark K., (LLNL)
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C             seager@lll-crg.llnl.gov
+C***PURPOSE  Preconditioned BiConjugate Gradient Stop Test.
+C            This routine calculates the stop test for the BiConjugate
+C            Gradient iteration scheme.  It returns a nonzero if the
+C            error estimate (the type of which is determined by ITOL)
+C            is less than the user specified tolerance TOL.
+C***DESCRIPTION
+C *Usage:
+C     INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+C     INTEGER IERR, IUNIT, IWORK(USER DEFINED)
+C     DOUBLE PRECISION B(N), X(N), A(N), TOL, ERR, R(N), R0(N), P(N)
+C     DOUBLE PRECISION Q(N), U(N), V1(N), V2(N)
+C     DOUBLE PRECISION RWORK(USER DEFINED), AK, BK, BNRM, SOLNRM
+C     EXTERNAL MATVEC, MSOLVE
+C
+C     IF( ISDCGS(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE, ITOL,
+C    $     TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, R0, P, Q, U, V1,
+C    $     V2, RWORK, IWORK, AK, BK, BNRM, SOLNRM) .NE. 0 )
+C    $     THEN ITERATION DONE
+C
+C *Arguments:
+C N      :IN       Integer
+C         Order of the Matrix.
+C B      :IN       Double Precision B(N).
+C         Right-hand side vector.
+C X      :INOUT    Double Precision X(N).
+C         On input X is your initial guess for solution vector.
+C         On output X is the final approximate solution.
+C NELT   :IN       Integer.
+C         Number of Non-Zeros stored in A.
+C IA     :IN       Integer IA(NELT).
+C JA     :IN       Integer JA(NELT).
+C A      :IN       Double Precision A(NELT).
+C         These arrays contain the matrix data structure for A.
+C         It could take any form.  See "LONG DESCRIPTION", in
+C         the SLAP routine DCGS for more late breaking details...
+C ISYM   :IN       Integer.
+C         Flag to indicate symmetric storage format.
+C         If ISYM=0, all nonzero entries of the matrix are stored.
+C         If ISYM=1, the matrix is symmetric, and only the upper
+C         or lower triangle of the matrix is stored.
+C MATVEC :EXT      External.
+C         Name of a routine which performs the matrix vector multiply
+C         operation  Y = A*X  given A and X.  The  name of the MATVEC
+C         routine must be declared external in the calling program.
+C         The calling sequence of MATVEC is:
+C             CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C         Where N is the number of unknowns, Y is the product A*X upon
+C         return,  X is an input  vector.  NELT, IA,  JA,  A and  ISYM
+C         define the SLAP matrix data structure: see LONG DESCRIPTION,
+C         below.
+C MSOLVE :EXT      External.
+C         Name of a routine which solves a linear system MZ = R  for Z
+C         given R with the preconditioning matrix M (M is supplied via
+C         RWORK  and IWORK arrays).   The name  of  the MSOLVE routine
+C         must be declared  external  in the  calling   program.   The
+C         calling sequence of MSOLVE is:
+C             CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C         Where N is the number of unknowns, R is  the right-hand side
+C         vector, and Z is the solution upon return.  NELT, IA, JA, A
+C         and ISYM define the SLAP matrix data structure: see LONG
+C         DESCRIPTION, below.  RWORK is a double precision array that
+C         can be used
+C         to  pass   necessary  preconditioning     information and/or
+C         workspace to MSOLVE.  IWORK is an integer work array for the
+C         same purpose as RWORK.
+C ITOL   :IN       Integer.
+C         Flag to indicate type of convergence criterion.
+C         If ITOL=1, iteration stops when the 2-norm of the residual
+C         divided by the 2-norm of the right-hand side is less than TOL.
+C         This routine must calculate the residual from R = A*X - B.
+C         This is un-natural and hence expensive for this type of iter-
+C         ative method.  ITOL=2 is *STRONGLY* recommended.
+C         If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C         residual divided by the 2-norm of M-inv times the right hand
+C         side is less than tol, where M-inv time a vector is the pre-
+C         conditioning step.  This is the *NATURAL* stopping for this
+C         iterative method and is *STRONGLY* recommended.
+C         ITOL=11 is often useful for checking and comparing different
+C         routines.  For this case, the user must supply the "exact"
+C         solution or a very accurate approximation (one with an error
+C         much less than TOL) through a common block,
+C                     COMMON /SOLBLK/ SOLN(1)
+C         if ITOL=11, iteration stops when the 2-norm of the difference
+C         between the iterative approximation and the user-supplied
+C         solution divided by the 2-norm of the user-supplied solution
+C         is less than TOL.  Note that this requires the user to set up
+C         the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C         The routine with this declaration should be loaded before the
+C         stop test so that the correct length is used by the loader.
+C         This procedure is not standard Fortran and may not work
+C         correctly on your system (although it has worked on every
+C         system the authors have tried).  If ITOL is not 11 then this
+C         common block is indeed standard Fortran.
+C TOL    :IN       Double Precision.
+C         Convergence criterion, as described above.
+C ITMAX  :IN       Integer.
+C         Maximum number of iterations.
+C ITER   :OUT      Integer.
+C         Number of iterations required to reach convergence, or
+C         ITMAX+1 if convergence criterion could not be achieved in
+C         ITMAX iterations.
+C ERR    :OUT      Double Precision.
+C         Error estimate of error in final approximate solution, as
+C         defined by ITOL.
+C IERR   :OUT      Integer.
+C         Error flag.  IERR is set to 3 if ITOL is not one of the
+C         acceptable values, see above.
+C IUNIT  :IN       Integer.
+C         Unit number on which to write the error at each iteration,
+C         if this is desired for monitoring convergence.  If unit
+C         number is 0, no writing will occur.
+C R      :IN       Double Precision R(N).
+C         The residual r = b - Ax.
+C R0     :WORK     Double Precision R0(N).
+C P      :DUMMY    Double Precision P(N).
+C Q      :DUMMY    Double Precision Q(N).
+C U      :DUMMY    Double Precision U(N).
+C V1     :DUMMY    Double Precision V1(N).
+C V2     :WORK     Double Precision V2(N).
+C         If ITOL.eq.1 then V2 is used to hold A * X - B on every call.
+C         If ITOL.eq.2 then V2 is used to hold M-inv * B on the first
+C         call.
+C         If ITOL.eq.11 then V2 is used to hold X - SOLN.
+C RWORK  :WORK     Double Precision RWORK(USER DEFINED).
+C         Double Precision array that can be used for workspace in
+C         MSOLVE.
+C IWORK  :WORK     Integer IWORK(USER DEFINED).
+C         Integer array that can be used for workspace in MSOLVE.
+C AK     :IN       Double Precision.
+C         Current iterate BiConjugate Gradient iteration parameter.
+C BK     :IN       Double Precision.
+C         Current iterate BiConjugate Gradient iteration parameter.
+C BNRM   :INOUT    Double Precision.
+C         Norm of the right hand side.  Type of norm depends on ITOL.
+C         Calculated only on the first call (ITER.EQ.0).
+C SOLNRM :INOUT    Double Precision.
+C         2-Norm of the true solution, SOLN.  Only computed and used
+C         if ITOL = 11.  Calculated only on the first call.
+C
+C *Function Return Values:
+C       0 : Error estimate (determined by ITOL) is *NOT* less than the
+C           specified tolerance, TOL.  The iteration must continue.
+C       1 : Error estimate (determined by ITOL) is less than the
+C           specified tolerance, TOL.  The iteration can be considered
+C           complete.
+C
+C *Precision:           Double Precision
+C***REFERENCES  (NONE)
+C***ROUTINES CALLED  MATVEC, MSOLVE, DNRM2
+C***COMMON BLOCKS    SOLBLK
+C***END PROLOGUE  ISDCGS
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+C         Explicitly type the function result and the BLAS 2-norm
+C         (both already match the implicit rules; declared for clarity).
+      INTEGER ISDCGS
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+      INTEGER ITER, IERR, IUNIT, IWORK(1)
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), R0(N), P(N)
+      DOUBLE PRECISION Q(N), U(N), V1(N), V2(N), RWORK(1)
+      DOUBLE PRECISION AK, BK, BNRM, SOLNRM
+      DOUBLE PRECISION DNRM2
+      COMMON /SOLBLK/ SOLN(1)
+      EXTERNAL MATVEC, MSOLVE, DNRM2
+C
+C***FIRST EXECUTABLE STATEMENT  ISDCGS
+      ISDCGS = 0
+C
+      IF( ITOL.EQ.1 ) THEN
+C         err = ||Residual||/||RightHandSide|| (2-Norms).
+         IF(ITER .EQ. 0) BNRM = DNRM2(N, B, 1)
+         CALL MATVEC(N, X, V2, NELT, IA, JA, A, ISYM )
+         DO 5 I = 1, N
+            V2(I) = V2(I) - B(I)
+ 5       CONTINUE
+         ERR = DNRM2(N, V2, 1)/BNRM
+      ELSE IF( ITOL.EQ.2 ) THEN
+C                  -1              -1
+C         err = ||M  Residual||/||M  RightHandSide|| (2-Norms).
+         IF(ITER .EQ. 0) THEN
+            CALL MSOLVE(N, B, V2, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+            BNRM = DNRM2(N, V2, 1)
+         ENDIF
+         ERR = DNRM2(N, R, 1)/BNRM
+      ELSE IF( ITOL.EQ.11 ) THEN
+C         err = ||x-TrueSolution||/||TrueSolution|| (2-Norms).
+         IF(ITER .EQ. 0) SOLNRM = DNRM2(N, SOLN, 1)
+         DO 10 I = 1, N
+            V2(I) = X(I) - SOLN(I)
+ 10      CONTINUE
+         ERR = DNRM2(N, V2, 1)/SOLNRM
+      ELSE
+C
+C         If we get here ITOL is not one of the acceptable values.
+C         Use a double precision constant (D exponent) for the huge
+C         sentinel error so no default-real conversion is involved.
+         ERR = 1.0D10
+         IERR = 3
+      ENDIF
+C
+C         Print the error and coefficients AK, BK on each step,
+C         if desired.
+      IF(IUNIT .NE. 0) THEN
+         IF( ITER.EQ.0 ) THEN
+            WRITE(IUNIT,1000) N, ITOL
+         ENDIF
+         WRITE(IUNIT,1010) ITER, ERR, AK, BK
+      ENDIF
+      IF(ERR .LE. TOL) ISDCGS = 1
+C
+      RETURN
+ 1000 FORMAT(' Preconditioned BiConjugate Gradient Squared for ',
+     $     'N, ITOL = ',I5, I5,
+     $     /' ITER','   Error Estimate','            Alpha',
+     $     '             Beta')
+ 1010 FORMAT(1X,I4,1X,E16.7,1X,E16.7,1X,E16.7)
+C------------- LAST LINE OF ISDCGS FOLLOWS ----------------------------
+      END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dgmres.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dgmres.f
new file mode 100644
index 0000000000..b159b60d49
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dgmres.f
@@ -0,0 +1,2671 @@
+*DECK DGMRES
+ SUBROUTINE DGMRES(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, SB, SX,
+ $ RGWK, LRGW, IGWK, LIGW, RWORK, IWORK )
+C***BEGIN PROLOGUE DGMRES
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DGMRES-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C***PURPOSE Preconditioned GMRES iterative sparse Ax=b solver.
+C This routine uses the generalized minimum residual
+C (GMRES) method with preconditioning to solve
+C non-symmetric linear systems of the form: A*x = b.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER IERR, IUNIT, LRGW, LIGW, IGWK(LIGW)
+C INTEGER IWORK(USER DEFINED)
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, SB(N), SX(N)
+C DOUBLE PRECISION RGWK(LRGW), RWORK(USER DEFINED)
+C EXTERNAL MATVEC, MSOLVE
+C
+C CALL DGMRES(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+C $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, SB, SX,
+C $ RGWK, LRGW, IGWK, LIGW, RWORK, IWORK)
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for the solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays contain the matrix data structure for A.
+C It could take any form. See "Description", below
+C for more late breaking details...
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MATVEC :EXT External.
+C Name of a routine which performs the matrix vector multiply
+C Y = A*X given A and X. The name of the MATVEC routine must
+C be declared external in the calling program. The calling
+C sequence to MATVEC is:
+C CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C where N is the number of unknowns, Y is the product A*X
+C upon return, X is an input vector, and NELT is the number of
+C non-zeros in the SLAP IA, JA, A storage for the matrix A.
+C ISYM is a flag which, if non-zero, denotes that A is
+C symmetric and only the lower or upper triangle is stored.
+C MSOLVE :EXT External.
+C Name of the routine which solves a linear system Mz = r for
+C z given r with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays. The name of the MSOLVE routine must
+C be declared external in the calling program. The calling
+C sequence to MSLOVE is:
+C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and z is the solution upon return. RWORK is a
+C double precision
+C array that can be used to pass necessary preconditioning
+C information and/or workspace to MSOLVE. IWORK is an integer
+C work array for the same purpose as RWORK.
+C ITOL :IN Integer.
+C Flag to indicate the type of convergence criterion used.
+C ITOL=0 Means the iteration stops when the test described
+C below on the residual RL is satisfied. This is
+C the "Natural Stopping Criteria" for this routine.
+C Other values of ITOL cause extra, otherwise
+C unnecessary, computation per iteration and are
+C therefore much less efficient. See ISDGMR (the
+C stop test routine) for more information.
+C ITOL=1 Means the iteration stops when the first test
+C described below on the residual RL is satisfied,
+C and there is either right or no preconditioning
+C being used.
+C ITOL=2 Implies that the user is using left
+C preconditioning, and the second stopping criterion
+C below is used.
+C ITOL=3 Means the iteration stops when the third test
+C described below on Minv*Residual is satisfied, and
+C there is either left or no preconditioning begin
+C used.
+C ITOL=11 is often useful for checking and comparing
+C different routines. For this case, the user must
+C supply the "exact" solution or a very accurate
+C approximation (one with an error much less than
+C TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the
+C difference between the iterative approximation and
+C the user-supplied solution divided by the 2-norm
+C of the user-supplied solution is less than TOL.
+C Note that this requires the user to set up the
+C "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling
+C routine. The routine with this declaration should
+C be loaded before the stop test so that the correct
+C length is used by the loader. This procedure is
+C not standard Fortran and may not work correctly on
+C your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11
+C then this common block is indeed standard Fortran.
+C TOL :INOUT Double Precision.
+C Convergence criterion, as described below. If TOL is set
+C to zero on input, then a default value of 500*(the smallest
+C positive magnitude, machine epsilon) is used.
+C ITMAX :DUMMY Integer.
+C Maximum number of iterations in most SLAP routines. In
+C this routine this does not make sense. The maximum number
+C of iterations here is given by ITMAX = MAXL*(NRMAX+1).
+C See IGWK for definitions of MAXL and NRMAX.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL. Letting norm() denote the Euclidean
+C norm, ERR is defined as follows..
+C
+C If ITOL=0, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
+C for right or no preconditioning, and
+C ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
+C norm(SB*(M-inverse)*B),
+C for left preconditioning.
+C If ITOL=1, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
+C since right or no preconditioning
+C being used.
+C If ITOL=2, then ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
+C norm(SB*(M-inverse)*B),
+C since left preconditioning is being
+C used.
+C If ITOL=3, then ERR = Max |(Minv*(B-A*X(L)))(i)/x(i)|
+C i=1,n
+C If ITOL=11, then ERR = norm(SB*(X(L)-SOLN))/norm(SB*SOLN).
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated for
+C RGWK or IGWK.
+C IERR = 2 => Routine Dgmres failed to reduce the norm
+C of the current residual on its last call,
+C and so the iteration has stalled. In
+C this case, X equals the last computed
+C approximation. The user must either
+C increase MAXL, or choose a different
+C initial guess.
+C IERR =-1 => Insufficient length for RGWK array.
+C IGWK(6) contains the required minimum
+C length of the RGWK array.
+C IERR =-2 => Inconsistent ITOL and JPRE values.
+C For IERR <= 2, RGWK(1) = RHOL, which is the norm on the
+C left-hand-side of the relevant stopping test defined
+C below associated with the residual for the current
+C approximation X(L).
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C SB :IN Double Precision SB(N).
+C Array of length N containing scale factors for the right
+C hand side vector B. If JSCAL.eq.0 (see below), SB need
+C not be supplied.
+C SX :IN Double Precision SX(N).
+C Array of length N containing scale factors for the solution
+C vector X. If JSCAL.eq.0 (see below), SX need not be
+C supplied. SB and SX can be the same array in the calling
+C program if desired.
+C RGWK :INOUT Double Precision RGWK(LRGW).
+C Double Precision array of size at least
+C 1 + N*(MAXL+6) + MAXL*(MAXL+3)
+C used for work space by DGMRES. See below for definition of
+C MAXL.
+C On return, RGWK(1) = RHOL. See IERR for definition of RHOL.
+C LRGW :IN Integer.
+C Length of the double precision workspace, RGWK.
+C LRGW > 1 + N*(MAXL+6) + MAXL*(MAXL+3).
+C For the default values, RGWK has size at least 131 + 16*N.
+C IGWK :INOUT Integer IGWK(LIGW).
+C The following IGWK parameters should be set by the user
+C before calling this routine.
+C IGWK(1) = MAXL. Maximum dimension of Krylov subspace in
+C which X - X0 is to be found (where, X0 is the initial
+C guess). The default value of MAXL is 10.
+C IGWK(2) = KMP. Maximum number of previous Krylov basis
+C vectors to which each new basis vector is made orthogonal.
+C The default value of KMP is MAXL.
+C IGWK(3) = JSCAL. Flag indicating whether the scaling
+C arrays SB and SX are to be used.
+C JSCAL = 0 => SB and SX are not used and the algorithm
+C will perform as if all SB(I) = 1 and SX(I) = 1.
+C JSCAL = 1 => Only SX is used, and the algorithm
+C performs as if all SB(I) = 1.
+C JSCAL = 2 => Only SB is used, and the algorithm
+C performs as if all SX(I) = 1.
+C JSCAL = 3 => Both SB and SX are used.
+C IGWK(4) = JPRE. Flag indicating whether preconditioning
+C is being used.
+C JPRE = 0 => There is no preconditioning.
+C JPRE > 0 => There is preconditioning on the right
+C only, and the solver will call routine MSOLVE.
+C JPRE < 0 => There is preconditioning on the left
+C only, and the solver will call routine MSOLVE.
+C IGWK(5) = NRMAX. Maximum number of restarts of the
+C Krylov iteration. The default value of NRMAX = 10.
+C if IWORK(5) = -1, then no restarts are performed (in
+C this case, NRMAX is set to zero internally).
+C The following IWORK parameters are diagnostic information
+C made available to the user after this routine completes.
+C IGWK(6) = MLWK. Required minimum length of RGWK array.
+C IGWK(7) = NMS. The total number of calls to MSOLVE.
+C LIGW :IN Integer.
+C Length of the integer workspace, IGWK. LIGW >= 20.
+C
+C *Description:
+C DGMRES solves a linear system A*X = B rewritten in the form:
+C
+C (SB*A*(M-inverse)*(SX-inverse))*(SX*M*X) = SB*B,
+C
+C with right preconditioning, or
+C
+C (SB*(M-inverse)*A*(SX-inverse))*(SX*X) = SB*(M-inverse)*B,
+C
+C with left preconditioning, where A is an N-by-N double
+C precision matrix,
+C X and B are N-vectors, SB and SX are diagonal scaling
+C matrices, and M is a preconditioning matrix. It uses
+C         preconditioned Krylov subspace methods based on the
+C generalized minimum residual method (GMRES). This routine
+C optionally performs either the full orthogonalization
+C version of the GMRES algorithm or an incomplete variant of
+C it. Both versions use restarting of the linear iteration by
+C default, although the user can disable this feature.
+C
+C The GMRES algorithm generates a sequence of approximations
+C X(L) to the true solution of the above linear system. The
+C convergence criteria for stopping the iteration is based on
+C the size of the scaled norm of the residual R(L) = B -
+C A*X(L). The actual stopping test is either:
+C
+C norm(SB*(B-A*X(L))) .le. TOL*norm(SB*B),
+C
+C for right preconditioning, or
+C
+C norm(SB*(M-inverse)*(B-A*X(L))) .le.
+C TOL*norm(SB*(M-inverse)*B),
+C
+C for left preconditioning, where norm() denotes the euclidean
+C norm, and TOL is a positive scalar less than one input by
+C the user. If TOL equals zero when DGMRES is called, then a
+C default value of 500*(the smallest positive magnitude,
+C machine epsilon) is used. If the scaling arrays SB and SX
+C are used, then ideally they should be chosen so that the
+C vectors SX*X(or SX*M*X) and SB*B have all their components
+C approximately equal to one in magnitude. If one wants to
+C use the same scaling in X and B, then SB and SX can be the
+C same array in the calling program.
+C
+C The following is a list of the other routines and their
+C functions used by DGMRES:
+C DPIGMR Contains the main iteration loop for GMRES.
+C DORTH Orthogonalizes a new vector against older basis vects.
+C DHEQR Computes a QR decomposition of a Hessenberg matrix.
+C DHELS Solves a Hessenberg least-squares system, using QR
+C factors.
+C DRLCAL Computes the scaled residual RL.
+C DXLCAL Computes the solution XL.
+C ISDGMR User-replaceable stopping routine.
+C
+C This routine does not care what matrix data structure is
+C used for A and M. It simply calls the MATVEC and MSOLVE
+C routines, with the arguments as described above. The user
+C could write any type of structure and the appropriate MATVEC
+C and MSOLVE routines. It is assumed that A is stored in the
+C IA, JA, A arrays in some fashion and that M (or INV(M)) is
+C stored in IWORK and RWORK in some fashion. The SLAP
+C routines DSDCG and DSICCG are examples of this procedure.
+C
+C Two examples of matrix data structures are the: 1) SLAP
+C Triad format and 2) SLAP Column format.
+C
+C =================== S L A P Triad format ===================
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C***REFERENCES 1. Peter N. Brown and A. C. Hindmarsh,
+C "Reduced Storage Matrix Methods In Stiff ODE
+C Systems," LLNL report UCRL-95088, Rev. 1,
+C June 1987.
+C***ROUTINES CALLED DPIGMR, DORTH, DHEQR, DHELS, DRLCAL, DXLCAL,
+C ISDGMR, DNRM2, DDOT, DAXPY, DSCAL, IDAMAX, D1MACH.
+C***END PROLOGUE DGMRES
+C The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+ INTEGER IERR, IUNIT, LRGW, LIGW, IGWK(LIGW)
+ INTEGER IWORK(*)
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, SB(N), SX(N)
+ DOUBLE PRECISION RGWK(LRGW), RWORK(*)
+ EXTERNAL MATVEC, MSOLVE, D1MACH
+ INTEGER JPRE, KMP, MAXL, NMS, MAXLP1, NMSL, NRSTS, NRMAX
+ INTEGER I, IFLAG, LR, LDL, LHES, LGMR, LQ, LV, LW
+ DOUBLE PRECISION BNRM, RHOL, SUM
+C
+C***FIRST EXECUTABLE STATEMENT DGMRES
+ IERR = 0
+C ------------------------------------------------------------------
+C Load method parameters with user values or defaults.
+C ------------------------------------------------------------------
+ MAXL = IGWK(1)
+ IF (MAXL .EQ. 0) MAXL = 10
+ IF (MAXL .GT. N) MAXL = N
+ KMP = IGWK(2)
+ IF (KMP .EQ. 0) KMP = MAXL
+ IF (KMP .GT. MAXL) KMP = MAXL
+ JSCAL = IGWK(3)
+ JPRE = IGWK(4)
+C Check for consistent values of ITOL and JPRE.
+ IF( ITOL.EQ.1 .AND. JPRE.LT.0 ) GOTO 650
+ IF( ITOL.EQ.2 .AND. JPRE.GE.0 ) GOTO 650
+ NRMAX = IGWK(5)
+ IF( NRMAX.EQ.0 ) NRMAX = 10
+C If NRMAX .eq. -1, then set NRMAX = 0 to turn off restarting.
+ IF( NRMAX.EQ.-1 ) NRMAX = 0
+C If input value of TOL is zero, set it to its default value.
+ IF( TOL.EQ.0.0D0 ) TOL = 500.0*D1MACH(3)
+C
+C Initialize counters.
+ ITER = 0
+ NMS = 0
+ NRSTS = 0
+C ------------------------------------------------------------------
+C Form work array segment pointers.
+C ------------------------------------------------------------------
+ MAXLP1 = MAXL + 1
+ LV = 1
+ LR = LV + N*MAXLP1
+ LHES = LR + N + 1
+ LQ = LHES + MAXL*MAXLP1
+ LDL = LQ + 2*MAXL
+ LW = LDL + N
+ LXL = LW + N
+ LZ = LXL + N
+C
+C Load igwk(6) with required minimum length of the rgwk array.
+ IGWK(6) = LZ + N - 1
+ IF( LZ+N-1.GT.LRGW ) GOTO 640
+C ------------------------------------------------------------------
+C Calculate scaled-preconditioned norm of RHS vector b.
+C ------------------------------------------------------------------
+ IF (JPRE .LT. 0) THEN
+ CALL MSOLVE(N, B, RGWK(LR), NELT, IA, JA, A, ISYM,
+ $ RWORK, IWORK)
+ NMS = NMS + 1
+ ELSE
+ CALL DCOPY(N, B, 1, RGWK(LR), 1)
+ ENDIF
+ IF( JSCAL.EQ.2 .OR. JSCAL.EQ.3 ) THEN
+ SUM = 0.D0
+ DO 10 I = 1,N
+ SUM = SUM + (RGWK(LR-1+I)*SB(I))**2
+ 10 CONTINUE
+ BNRM = DSQRT(SUM)
+ ELSE
+ BNRM = DNRM2(N,RGWK(LR),1)
+ ENDIF
+C ------------------------------------------------------------------
+C Calculate initial residual.
+C ------------------------------------------------------------------
+ CALL MATVEC(N, X, RGWK(LR), NELT, IA, JA, A, ISYM)
+ DO 50 I = 1,N
+ RGWK(LR-1+I) = B(I) - RGWK(LR-1+I)
+ 50 CONTINUE
+C ------------------------------------------------------------------
+C If performing restarting, then load the residual into the
+C correct location in the Rgwk array.
+C ------------------------------------------------------------------
+ 100 CONTINUE
+ IF( NRSTS.GT.NRMAX ) GOTO 610
+ IF( NRSTS.GT.0 ) THEN
+C Copy the curr residual to different loc in the Rgwk array.
+ CALL DCOPY(N, RGWK(LDL), 1, RGWK(LR), 1)
+ ENDIF
+C ------------------------------------------------------------------
+C Use the DPIGMR algorithm to solve the linear system A*Z = R.
+C ------------------------------------------------------------------
+ CALL DPIGMR(N, RGWK(LR), SB, SX, JSCAL, MAXL, MAXLP1, KMP,
+ $ NRSTS, JPRE, MATVEC, MSOLVE, NMSL, RGWK(LZ), RGWK(LV),
+ $ RGWK(LHES), RGWK(LQ), LGMR, RWORK, IWORK, RGWK(LW),
+ $ RGWK(LDL), RHOL, NRMAX, B, BNRM, X, RGWK(LXL), ITOL,
+ $ TOL, NELT, IA, JA, A, ISYM, IUNIT, IFLAG, ERR)
+ ITER = ITER + LGMR
+ NMS = NMS + NMSL
+C
+C Increment X by the current approximate solution Z of A*Z = R.
+C
+ LZM1 = LZ - 1
+ DO 110 I = 1,N
+ X(I) = X(I) + RGWK(LZM1+I)
+ 110 CONTINUE
+ IF( IFLAG.EQ.0 ) GOTO 600
+ IF( IFLAG.EQ.1 ) THEN
+ NRSTS = NRSTS + 1
+ GOTO 100
+ ENDIF
+ IF( IFLAG.EQ.2 ) GOTO 620
+C ------------------------------------------------------------------
+C All returns are made through this section.
+C ------------------------------------------------------------------
+C The iteration has converged.
+C
+ 600 CONTINUE
+ IGWK(7) = NMS
+ RGWK(1) = RHOL
+ IERR = 0
+ RETURN
+C
+C Max number((NRMAX+1)*MAXL) of linear iterations performed.
+ 610 CONTINUE
+ IGWK(7) = NMS
+ RGWK(1) = RHOL
+ IERR = 1
+ RETURN
+C
+C GMRES failed to reduce last residual in MAXL iterations.
+C The iteration has stalled.
+ 620 CONTINUE
+ IGWK(7) = NMS
+ RGWK(1) = RHOL
+ IERR = 2
+ RETURN
+C Error return. Insufficient length for Rgwk array.
+ 640 CONTINUE
+ ERR = TOL
+ IERR = -1
+ RETURN
+C Error return. Inconsistent ITOL and JPRE values.
+ 650 CONTINUE
+ ERR = TOL
+ IERR = -2
+ RETURN
+C------------- LAST LINE OF DGMRES FOLLOWS ----------------------------
+ END
+*DECK DSDGMR
+      SUBROUTINE DSDGMR(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
+     $     ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW,
+     $     IWORK, LENIW )
+C***BEGIN PROLOGUE DSDGMR
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DSDGMR-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C           Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C           Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C***PURPOSE Diagonally scaled GMRES iterative sparse Ax=b solver.
+C            This routine uses the generalized minimum residual
+C            (GMRES) method with diagonal scaling to solve possibly
+C            non-symmetric linear systems of the form: A*x = b.
+C***DESCRIPTION
+C *Usage:
+C      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE
+C      INTEGER ITOL, ITMAX, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+C      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR
+C      DOUBLE PRECISION RWORK(LENW)
+C      EXTERNAL MATVEC, MSOLVE
+C
+C      CALL DSDGMR(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
+C     $     ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+C     $     RWORK, LENW, IWORK, LENIW)
+C
+C *Arguments:
+C N      :IN       Integer.
+C         Order of the Matrix.
+C B      :IN       Double Precision B(N).
+C         Right-hand side vector.
+C X      :INOUT    Double Precision X(N).
+C         On input X is your initial guess for solution vector.
+C         On output X is the final approximate solution.
+C NELT   :IN       Integer.
+C         Number of Non-Zeros stored in A.
+C IA     :IN       Integer IA(NELT).
+C JA     :IN       Integer JA(NELT).
+C A      :IN       Double Precision A(NELT).
+C         These arrays should hold the matrix A in either the SLAP
+C         Triad format or the SLAP Column format.  See "Description",
+C         below.  If the SLAP Triad format is chosen it is changed
+C         internally to the SLAP Column format.
+C ISYM   :IN       Integer.
+C         Flag to indicate symmetric storage format.
+C         If ISYM=0, all nonzero entries of the matrix are stored.
+C         If ISYM=1, the matrix is symmetric, and only the upper
+C         or lower triangle of the matrix is stored.
+C NSAVE  :IN       Integer.
+C         Number of direction vectors to save and orthogonalize against.
+C         Must be greater than 1.
+C ITOL   :IN       Integer.
+C         Flag to indicate the type of convergence criterion used.
+C         ITOL=0  Means the iteration stops when the test described
+C                 below on the residual RL is satisfied.  This is
+C                 the "Natural Stopping Criteria" for this routine.
+C                 Other values of ITOL cause extra, otherwise
+C                 unnecessary, computation per iteration and are
+C                 therefore much less efficient.  See ISDGMR (the
+C                 stop test routine) for more information.
+C         ITOL=1  Means the iteration stops when the first test
+C                 described below on the residual RL is satisfied,
+C                 and there is either right or no preconditioning
+C                 being used.
+C         ITOL=2  Implies that the user is using left
+C                 preconditioning, and the second stopping criterion
+C                 below is used.
+C         ITOL=3  Means the iteration stops when the third test
+C                 described below on Minv*Residual is satisfied, and
+C                 there is either left or no preconditioning being
+C                 used.
+C         ITOL=11 is often useful for checking and comparing
+C                 different routines.  For this case, the user must
+C                 supply the "exact" solution or a very accurate
+C                 approximation (one with an error much less than
+C                 TOL) through a common block,
+C                 COMMON /SOLBLK/ SOLN(1)
+C                 if ITOL=11, iteration stops when the 2-norm of the
+C                 difference between the iterative approximation and
+C                 the user-supplied solution divided by the 2-norm
+C                 of the user-supplied solution is less than TOL.
+C                 Note that this requires the user to set up the
+C                 "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling
+C                 routine.  The routine with this declaration should
+C                 be loaded before the stop test so that the correct
+C                 length is used by the loader.  This procedure is
+C                 not standard Fortran and may not work correctly on
+C                 your system (although it has worked on every
+C                 system the authors have tried).  If ITOL is not 11
+C                 then this common block is indeed standard Fortran.
+C TOL    :INOUT   Double Precision.
+C         Convergence criterion, as described below.  If TOL is set
+C         to zero on input, then a default value of 500*(the smallest
+C         positive magnitude, machine epsilon) is used.
+C ITMAX  :IN       Integer.
+C         Maximum number of iterations.  This routine uses the default
+C         of NRMAX = ITMAX/NSAVE to determine when each restart
+C         should occur.  See the description of NRMAX and MAXL in
+C         DGMRES for a full and frightfully interesting discussion of
+C         this topic.
+C ITER   :OUT      Integer.
+C         Number of iterations required to reach convergence, or
+C         ITMAX+1 if convergence criterion could not be achieved in
+C         ITMAX iterations.
+C ERR    :OUT      Double Precision.
+C         Error estimate of error in final approximate solution, as
+C         defined by ITOL.  Letting norm() denote the Euclidean
+C         norm, ERR is defined as follows...
+C         If ITOL=0, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
+C                               for right or no preconditioning, and
+C                         ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
+C                                norm(SB*(M-inverse)*B),
+C                               for left preconditioning.
+C         If ITOL=1, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
+C                               since right or no preconditioning
+C                               being used.
+C         If ITOL=2, then ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
+C                                norm(SB*(M-inverse)*B),
+C                               since left preconditioning is being
+C                               used.
+C         If ITOL=3, then ERR =  Max  |(Minv*(B-A*X(L)))(i)/x(i)|
+C                               i=1,n
+C         If ITOL=11, then ERR = norm(SB*(X(L)-SOLN))/norm(SB*SOLN).
+C IERR   :OUT      Integer.
+C         Return error flag.
+C               IERR = 0 => All went well.
+C               IERR = 1 => Insufficient storage allocated for
+C                           RGWK or IGWK.
+C               IERR = 2 => Routine DPIGMR failed to reduce the norm
+C                           of the current residual on its last call,
+C                           and so the iteration has stalled.  In
+C                           this case, X equals the last computed
+C                           approximation.  The user must either
+C                           increase MAXL, or choose a different
+C                           initial guess.
+C               IERR =-1 => Insufficient length for RGWK array.
+C                           IGWK(6) contains the required minimum
+C                           length of the RGWK array.
+C               IERR =-2 => Inconsistent ITOL and JPRE values.
+C         For IERR <= 2, RGWK(1) = RHOL, which is the norm on the
+C         left-hand-side of the relevant stopping test defined
+C         below associated with the residual for the current
+C         approximation X(L).
+C IUNIT  :IN       Integer.
+C         Unit number on which to write the error at each iteration,
+C         if this is desired for monitoring convergence.  If unit
+C         number is 0, no writing will occur.
+C RWORK  :WORK    Double Precision RWORK(LENW).
+C         Double Precision array of size LENW.
+C LENW   :IN       Integer.
+C         Length of the double precision workspace, RWORK.
+C         LENW >= 1 + N*(NSAVE+7) + NSAVE*(NSAVE+3).
+C         For the recommended values of NSAVE (10), RWORK has size at
+C         least 131 + 17*N.
+C IWORK  :INOUT    Integer IWORK(USER DEFINED >= 30).
+C         Used to hold pointers into the RWORK array.
+C         Upon return the following locations of IWORK hold information
+C         which may be of use to the user:
+C         IWORK(9)  Amount of Integer workspace actually used.
+C         IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW  :IN       Integer.
+C         Length of the integer workspace IWORK.  LENIW >= 30.
+C
+C *Description:
+C       DSDGMR solves a linear system A*X = B rewritten in the form:
+C
+C        (SB*A*(M-inverse)*(SX-inverse))*(SX*M*X) = SB*B,
+C
+C       with right preconditioning, or
+C
+C        (SB*(M-inverse)*A*(SX-inverse))*(SX*X) = SB*(M-inverse)*B,
+C
+C       with left preconditioning, where a is an n-by-n double
+C       precision matrix,
+C       X and B are N-vectors, SB and SX are diagonal scaling
+C       matrices, and M is the diagonal of A.  It uses
+C       preconditioned Krylov subspace methods based on the
+C       generalized minimum residual method (GMRES).  This routine
+C       is a driver routine which assumes a SLAP matrix data
+C       structure and sets up the necessary information to do
+C       diagonal preconditioning and calls the main GMRES routine
+C       DGMRES for the solution of the linear system.  DGMRES
+C       optionally performs either the full orthogonalization
+C       version of the GMRES algorithm or an incomplete variant of
+C       it.  Both versions use restarting of the linear iteration by
+C       default, although the user can disable this feature.
+C
+C       The GMRES algorithm generates a sequence of approximations
+C       X(L) to the true solution of the above linear system.  The
+C       convergence criteria for stopping the iteration is based on
+C       the size of the scaled norm of the residual R(L) = B -
+C       A*X(L).  The actual stopping test is either:
+C
+C               norm(SB*(B-A*X(L))) .le. TOL*norm(SB*B),
+C
+C       for right preconditioning, or
+C
+C               norm(SB*(M-inverse)*(B-A*X(L))) .le.
+C                       TOL*norm(SB*(M-inverse)*B),
+C
+C       for left preconditioning, where norm() denotes the euclidean
+C       norm, and TOL is a positive scalar less than one input by
+C       the user.  If TOL equals zero when DSDGMR is called, then a
+C       default value of 500*(the smallest positive magnitude,
+C       machine epsilon) is used.  If the scaling arrays SB and SX
+C       are used, then ideally they should be chosen so that the
+C       vectors SX*X(or SX*M*X) and SB*B have all their components
+C       approximately equal to one in magnitude.  If one wants to
+C       use the same scaling in X and B, then SB and SX can be the
+C       same array in the calling program.
+C
+C       The following is a list of the other routines and their
+C       functions used by GMRES:
+C       DGMRES  Contains the matrix structure independent driver
+C               routine for GMRES.
+C       DPIGMR  Contains the main iteration loop for GMRES.
+C       DORTH   Orthogonalizes a new vector against older basis vects.
+C       DHEQR   Computes a QR decomposition of a Hessenberg matrix.
+C       DHELS   Solves a Hessenberg least-squares system, using QR
+C               factors.
+C       RLCALC  Computes the scaled residual RL.
+C       XLCALC  Computes the solution XL.
+C       ISDGMR  User-replaceable stopping routine.
+C
+C       The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C       data structures: 1) the SLAP Triad format or 2) the SLAP
+C       Column format.  The user can hand this routine either one of
+C       these data structures and SLAP will figure out which one
+C       is being used and act accordingly.
+C
+C       =================== S L A P Triad format ===================
+C       This routine requires that the matrix A be stored in the
+C       SLAP Triad format.  In this format only the non-zeros are
+C       stored.  They may appear in *ANY* order.  The user supplies
+C       three arrays of length NELT, where NELT is the number of
+C       non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)).  For
+C       each non-zero the user puts the row and column index of that
+C       matrix element in the IA and JA arrays.  The value of the
+C       non-zero matrix element is placed in the corresponding
+C       location of the A array.  This is an extremely easy data
+C       structure to generate.  On the other hand it is not too
+C       efficient on vector computers for the iterative solution of
+C       linear systems.  Hence, SLAP changes this input data
+C       structure to the SLAP Column format for the iteration (but
+C       does not change it back).
+C
+C       Here is an example of the SLAP Triad storage format for a
+C       5x5 Matrix.  Recall that the entries may appear in any order.
+C
+C           5x5 Matrix       SLAP Triad format for 5x5 matrix on left.
+C                              1  2  3  4  5  6  7  8  9 10 11
+C       |11 12  0  0 15|   A: 51 12 11 33 15 53 55 22 35 44 21
+C       |21 22  0  0  0|  IA:  5  1  1  3  1  5  5  2  3  4  2
+C       | 0  0 33  0 35|  JA:  1  2  1  3  5  3  5  2  5  4  1
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C       =================== S L A P Column format ==================
+C       This routine requires that the matrix A be stored in the
+C       SLAP Column format.  In this format the non-zeros are stored
+C       counting down columns (except for the diagonal entry, which
+C       must appear first in each "column") and are stored in the
+C       double precision array A.  In other words, for each column
+C       in the matrix put the diagonal entry in A.  Then put in the
+C       other non-zero elements going down the column (except the
+C       diagonal) in order.  The IA array holds the row index for
+C       each non-zero.  The JA array holds the offsets into the IA,
+C       A arrays for the beginning of each column.  That is,
+C       IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C       ICOL-th column in IA and A.  IA(JA(ICOL+1)-1),
+C       A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C       Note that we always have JA(N+1) = NELT+1, where N is the
+C       number of columns in the matrix and NELT is the number of
+C       non-zeros in the matrix.
+C
+C       Here is an example of the SLAP Column storage format for a
+C       5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C       column):
+C
+C           5x5 Matrix      SLAP Column format for 5x5 matrix on left.
+C                              1  2  3    4  5    6  7    8    9 10 11
+C       |11 12  0  0 15|   A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C       |21 22  0  0  0|  IA:  1  2  5 |  2  1 |  3  5 |  4 |  5  1  3
+C       | 0  0 33  0 35|  JA:  1  4  6    8  9   12
+C       | 0  0  0 44  0|
+C       |51  0 53  0 55|
+C
+C *Precision:           Double Precision
+C *Side Effects:
+C       The SLAP Triad format (IA, JA, A) is modified internally to be
+C       the SLAP Column format.  See above.
+C***REFERENCES  1. Peter N. Brown and A. C. Hindmarsh,
+C                  "Reduced Storage Matrix Methods In Stiff ODE
+C                  Systems," LLNL report UCRL-95088, Rev. 1,
+C                  June 1987.
+C***ROUTINES CALLED  DS2Y, DCHKW, DSDS, DGMRES
+C***END PROLOGUE  DSDGMR
+C         The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL
+      INTEGER ITMAX, ITER, IERR, IUNIT, LENW, LENIW, IWORK(LENIW)
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+      EXTERNAL DSMV, DSDI
+      PARAMETER (LOCRB=1, LOCIB=11)
+C
+C         Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSDGMR
+      IERR = 0
+      ERR = 0.0
+C         Guard against an invalid restart dimension: NSAVE must be
+C         greater than 1.  IERR = 3 signals this input error (note
+C         that 3 is not among the IERR values listed in the prologue).
+      IF( NSAVE.LE.1 ) THEN
+         IERR = 3
+         RETURN
+      ENDIF
+      CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C         Set up the workspace.  We assume MAXL=KMP=NSAVE.
+C         Compute the inverse of the diagonal of the matrix.
+      LOCIGW = LOCIB
+      LOCIW = LOCIGW + 20
+C
+      LOCDIN = LOCRB
+      LOCRGW = LOCDIN + N
+      LOCW = LOCRGW + 1+N*(NSAVE+6)+NSAVE*(NSAVE+3)
+C
+      IWORK(4) = LOCDIN
+      IWORK(9) = LOCIW
+      IWORK(10) = LOCW
+C
+C         Check the workspace allocations.
+      CALL DCHKW( 'DSDGMR', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+      IF( IERR.NE.0 ) RETURN
+C
+      CALL DSDS(N, NELT, IA, JA, A, ISYM, RWORK(LOCDIN))
+C
+C         Perform the Diagonally Scaled Generalized Minimum
+C         Residual iteration algorithm.  The following DGMRES
+C         defaults are used MAXL = KMP = NSAVE, JSCAL = 0,
+C         JPRE = -1, NRMAX = ITMAX/NSAVE
+      IWORK(LOCIGW ) = NSAVE
+      IWORK(LOCIGW+1) = NSAVE
+      IWORK(LOCIGW+2) = 0
+      IWORK(LOCIGW+3) = -1
+      IWORK(LOCIGW+4) = ITMAX/NSAVE
+      MYITOL = 0
+C
+      CALL DGMRES( N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSDI,
+     $     MYITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, RWORK,
+     $     RWORK(LOCRGW), LENW-LOCRGW, IWORK(LOCIGW), 20,
+     $     RWORK, IWORK )
+C
+C         If DGMRES took more than ITMAX iterations, report failure
+C         to converge.
+      IF( ITER.GT.ITMAX ) IERR = 2
+      RETURN
+C------------- LAST LINE OF DSDGMR FOLLOWS ----------------------------
+      END
+*DECK DSLUGM
+ SUBROUTINE DSLUGM(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW,
+ $ IWORK, LENIW )
+C***BEGIN PROLOGUE DSLUGM
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSLUGM-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C***PURPOSE Incomplete LU GMRES iterative sparse Ax=b solver.
+C This routine uses the generalized minimum residual
+C (GMRES) method with incomplete LU factorization for
+C preconditioning to solve possibly non-symmetric linear
+C systems of the form: Ax = b.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE
+C INTEGER ITOL, ITMAX, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, SB(N), SX(N)
+C DOUBLE PRECISION RWORK(LENW)
+C EXTERNAL MATVEC, MSOLVE
+C
+C CALL DSLUGM(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
+C $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+C $ RWORK, LENW, IWORK, LENIW)
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "Description",
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C NSAVE :IN Integer.
+C Number of direction vectors to save and orthogonalize against.
+C Must be greater than 1.
+C ITOL :IN Integer.
+C Flag to indicate the type of convergence criterion used.
+C ITOL=0 Means the iteration stops when the test described
+C below on the residual RL is satisfied. This is
+C the "Natural Stopping Criteria" for this routine.
+C Other values of ITOL cause extra, otherwise
+C unnecessary, computation per iteration and are
+C therefore much less efficient. See ISDGMR (the
+C stop test routine) for more information.
+C ITOL=1 Means the iteration stops when the first test
+C described below on the residual RL is satisfied,
+C and there is either right or no preconditioning
+C being used.
+C ITOL=2 Implies that the user is using left
+C preconditioning, and the second stopping criterion
+C below is used.
+C ITOL=3 Means the iteration stops when the third test
+C described below on Minv*Residual is satisfied, and
+C                 there is either left or no preconditioning being
+C used.
+C ITOL=11 is often useful for checking and comparing
+C different routines. For this case, the user must
+C supply the "exact" solution or a very accurate
+C approximation (one with an error much less than
+C TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the
+C difference between the iterative approximation and
+C the user-supplied solution divided by the 2-norm
+C of the user-supplied solution is less than TOL.
+C Note that this requires the user to set up the
+C "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling
+C routine. The routine with this declaration should
+C be loaded before the stop test so that the correct
+C length is used by the loader. This procedure is
+C not standard Fortran and may not work correctly on
+C your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11
+C then this common block is indeed standard Fortran.
+C TOL :INOUT Double Precision.
+C Convergence criterion, as described below. If TOL is set
+C to zero on input, then a default value of 500*(the smallest
+C positive magnitude, machine epsilon) is used.
+C ITMAX :IN Integer.
+C Maximum number of iterations. This routine uses the default
+C         of NRMAX = ITMAX/NSAVE to determine when each restart
+C should occur. See the description of NRMAX and MAXL in
+C DGMRES for a full and frightfully interesting discussion of
+C this topic.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL. Letting norm() denote the Euclidean
+C norm, ERR is defined as follows...
+C If ITOL=0, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
+C for right or no preconditioning, and
+C ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
+C norm(SB*(M-inverse)*B),
+C for left preconditioning.
+C If ITOL=1, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
+C since right or no preconditioning
+C being used.
+C If ITOL=2, then ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
+C norm(SB*(M-inverse)*B),
+C since left preconditioning is being
+C used.
+C If ITOL=3, then ERR = Max |(Minv*(B-A*X(L)))(i)/x(i)|
+C i=1,n
+C If ITOL=11, then ERR = norm(SB*(X(L)-SOLN))/norm(SB*SOLN).
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated for
+C RGWK or IGWK.
+C IERR = 2 => Routine DPIGMR failed to reduce the norm
+C of the current residual on its last call,
+C and so the iteration has stalled. In
+C this case, X equals the last computed
+C approximation. The user must either
+C increase MAXL, or choose a different
+C initial guess.
+C IERR =-1 => Insufficient length for RGWK array.
+C IGWK(6) contains the required minimum
+C length of the RGWK array.
+C IERR =-2 => Inconsistent ITOL and JPRE values.
+C For IERR <= 2, RGWK(1) = RHOL, which is the norm on the
+C left-hand-side of the relevant stopping test defined
+C below associated with the residual for the current
+C approximation X(L).
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array of size LENW.
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= 1 + N*(NSAVE+7) + NSAVE*(NSAVE+3)+NEL+NU.
+C For the recommended values, RWORK
+C has size at least 131 + 17*N + NEL + NU. Where NEL is the
+C number of non- zeros in the lower triangle of the matrix
+C (including the diagonal). NU is the number of nonzeros in
+C the upper triangle of the matrix (including the diagonal).
+C IWORK :INOUT Integer IWORK(LENIW).
+C Used to hold pointers into the RWORK array.
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK.
+C LENIW >= NEL+NU+4*N+32.
+C
+C *Description:
+C DSLUGM solves a linear system A*X = B rewritten in the form:
+C
+C (SB*A*(M-inverse)*(SX-inverse))*(SX*M*X) = SB*B,
+C
+C with right preconditioning, or
+C
+C (SB*(M-inverse)*A*(SX-inverse))*(SX*X) = SB*(M-inverse)*B,
+C
+C with left preconditioning, where a is an n-by-n double
+C precision matrix,
+C X and B are N-vectors, SB and SX are diagonal scaling
+C matrices, and M is the Incomplete LU factorization of A. It
+C uses preconditioned Krylov subpace methods based on the
+C generalized minimum residual method (GMRES). This routine
+C is a driver routine which assumes a SLAP matrix data
+C structure and sets up the necessary information to do
+C diagonal preconditioning and calls the main GMRES routine
+C DGMRES for the solution of the linear system. DGMRES
+C optionally performs either the full orthogonalization
+C version of the GMRES algorithm or an incomplete variant of
+C it. Both versions use restarting of the linear iteration by
+C default, although the user can disable this feature.
+C
+C The GMRES algorithm generates a sequence of approximations
+C X(L) to the true solution of the above linear system. The
+C convergence criteria for stopping the iteration is based on
+C the size of the scaled norm of the residual R(L) = B -
+C A*X(L). The actual stopping test is either:
+C
+C norm(SB*(B-A*X(L))) .le. TOL*norm(SB*B),
+C
+C for right preconditioning, or
+C
+C norm(SB*(M-inverse)*(B-A*X(L))) .le.
+C TOL*norm(SB*(M-inverse)*B),
+C
+C for left preconditioning, where norm() denotes the euclidean
+C norm, and TOL is a positive scalar less than one input by
+C the user. If TOL equals zero when DSLUGM is called, then a
+C default value of 500*(the smallest positive magnitude,
+C machine epsilon) is used. If the scaling arrays SB and SX
+C are used, then ideally they should be chosen so that the
+C vectors SX*X(or SX*M*X) and SB*B have all their components
+C approximately equal to one in magnitude. If one wants to
+C use the same scaling in X and B, then SB and SX can be the
+C same array in the calling program.
+C
+C The following is a list of the other routines and their
+C functions used by GMRES:
+C DGMRES Contains the matrix structure independent driver
+C routine for GMRES.
+C DPIGMR Contains the main iteration loop for GMRES.
+C DORTH Orthogonalizes a new vector against older basis vects.
+C DHEQR Computes a QR decomposition of a Hessenberg matrix.
+C DHELS Solves a Hessenberg least-squares system, using QR
+C factors.
+C RLCALC Computes the scaled residual RL.
+C XLCALC Computes the solution XL.
+C ISDGMR User-replaceable stopping routine.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C       Column format.  The user can hand this routine either one
+C       of these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to be
+C the SLAP Column format. See above.
+C***REFERENCES 1. Peter N. Brown and A. C. Hindmarsh,
+C "Reduced Storage Matrix Methods In Stiff ODE
+C Systems," LLNL report UCRL-95088, Rev. 1,
+C June 1987.
+C***ROUTINES CALLED DS2Y, DCHKW, DSILUS, DGMRES, DSMV, DSLUI
+C***END PROLOGUE DSLUGM
+C The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL
+ INTEGER ITMAX, ITER, IERR, IUNIT, LENW, LENIW, IWORK(LENIW)
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+ EXTERNAL DSMV, DSLUI
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSLUGM
+ IERR = 0
+ ERR = 0.0
+ IF( NSAVE.LE.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Count number of Non-Zero elements preconditioner ILU matrix.
+C Then set up the work arrays. We assume MAXL=KMP=NSAVE.
+ NL = 0
+ NU = 0
+ DO 20 ICOL = 1, N
+C Don't count diagonal.
+ JBGN = JA(ICOL)+1
+ JEND = JA(ICOL+1)-1
+ IF( JBGN.LE.JEND ) THEN
+CVD$ NOVECTOR
+ DO 10 J = JBGN, JEND
+ IF( IA(J).GT.ICOL ) THEN
+ NL = NL + 1
+ IF( ISYM.NE.0 ) NU = NU + 1
+ ELSE
+ NU = NU + 1
+ ENDIF
+ 10 CONTINUE
+ ENDIF
+ 20 CONTINUE
+C
+ LOCIGW = LOCIB
+ LOCIL = LOCIGW + 20
+ LOCJL = LOCIL + N+1
+ LOCIU = LOCJL + NL
+ LOCJU = LOCIU + NU
+ LOCNR = LOCJU + N+1
+ LOCNC = LOCNR + N
+ LOCIW = LOCNC + N
+C
+ LOCL = LOCRB
+ LOCDIN = LOCL + NL
+ LOCU = LOCDIN + N
+ LOCRGW = LOCU + NU
+ LOCW = LOCRGW + 1+N*(NSAVE+6)+NSAVE*(NSAVE+3)
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSLUGM', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+ IWORK(1) = LOCIL
+ IWORK(2) = LOCJL
+ IWORK(3) = LOCIU
+ IWORK(4) = LOCJU
+ IWORK(5) = LOCL
+ IWORK(6) = LOCDIN
+ IWORK(7) = LOCU
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+C Compute the Incomplete LU decomposition.
+ CALL DSILUS( N, NELT, IA, JA, A, ISYM, NL, IWORK(LOCIL),
+ $ IWORK(LOCJL), RWORK(LOCL), RWORK(LOCDIN), NU, IWORK(LOCIU),
+ $ IWORK(LOCJU), RWORK(LOCU), IWORK(LOCNR), IWORK(LOCNC) )
+C
+C         Perform the Incomplete LU Preconditioned Generalized Minimum
+C Residual iteration algorithm. The following DGMRES
+C defaults are used MAXL = KMP = NSAVE, JSCAL = 0,
+C JPRE = -1, NRMAX = ITMAX/NSAVE
+ IWORK(LOCIGW ) = NSAVE
+ IWORK(LOCIGW+1) = NSAVE
+ IWORK(LOCIGW+2) = 0
+ IWORK(LOCIGW+3) = -1
+ IWORK(LOCIGW+4) = ITMAX/NSAVE
+ MYITOL = 0
+C
+ CALL DGMRES( N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSLUI,
+ $ MYITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, RWORK,
+ $ RWORK(LOCRGW), LENW-LOCRGW, IWORK(LOCIGW), 20,
+ $ RWORK, IWORK )
+C
+ IF( ITER.GT.ITMAX ) IERR = 2
+ RETURN
+C------------- LAST LINE OF DSLUGM FOLLOWS ----------------------------
+ END
+*DECK DHELS
+      SUBROUTINE DHELS(A, LDA, N, Q, B)
+C***BEGIN PROLOGUE  DHELS
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4, D2B4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DHEQR-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR  Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C           Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C           Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C***PURPOSE  Internal routine for DGMRES.
+C***DESCRIPTION
+C        This routine is extracted from the LINPACK routine SGESL with
+C        changes due to the fact that A is an upper Hessenberg
+C        matrix.
+C
+C        DHELS solves the least squares problem:
+C
+C                   MIN(B-A*X,B-A*X)
+C
+C        using the factors computed by DHEQR.
+C
+C *Usage:
+C      INTEGER LDA, N
+C      DOUBLE PRECISION A(LDA,1), B(1), Q(1)
+C
+C      CALL DHELS(A, LDA, N, Q, B)
+C
+C *Arguments:
+C A       :IN       Double Precision A(LDA,N)
+C          The output from DHEQR which contains the upper
+C          triangular factor R in the QR decomposition of A.
+C LDA     :IN       Integer
+C          The leading dimension of the array A.
+C N       :IN       Integer
+C          A is originally an (N+1) by N matrix.
+C Q       :IN       Double Precision Q(2*N)
+C          The coefficients of the N givens rotations
+C          used in the QR factorization of A.  Q(2*K-1) and Q(2*K)
+C          hold the cosine and sine of the K-th rotation.
+C B       :INOUT    Double Precision B(N+1)
+C          On input, B is the right hand side vector.
+C          On output, B is the solution vector X.
+C *See Also:
+C         DGMRES
+C
+C***ROUTINES CALLED  DAXPY
+C***END PROLOGUE  DHELS
+C         The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER LDA, N
+      DOUBLE PRECISION A(LDA,1), B(1), Q(1)
+C
+C         Local Variables.
+C
+      INTEGER IQ, K, KB, KP1
+      DOUBLE PRECISION C, S, T, T1, T2
+C
+C         minimize(B-A*X,B-A*X).  First form Q*B, applying the N
+C         stored Givens rotations (cosine/sine pairs in Q) to B in
+C         the same order they were generated by DHEQR.
+C
+      DO 20 K = 1, N
+         KP1 = K + 1
+         IQ = 2*(K-1) + 1
+         C = Q(IQ)
+         S = Q(IQ+1)
+         T1 = B(K)
+         T2 = B(KP1)
+         B(K) = C*T1 - S*T2
+         B(KP1) = S*T1 + C*T2
+ 20   CONTINUE
+C
+C         Now solve  R*X = Q*B by column-oriented back substitution
+C         (as in SGESL): divide by the diagonal, then subtract the
+C         K-th column's contribution from the remaining rows.
+C         NOTE(review): divides by A(K,K); DHEQR reports a zero
+C         diagonal via INFO, which the caller is expected to check.
+C
+      DO 40 KB = 1, N
+         K = N + 1 - KB
+         B(K) = B(K)/A(K,K)
+         T = -B(K)
+         CALL DAXPY(K-1, T, A(1,K), 1, B(1), 1)
+ 40   CONTINUE
+      RETURN
+C------------- LAST LINE OF DHELS FOLLOWS ----------------------------
+      END
+*DECK DHEQR
+      SUBROUTINE DHEQR(A, LDA, N, Q, INFO, IJOB)
+C***BEGIN PROLOGUE  DHEQR
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4, D2B4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DHEQR-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR  Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C           Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C           Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C***PURPOSE  Internal routine for DGMRES.
+C***DESCRIPTION
+C        This routine performs a QR decomposition of an upper
+C        Hessenberg matrix A using Givens rotations.  There are two
+C        options available: 1) Performing a fresh decomposition 2)
+C        updating the QR factors by adding a row and a column to the
+C        matrix A.
+C
+C *Usage:
+C      INTEGER LDA, N, INFO, IJOB
+C      DOUBLE PRECISION A(LDA,1), Q(1)
+C
+C      CALL DHEQR(A, LDA, N, Q, INFO, IJOB)
+C
+C *Arguments:
+C A       :INOUT    Double Precision A(LDA,N)
+C          On input, the matrix to be decomposed.
+C          On output, the upper triangular matrix R.
+C          The factorization can be written Q*A = R, where
+C          Q is a product of Givens rotations and R is upper
+C          triangular.
+C LDA     :IN       Integer
+C          The leading dimension of the array A.
+C N       :IN       Integer
+C          A is an (N+1) by N Hessenberg matrix.
+C IJOB    :IN       Integer
+C          = 1     means that a fresh decomposition of the
+C                  matrix A is desired.
+C          .ge. 2  means that the current decomposition of A
+C                  will be updated by the addition of a row
+C                  and a column.
+C Q       :OUT      Double Precision Q(2*N)
+C          The factors c and s of each Givens rotation used
+C          in decomposing A.  Q(2*K-1) holds c and Q(2*K) holds s
+C          for the K-th rotation.
+C INFO    :OUT      Integer
+C          = 0  normal value.
+C          = K  if  A(K,K) .eq. 0.0 .  This is not an error
+C               condition for this subroutine, but it does
+C               indicate that DHELS will divide by zero
+C               if called.
+C
+C *See Also:
+C         DGMRES
+C
+C***ROUTINES CALLED  (NONE)
+C***END PROLOGUE  DHEQR
+C         The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER LDA, N, INFO, IJOB
+      DOUBLE PRECISION A(LDA,*), Q(*)
+C
+C         Local Variables.
+C
+      INTEGER I, IQ, J, K, KM1, KP1, NM1
+      DOUBLE PRECISION C, S, T, T1, T2
+C
+C***FIRST EXECUTABLE STATEMENT  DHEQR
+      IF (IJOB .GT. 1) GO TO 70
+C   -------------------------------------------------------------------
+C         A new factorization is desired.
+C   -------------------------------------------------------------------
+C         QR decomposition without pivoting.
+C
+      INFO = 0
+      DO 60 K = 1, N
+         KM1 = K - 1
+         KP1 = K + 1
+C
+C           Compute K-th column of R.
+C           First, multiply the K-th column of A by the previous
+C           K-1 Givens rotations.
+C
+         IF (KM1 .LT. 1) GO TO 20
+         DO 10 J = 1, KM1
+            I = 2*(J-1) + 1
+            T1 = A(J,K)
+            T2 = A(J+1,K)
+            C = Q(I)
+            S = Q(I+1)
+            A(J,K) = C*T1 - S*T2
+            A(J+1,K) = S*T1 + C*T2
+ 10      CONTINUE
+C
+C           Compute Givens components C and S that annihilate the
+C           subdiagonal entry A(K+1,K).  The two branches divide by
+C           the larger of |T1|, |T2| so the ratio T never overflows.
+C
+ 20      CONTINUE
+         IQ = 2*KM1 + 1
+         T1 = A(K,K)
+         T2 = A(KP1,K)
+         IF( T2.EQ.0.0D0 ) THEN
+            C = 1.0D0
+            S = 0.0D0
+         ELSEIF( ABS(T2).GE.ABS(T1) ) THEN
+            T = T1/T2
+            S = -1.0D0/DSQRT(1.0D0+T*T)
+            C = -S*T
+         ELSE
+            T = T2/T1
+            C = 1.0D0/DSQRT(1.0D0+T*T)
+            S = -C*T
+         ENDIF
+         Q(IQ) = C
+         Q(IQ+1) = S
+         A(K,K) = C*T1 - S*T2
+C           A zero diagonal is flagged, not fatal here (see INFO).
+         IF( A(K,K).EQ.0.0D0 ) INFO = K
+ 60   CONTINUE
+      RETURN
+C   -------------------------------------------------------------------
+C         The old factorization of A will be updated.  A row and a
+C         column has been added to the matrix A.  N by N-1 is now
+C         the old size of the matrix.
+C   -------------------------------------------------------------------
+ 70   CONTINUE
+      NM1 = N - 1
+C   -------------------------------------------------------------------
+C         Multiply the new column by the N previous Givens rotations.
+C   -------------------------------------------------------------------
+      DO 100 K = 1,NM1
+         I = 2*(K-1) + 1
+         T1 = A(K,N)
+         T2 = A(K+1,N)
+         C = Q(I)
+         S = Q(I+1)
+         A(K,N) = C*T1 - S*T2
+         A(K+1,N) = S*T1 + C*T2
+ 100  CONTINUE
+C   -------------------------------------------------------------------
+C         Complete update of decomposition by forming last Givens
+C         rotation, and multiplying it times the column
+C         vector(A(N,N),A(NP1,N)).  Same scaled construction as in
+C         the fresh-decomposition path above.
+C   -------------------------------------------------------------------
+      INFO = 0
+      T1 = A(N,N)
+      T2 = A(N+1,N)
+      IF ( T2.EQ.0.0D0 ) THEN
+         C = 1.0D0
+         S = 0.0D0
+      ELSEIF( ABS(T2).GE.ABS(T1) ) THEN
+         T = T1/T2
+         S = -1.0D0/DSQRT(1.0D0+T*T)
+         C = -S*T
+      ELSE
+         T = T2/T1
+         C = 1.0D0/DSQRT(1.0D0+T*T)
+         S = -C*T
+      ENDIF
+      IQ = 2*N - 1
+      Q(IQ) = C
+      Q(IQ+1) = S
+      A(N,N) = C*T1 - S*T2
+      IF (A(N,N) .EQ. 0.0D0) INFO = N
+      RETURN
+C------------- LAST LINE OF DHEQR FOLLOWS ----------------------------
+      END
+*DECK DORTH
+      SUBROUTINE DORTH(VNEW, V, HES, N, LL, LDHES, KMP, SNORMW)
+C***BEGIN PROLOGUE  DORTH
+C***DATE WRITTEN   890404   (YYMMDD)
+C***REVISION DATE  890404   (YYMMDD)
+C***CATEGORY NO.  D2A4, D2B4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DORTH-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR  Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C           Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C           Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C***PURPOSE  Internal routine for DGMRES.
+C***DESCRIPTION
+C        This routine orthogonalizes the vector VNEW against the
+C        previous KMP vectors in the V array.  It uses a modified
+C        Gram-Schmidt orthogonalization procedure with conditional
+C        reorthogonalization.
+C
+C *Usage:
+C      INTEGER N, LL, LDHES, KMP
+C      DOUBLE PRECISION VNEW, V, HES, SNORMW
+C      DIMENSION VNEW(1), V(N,1), HES(LDHES,1)
+C
+C      CALL DORTH(VNEW, V, HES, N, LL, LDHES, KMP, SNORMW)
+C
+C *Arguments:
+C  VNEW   :INOUT    Double Precision VNEW(N)
+C          On input, the vector of length n containing a scaled
+C          product of the jacobian and the vector v(*,ll).
+C          On output, the new vector orthogonal to v(*,i0) to v(*,ll),
+C          where i0 = max(1, ll-kmp+1).
+C  V      :IN       Double Precision V(N,1)
+C          The n x ll array containing the previous ll
+C          orthogonal vectors v(*,1) to v(*,ll).
+C  HES    :INOUT    Double Precision HES(LDHES,1)
+C          On input, an LL x LL upper hessenberg matrix containing,
+C          in HES(I,K), K.lt.LL, the scaled inner products of
+C          A*V(*,K) and V(*,i).
+C          On return, column LL of HES is filled in with
+C          the scaled inner products of A*V(*,LL) and V(*,i).
+C  LDHES  :IN       Integer
+C          The leading dimension of the HES array.
+C  N      :IN       Integer
+C          The order of the matrix A, and the length of VNEW.
+C  LL     :IN       Integer
+C          The current order of the matrix HES.
+C  KMP    :IN       Integer
+C          The number of previous vectors the new vector VNEW
+C          must be made orthogonal to (KMP .le. MAXL).
+C  SNORMW :OUT      DOUBLE PRECISION
+C          Scalar containing the l-2 norm of VNEW.
+C
+C *See Also:
+C         DGMRES
+C
+C***ROUTINES CALLED  DAXPY
+C***END PROLOGUE  DORTH
+C         The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, LL, LDHES, KMP
+      DOUBLE PRECISION VNEW, V, HES, SNORMW
+      DIMENSION VNEW(1), V(N,1), HES(LDHES,1)
+C
+C         Internal variables.
+C
+      INTEGER I, I0
+      DOUBLE PRECISION ARG, SUMDSQ, TEM, VNRM
+C
+C         Get norm of unaltered VNEW for later use.
+C***FIRST EXECUTABLE STATEMENT  DORTH
+      VNRM = DNRM2(N, VNEW, 1)
+C   -------------------------------------------------------------------
+C         Perform the modified Gram-Schmidt procedure on VNEW =A*V(LL).
+C         Scaled inner products give new column of HES.
+C         Projections of earlier vectors are subtracted from VNEW.
+C         Only the last KMP vectors are used (I0 below), so the
+C         orthogonalization is incomplete when KMP .lt. LL.
+C   -------------------------------------------------------------------
+      I0 = MAX0(1,LL-KMP+1)
+      DO 10 I = I0,LL
+         HES(I,LL) = DDOT(N, V(1,I), 1, VNEW, 1)
+         TEM = -HES(I,LL)
+         CALL DAXPY(N, TEM, V(1,I), 1, VNEW, 1)
+ 10   CONTINUE
+C   -------------------------------------------------------------------
+C         Compute SNORMW = norm of VNEW.  If VNEW is small compared
+C         to its input value (in norm), then reorthogonalize VNEW to
+C         V(*,1) through V(*,LL).  Correct if relative correction
+C         exceeds 1000*(unit roundoff).  Finally, correct SNORMW using
+C         the dot products involved.
+C         NOTE: the .NE./.EQ. tests below are deliberate floating-
+C         point cancellation checks (the sum differs from VNRM only
+C         if SNORMW is non-negligible relative to it); do not
+C         "simplify" them algebraically.
+C   -------------------------------------------------------------------
+      SNORMW = DNRM2(N, VNEW, 1)
+      IF (VNRM + 0.001D0*SNORMW .NE. VNRM) RETURN
+      SUMDSQ = 0.0D0
+      DO 30 I = I0,LL
+         TEM = -DDOT(N, V(1,I), 1, VNEW, 1)
+         IF (HES(I,LL) + 0.001D0*TEM .EQ. HES(I,LL)) GO TO 30
+         HES(I,LL) = HES(I,LL) - TEM
+         CALL DAXPY(N, TEM, V(1,I), 1, VNEW, 1)
+         SUMDSQ = SUMDSQ + TEM**2
+ 30   CONTINUE
+      IF (SUMDSQ .EQ. 0.0D0) RETURN
+C         Clamp at zero before the square root to guard against a
+C         tiny negative argument from roundoff.
+      ARG = MAX(0.0D0,SNORMW**2 - SUMDSQ)
+      SNORMW = DSQRT(ARG)
+C
+      RETURN
+C------------- LAST LINE OF DORTH FOLLOWS ----------------------------
+      END
+*DECK DPIGMR
+ SUBROUTINE DPIGMR(N, R0, SR, SZ, JSCAL, MAXL, MAXLP1, KMP,
+ $ NRSTS, JPRE, MATVEC, MSOLVE, NMSL, Z, V, HES, Q, LGMR,
+ $ RPAR, IPAR, WK, DL, RHOL, NRMAX, B, BNRM, X, XL,
+ $ ITOL, TOL, NELT, IA, JA, A, ISYM, IUNIT, IFLAG, ERR)
+C***BEGIN PROLOGUE DPIGMR
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DPIGMR-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C***PURPOSE Internal routine for DGMRES.
+C***DESCRIPTION
+C This routine solves the linear system A * Z = R0 using a
+C scaled preconditioned version of the generalized minimum
+C residual method. An initial guess of Z = 0 is assumed.
+C
+C *Usage:
+C EXTERNAL MATVEC, MSOLVE
+C INTEGER N,MAXL,MAXLP1,KMP,JPRE,NMSL,LGMR,IPAR,IFLAG,JSCAL,NRSTS
+C INTEGER NRMAX,ITOL,NELT,ISYM
+C DOUBLE PRECISION R0,SR,SZ,Z,V,HES,Q,RPAR,WK,DL,RHOL,BNRM,TOL,
+C $ A,B,X, R0(1), SR(1), SZ(1), Z(1), V(N,1),
+C $ HES(MAXLP1,1), Q(1), RPAR(1), IPAR(1), WK(1), DL(1),
+C $ IA(NELT), JA(NELT), A(NELT), B(1), X(1), XL(1)
+C
+C CALL DPIGMR(N, R0, SR, SZ, JSCAL, MAXL, MAXLP1, KMP,
+C $ NRSTS, JPRE, MATVEC, MSOLVE, NMSL, Z, V, HES, Q, LGMR,
+C $ RPAR, IPAR, WK, DL, RHOL, NRMAX, B, BNRM, X, XL,
+C $ ITOL, TOL, NELT, IA, JA, A, ISYM, IUNIT, IFLAG, ERR)
+C
+C *Arguments:
+C R0 :IN Double Precision R0(N)
+C R0 = the right hand side of the system A*Z = R0.
+C R0 is also used as work space when computing
+C the final approximation.
+C (R0 is the same as V(*,MAXL+1) in the call to DPIGMR.)
+C SR :IN Double Precision SR(N)
+C SR is a vector of length N containing the nonzero
+C elements of the diagonal scaling matrix for R0.
+C SZ :IN Double Precision SZ(N)
+C SZ is a vector of length N containing the nonzero
+C elements of the diagonal scaling matrix for Z.
+C JSCAL :IN Integer
+C A flag indicating whether arrays SR and SZ are used.
+C JSCAL=0 means SR and SZ are not used and the
+C algorithm will perform as if all
+C SR(i) = 1 and SZ(i) = 1.
+C JSCAL=1 means only SZ is used, and the algorithm
+C performs as if all SR(i) = 1.
+C JSCAL=2 means only SR is used, and the algorithm
+C performs as if all SZ(i) = 1.
+C JSCAL=3 means both SR and SZ are used.
+C N :IN Integer
+C The order of the matrix A, and the lengths
+C of the vectors SR, SZ, R0 and Z.
+C MAXL :IN Integer
+C The maximum allowable order of the matrix H.
+C MAXLP1 :IN Integer
+C MAXPL1 = MAXL + 1, used for dynamic dimensioning of HES.
+C KMP :IN Integer
+C The number of previous vectors the new vector VNEW
+C must be made orthogonal to. (KMP .le. MAXL)
+C NRSTS :IN Integer
+C Counter for the number of restarts on the current
+C call to DGMRES. If NRSTS .gt. 0, then the residual
+C R0 is already scaled, and so scaling of it is
+C not necessary.
+C JPRE :IN Integer
+C Preconditioner type flag.
+C WK :IN Double Precision WK(N)
+C A double precision work array of length N used by routine
+C MATVEC
+C and MSOLVE.
+C DL :INOUT Double Precision DL(N)
+C On input, a double precision work array of length N used for
+C calculation of the residual norm RHO when the method is
+C incomplete (KMP.lt.MAXL), and/or when using restarting.
+C On output, the scaled residual vector RL. It is only loaded
+C when performing restarts of the Krylov iteration.
+C NRMAX :IN Integer
+C The maximum number of restarts of the Krylov iteration.
+C NRMAX .gt. 0 means restarting is active, while
+C NRMAX = 0 means restarting is not being used.
+C B :IN Double Precision B(N)
+C The right hand side of the linear system A*X = B.
+C BNRM :IN Double Precision
+C The scaled norm of b.
+C X :IN Double Precision X(N)
+C The current approximate solution as of the last
+C restart.
+C XL :IN Double Precision XL(N)
+C An array of length N used to hold the approximate
+C solution X(L) when ITOL=11.
+C ITOL :IN Integer
+C A flag to indicate the type of convergence criterion
+C used. see the driver for its description.
+C TOL :IN Double Precision
+C The tolerance on residuals R0-A*Z in scaled norm.
+C NELT :IN Integer
+C The length of arrays IA, JA and A.
+C IA :IN Integer IA(NELT)
+C An integer array of length NELT containing matrix data.
+C It is passed directly to the MATVEC and MSOLVE routines.
+C JA :IN Integer JA(NELT)
+C An integer array of length NELT containing matrix data.
+C It is passed directly to the MATVEC and MSOLVE routines.
+C A :IN Double Precision A(NELT)
+C A double precision array of length NELT containing matrix
+C data. It is passed directly to the MATVEC and MSOLVE routines.
+C ISYM :IN Integer
+C A flag to indicate symmetric matrix storage.
+C If ISYM=0, all nonzero entries of the matrix are
+C stored. If ISYM=1, the matrix is symmetric and
+C only the upper or lower triangular part is stored.
+C IUNIT :IN Integer
+C The i/o unit number for writing intermediate residual
+C norm values.
+C Z :OUT Double Precision Z(N)
+C The final computed approximation to the solution
+C of the system A*Z = R0.
+C LGMR :OUT Integer
+C The number of iterations performed and
+C the current order of the upper hessenberg
+C matrix HES.
+C RPAR :IN Double Precision RPAR(*)
+C Double Precision work space passed directly to the MSOLVE
+C routine.
+C IPAR :IN Integer IPAR(*)
+C Integer work space passed directly to the MSOLVE
+C routine.
+C NMSL :OUT Integer
+C The number of calls to MSOLVE.
+C V :OUT Double Precision V(N,MAXLP1)
+C The N by (LGMR+1) array containing the LGMR
+C orthogonal vectors V(*,1) to V(*,LGMR).
+C HES :OUT Double Precision HES(MAXLP1,MAXL)
+C The upper triangular factor of the QR decomposition
+C of the (LGMR+1) by LGMR upper Hessenberg matrix whose
+C entries are the scaled inner-products of A*V(*,I)
+C and V(*,K).
+C Q :OUT Double Precision Q(2*MAXL)
+C A double precision array of length 2*MAXL containing the
+C components of the Givens rotations used in the QR
+C decomposition of HES. It is loaded in DHEQR and used in
+C DHELS.
+C RHOL :OUT Double Precision
+C A double precision scalar containing the norm of the final
+C residual.
+C IFLAG :OUT Integer
+C An integer error flag..
+C 0 means convergence in LGMR iterations, LGMR.le.MAXL.
+C 1 means the convergence test did not pass in MAXL
+C iterations, but the residual norm is .lt. norm(R0),
+C and so Z is computed.
+C 2 means the convergence test did not pass in MAXL
+C iterations, residual .ge. norm(R0), and Z = 0.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C
+C *See Also:
+C DGMRES
+C
+C***ROUTINES CALLED ISDGMR, MATVEC, MSOLVE, DORTH, DRLCAL, DHELS,
+C DHEQR, DXLCAL, DAXPY, DCOPY, DSCAL,
+C***END PROLOGUE DPIGMR
+C The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ EXTERNAL MATVEC, MSOLVE
+ INTEGER N,MAXL,MAXLP1,KMP,JPRE,NMSL,LGMR,IFLAG,JSCAL,NRSTS
+ INTEGER NRMAX,ITOL,NELT,ISYM
+ DOUBLE PRECISION RHOL, BNRM, TOL
+ DOUBLE PRECISION R0(*), SR(*), SZ(*), Z(*), V(N,*)
+ DOUBLE PRECISION HES(MAXLP1,*), Q(*), RPAR(*), WK(*), DL(*)
+ DOUBLE PRECISION A(NELT), B(*), X(*), XL(*)
+ INTEGER IPAR(*), IA(NELT), JA(NELT)
+C
+C Local variables.
+C
+ INTEGER I, INFO, IP1, I2, J, K, LL, LLP1
+ DOUBLE PRECISION R0NRM,C,DLNRM,PROD,RHO,S,SNORMW,TEM
+C
+C Zero out the z array.
+C***FIRST EXECUTABLE STATEMENT DPIGMR
+ DO 5 I = 1,N
+ Z(I) = 0.0D0
+ 5 CONTINUE
+C
+ IFLAG = 0
+ LGMR = 0
+ NMSL = 0
+C Load ITMAX, the maximum number of iterations.
+ ITMAX =(NRMAX+1)*MAXL
+C -------------------------------------------------------------------
+C The initial residual is the vector R0.
+C Apply left precon. if JPRE < 0 and this is not a restart.
+C Apply scaling to R0 if JSCAL = 2 or 3.
+C -------------------------------------------------------------------
+ IF ((JPRE .LT. 0) .AND.(NRSTS .EQ. 0)) THEN
+ CALL DCOPY(N, R0, 1, WK, 1)
+ CALL MSOLVE(N, WK, R0, NELT, IA, JA, A, ISYM, RPAR, IPAR)
+ NMSL = NMSL + 1
+ ENDIF
+ IF (((JSCAL.EQ.2) .OR.(JSCAL.EQ.3)) .AND.(NRSTS.EQ.0)) THEN
+ DO 10 I = 1,N
+ V(I,1) = R0(I)*SR(I)
+ 10 CONTINUE
+ ELSE
+ DO 20 I = 1,N
+ V(I,1) = R0(I)
+ 20 CONTINUE
+ ENDIF
+ R0NRM = DNRM2(N, V, 1)
+ ITER = NRSTS*MAXL
+C
+C Call stopping routine ISDGMR.
+C
+ IF (ISDGMR(N, B, X, XL, NELT, IA, JA, A, ISYM, MSOLVE,
+ $ NMSL, ITOL, TOL, ITMAX, ITER, ERR, IUNIT, V(1,1), Z, WK,
+ $ RPAR, IPAR, R0NRM, BNRM, SR, SZ, JSCAL,
+ $ KMP, LGMR, MAXL, MAXLP1, V, Q, SNORMW, PROD, R0NRM,
+ $ HES, JPRE) .NE. 0) RETURN
+ TEM = 1.0D0/R0NRM
+ CALL DSCAL(N, TEM, V(1,1), 1)
+C
+C Zero out the HES array.
+C
+ DO 50 J = 1,MAXL
+ DO 40 I = 1,MAXLP1
+ HES(I,J) = 0.0D0
+ 40 CONTINUE
+ 50 CONTINUE
+C -------------------------------------------------------------------
+C main loop to compute the vectors V(*,2) to V(*,MAXL).
+C The running product PROD is needed for the convergence test.
+C -------------------------------------------------------------------
+ PROD = 1.0D0
+ DO 90 LL = 1,MAXL
+ LGMR = LL
+C -------------------------------------------------------------------
+C Unscale the current V(LL) and store in WK. Call routine
+C msolve to compute(M-inverse)*WK, where M is the
+C preconditioner matrix. Save the answer in Z. Call routine
+C MATVEC to compute VNEW = A*Z, where A is the the system
+C matrix. save the answer in V(LL+1). Scale V(LL+1). Call
+C routine DORTH to orthogonalize the new vector VNEW =
+C V(*,LL+1). Call routine DHEQR to update the factors of HES.
+C -------------------------------------------------------------------
+ IF ((JSCAL .EQ. 1) .OR.(JSCAL .EQ. 3)) THEN
+ DO 60 I = 1,N
+ WK(I) = V(I,LL)/SZ(I)
+ 60 CONTINUE
+ ELSE
+ CALL DCOPY(N, V(1,LL), 1, WK, 1)
+ ENDIF
+ IF (JPRE .GT. 0) THEN
+ CALL MSOLVE(N, WK, Z, NELT, IA, JA, A, ISYM, RPAR, IPAR)
+ NMSL = NMSL + 1
+ CALL MATVEC(N, Z, V(1,LL+1), NELT, IA, JA, A, ISYM)
+ ELSE
+ CALL MATVEC(N, WK, V(1,LL+1), NELT, IA, JA, A, ISYM)
+ ENDIF
+ IF (JPRE .LT. 0) THEN
+ CALL DCOPY(N, V(1,LL+1), 1, WK, 1)
+ CALL MSOLVE(N,WK,V(1,LL+1),NELT,IA,JA,A,ISYM,RPAR,IPAR)
+ NMSL = NMSL + 1
+ ENDIF
+ IF ((JSCAL .EQ. 2) .OR.(JSCAL .EQ. 3)) THEN
+ DO 65 I = 1,N
+ V(I,LL+1) = V(I,LL+1)*SR(I)
+ 65 CONTINUE
+ ENDIF
+ CALL DORTH(V(1,LL+1), V, HES, N, LL, MAXLP1, KMP, SNORMW)
+ HES(LL+1,LL) = SNORMW
+ CALL DHEQR(HES, MAXLP1, LL, Q, INFO, LL)
+ IF (INFO .EQ. LL) GO TO 120
+C -------------------------------------------------------------------
+C Update RHO, the estimate of the norm of the residual R0-A*ZL.
+C If KMP < MAXL, then the vectors V(*,1),...,V(*,LL+1) are not
+C necessarily orthogonal for LL > KMP. The vector DL must then
+C be computed, and its norm used in the calculation of RHO.
+C -------------------------------------------------------------------
+ PROD = PROD*Q(2*LL)
+ RHO = ABS(PROD*R0NRM)
+ IF ((LL.GT.KMP) .AND.(KMP.LT.MAXL)) THEN
+ IF (LL .EQ. KMP+1) THEN
+ CALL DCOPY(N, V(1,1), 1, DL, 1)
+ DO 75 I = 1,KMP
+ IP1 = I + 1
+ I2 = I*2
+ S = Q(I2)
+ C = Q(I2-1)
+ DO 70 K = 1,N
+ DL(K) = S*DL(K) + C*V(K,IP1)
+ 70 CONTINUE
+ 75 CONTINUE
+ ENDIF
+ S = Q(2*LL)
+ C = Q(2*LL-1)/SNORMW
+ LLP1 = LL + 1
+ DO 80 K = 1,N
+ DL(K) = S*DL(K) + C*V(K,LLP1)
+ 80 CONTINUE
+ DLNRM = DNRM2(N, DL, 1)
+ RHO = RHO*DLNRM
+ ENDIF
+ RHOL = RHO
+C -------------------------------------------------------------------
+C Test for convergence. If passed, compute approximation ZL.
+C If failed and LL < MAXL, then continue iterating.
+C -------------------------------------------------------------------
+ ITER = NRSTS*MAXL + LGMR
+ IF (ISDGMR(N, B, X, XL, NELT, IA, JA, A, ISYM, MSOLVE,
+ $ NMSL, ITOL, TOL, ITMAX, ITER, ERR, IUNIT, DL, Z, WK,
+ $ RPAR, IPAR, RHOL, BNRM, SR, SZ, JSCAL,
+ $ KMP, LGMR, MAXL, MAXLP1, V, Q, SNORMW, PROD, R0NRM,
+ $ HES, JPRE) .NE. 0) GO TO 200
+ IF (LL .EQ. MAXL) GO TO 100
+C -------------------------------------------------------------------
+C Rescale so that the norm of V(1,LL+1) is one.
+C -------------------------------------------------------------------
+ TEM = 1.0D0/SNORMW
+ CALL DSCAL(N, TEM, V(1,LL+1), 1)
+ 90 CONTINUE
+ 100 CONTINUE
+ IF (RHO .LT. R0NRM) GO TO 150
+ 120 CONTINUE
+ IFLAG = 2
+C
+C Load approximate solution with zero.
+C
+ DO 130 I = 1,N
+ Z(I) = 0.D0
+ 130 CONTINUE
+ RETURN
+ 150 IFLAG = 1
+C
+C Tolerance not met, but residual norm reduced.
+C
+ IF (NRMAX .GT. 0) THEN
+C
+C If performing restarting (NRMAX > 0) calculate the residual
+C vector RL and store it in the DL array. If the incomplete
+C version is being used (KMP < MAXL) then DL has already been
+C calculated up to a scaling factor. Use DRLCAL to calculate
+C the scaled residual vector.
+C
+ CALL DRLCAL(N, KMP, MAXL, MAXL, V, Q, DL, SNORMW, PROD,
+ $ R0NRM)
+ ENDIF
+C -------------------------------------------------------------------
+C Compute the approximation ZL to the solution. Since the
+C vector Z was used as work space, and the initial guess
+C of the linear iteration is zero, Z must be reset to zero.
+C -------------------------------------------------------------------
+ 200 CONTINUE
+ LL = LGMR
+ LLP1 = LL + 1
+ DO 210 K = 1,LLP1
+ R0(K) = 0.0D0
+ 210 CONTINUE
+ R0(1) = R0NRM
+ CALL DHELS(HES, MAXLP1, LL, Q, R0)
+ DO 220 K = 1,N
+ Z(K) = 0.0D0
+ 220 CONTINUE
+ DO 230 I = 1,LL
+ CALL DAXPY(N, R0(I), V(1,I), 1, Z, 1)
+ 230 CONTINUE
+ IF ((JSCAL .EQ. 1) .OR.(JSCAL .EQ. 3)) THEN
+ DO 240 I = 1,N
+ Z(I) = Z(I)/SZ(I)
+ 240 CONTINUE
+ ENDIF
+ IF (JPRE .GT. 0) THEN
+ CALL DCOPY(N, Z, 1, WK, 1)
+ CALL MSOLVE(N, WK, Z, NELT, IA, JA, A, ISYM, RPAR, IPAR)
+ NMSL = NMSL + 1
+ ENDIF
+ RETURN
+C------------- LAST LINE OF DPIGMR FOLLOWS ----------------------------
+ END
+*DECK DRLCAL
+      SUBROUTINE DRLCAL(N, KMP, LL, MAXL, V, Q, RL, SNORMW, PROD,
+     $     R0NRM)
+C***BEGIN PROLOGUE DRLCAL
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DRLCAL-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C           Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C           Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C***PURPOSE Internal routine for DGMRES.
+C***DESCRIPTION
+C         This routine calculates the scaled residual RL from the
+C         V(I)'s.
+C *Usage:
+C      INTEGER N, KMP, LL, MAXL
+C      DOUBLE PRECISION SNORMW
+C      DOUBLE PRECISION V(N,1), Q(1), RL(N)
+C
+C      CALL DRLCAL(N, KMP, LL, MAXL, V, Q, RL, SNORMW, PROD,
+C     $     R0NRM)
+C
+C *Arguments:
+C N      :IN       Integer
+C         The order of the matrix A, and the lengths
+C         of the vectors SR, SZ, R0 and Z.
+C KMP    :IN       Integer
+C         The number of previous V vectors the new vector VNEW
+C         must be made orthogonal to. (KMP .le. MAXL)
+C LL     :IN       Integer
+C         The current dimension of the Krylov subspace.
+C MAXL   :IN       Integer
+C         The maximum dimension of the Krylov subspace.
+C Q      :IN       Double Precision Q(2*MAXL)
+C         A double precision array of length 2*MAXL containing the
+C         components of the Givens rotations used in the QR
+C         decomposition of HES. It is loaded in DHEQR and used in
+C         DHELS.
+C PROD   :IN       Double Precision
+C         The product s1*s2*...*sl = the product of the sines of the
+C         givens rotations used in the QR factorization of
+C         the hessenberg matrix HES.
+C R0NRM  :IN       Double Precision
+C         The scaled norm of initial residual R0.
+C RL     :OUT      Double Precision RL(N)
+C         The residual vector RL. This is either SB*(B-A*XL) if
+C         not preconditioning or preconditioning on the right,
+C         or SB*(M-inverse)*(B-A*XL) if preconditioning on the
+C         left.
+C
+C *See Also:
+C         DGMRES
+C
+C***ROUTINES CALLED DCOPY, DSCAL
+C***END PROLOGUE DRLCAL
+C         The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, KMP, LL, MAXL
+      DOUBLE PRECISION SNORMW
+      DOUBLE PRECISION V(N,*), Q(*), RL(N)
+C
+C         Internal Variables.
+C         (LLM1, S, C, TEM, LLP1 are implicitly typed: integers for
+C         LLM1/LLP1, double precision for S, C, TEM.)
+C
+      INTEGER I, IP1, I2, K
+C
+C***FIRST EXECUTABLE STATEMENT DRLCAL
+      IF (KMP .EQ. MAXL) THEN
+C
+C         calculate RL.  Start by copying V(*,1) into RL.
+C
+         CALL DCOPY(N, V(1,1), 1, RL, 1)
+         LLM1 = LL - 1
+C
+C         Apply the first LL-1 Givens rotations to RL.  For rotation
+C         I, S = Q(2*I) is the sine and C = Q(2*I-1) the cosine
+C         (loaded by DHEQR, per the prologue above).
+C
+         DO 20 I = 1,LLM1
+            IP1 = I + 1
+            I2 = I*2
+            S = Q(I2)
+            C = Q(I2-1)
+            DO 10 K = 1,N
+               RL(K) = S*RL(K) + C*V(K,IP1)
+ 10         CONTINUE
+ 20      CONTINUE
+C
+C         Apply the last (LL-th) rotation.  The cosine term is divided
+C         by SNORMW because the caller renormalized V(*,LL+1) by
+C         1/SNORMW (see the DSCAL in DPIGMR).
+C
+         S = Q(2*LL)
+         C = Q(2*LL-1)/SNORMW
+         LLP1 = LL + 1
+         DO 30 K = 1,N
+            RL(K) = S*RL(K) + C*V(K,LLP1)
+ 30      CONTINUE
+      ENDIF
+C
+C         When KMP < MAXL, RL vector already partially calculated.
+C         Scale RL by R0NRM*PROD to obtain the residual RL.
+C
+      TEM = R0NRM*PROD
+      CALL DSCAL(N, TEM, RL, 1)
+      RETURN
+C------------- LAST LINE OF DRLCAL FOLLOWS ----------------------------
+      END
+*DECK DXLCAL
+      SUBROUTINE DXLCAL(N, LGMR, X, XL, ZL, HES, MAXLP1, Q, V, R0NRM,
+     $     WK, SZ, JSCAL, JPRE, MSOLVE, NMSL, RPAR, IPAR,
+     $     NELT, IA, JA, A, ISYM)
+C***BEGIN PROLOGUE DXLCAL
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DXLCAL-D),
+C             Non-Symmetric Linear system, Sparse,
+C             Iterative Precondition, Generalized Minimum Residual
+C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C           Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C           Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C***PURPOSE Internal routine for DGMRES.
+C***DESCRIPTION
+C         This routine computes the solution XL, the current DGMRES
+C         iterate, given the V(I)'s and the QR factorization of the
+C         Hessenberg matrix HES. This routine is only called when
+C         ITOL=11.
+C
+C *Usage:
+C      EXTERNAL MSOLVE
+C      DOUBLE PRECISION R0NRM
+C      DOUBLE PRECISION X(N), XL(N), ZL(N), HES(MAXLP1,1), Q(1)
+C      DOUBLE PRECISION V(N,1), WK(N), SZ(1), RPAR(1)
+C      DOUBLE PRECISION A(NELT)
+C      INTEGER N, LGMR, MAXLP1, JSCAL, JPRE, IPAR, NMSL, NELT, ISYM
+C      INTEGER IPAR(1), IA(NELT), JA(NELT)
+C
+C      CALL DXLCAL(N, LGMR, X, XL, ZL, HES, MAXLP1, Q, V, R0NRM,
+C     $     WK, SZ, JSCAL, JPRE, MSOLVE, NMSL, RPAR, IPAR,
+C     $     NELT, IA, JA, A, ISYM)
+C
+C *Arguments:
+C N      :IN       Integer
+C         The order of the matrix A, and the lengths
+C         of the vectors SR, SZ, R0 and Z.
+C LGMR   :IN       Integer
+C         The number of iterations performed and
+C         the current order of the upper Hessenberg
+C         matrix HES.
+C X      :IN       Double Precision X(N)
+C         The current approximate solution as of the last restart.
+C ZL     :IN       Double Precision ZL(N)
+C         An array of length N used to hold the approximate
+C         solution Z(L).
+C SZ     :IN       Double Precision SZ(N)
+C         A vector of length N containing the nonzero
+C         elements of the diagonal scaling matrix for Z.
+C JSCAL  :IN       Integer
+C         A flag indicating whether arrays SR and SZ are used.
+C         JSCAL=0 means SR and SZ are not used and the
+C         algorithm will perform as if all
+C         SR(i) = 1 and SZ(i) = 1.
+C         JSCAL=1 means only SZ is used, and the algorithm
+C         performs as if all SR(i) = 1.
+C         JSCAL=2 means only SR is used, and the algorithm
+C         performs as if all SZ(i) = 1.
+C         JSCAL=3 means both SR and SZ are used.
+C MAXLP1 :IN       Integer
+C         MAXLP1 = MAXL + 1, used for dynamic dimensioning of HES.
+C         MAXL is the maximum allowable order of the matrix HES.
+C JPRE   :IN       Integer
+C         The preconditioner type flag.
+C WK     :IN       Double Precision WK(N)
+C         A double precision work array of length N.
+C NMSL   :IN       Integer
+C         The number of calls to MSOLVE.
+C V      :IN       Double Precision V(N,MAXLP1)
+C         The N by(LGMR+1) array containing the LGMR
+C         orthogonal vectors V(*,1) to V(*,LGMR).
+C HES    :IN       Double Precision HES(MAXLP1,MAXL)
+C         The upper triangular factor of the QR decomposition
+C         of the (LGMR+1) by LGMR upper Hessenberg matrix whose
+C         entries are the scaled inner-products of A*V(*,i) and V(*,k).
+C Q      :IN       Double Precision Q(2*MAXL)
+C         A double precision array of length 2*MAXL containing the
+C         components of the givens rotations used in the QR
+C         decomposition of HES. It is loaded in DHEQR.
+C R0NRM  :IN       Double Precision
+C         The scaled norm of the initial residual for the
+C         current call to DPIGMR.
+C RPAR   :IN       Double Precision RPAR(*)
+C         Double Precision work space passed directly to the MSOLVE
+C         routine.
+C IPAR   :IN       Integer IPAR(*)
+C         Integer work space passed directly to the MSOLVE
+C         routine.
+C NELT   :IN       Integer
+C         The length of arrays IA, JA and A.
+C IA     :IN       Integer IA(NELT)
+C         An integer array of length NELT containing matrix data.
+C         It is passed directly to the MATVEC and MSOLVE routines.
+C JA     :IN       Integer JA(NELT)
+C         An integer array of length NELT containing matrix data.
+C         It is passed directly to the MATVEC and MSOLVE routines.
+C A      :IN       Double Precision A(NELT)
+C         A double precision array of length NELT containing matrix
+C         data.
+C         It is passed directly to the MATVEC and MSOLVE routines.
+C ISYM   :IN       Integer
+C         A flag to indicate symmetric matrix storage.
+C         If ISYM=0, all nonzero entries of the matrix are
+C         stored. If ISYM=1, the matrix is symmetric and
+C         only the upper or lower triangular part is stored.
+C XL     :OUT      Double Precision XL(N)
+C         An array of length N used to hold the approximate
+C         solution X(L).
+C         Warning: XL and ZL are the same array in the calling routine.
+C
+C *See Also:
+C         DGMRES
+C
+C***ROUTINES CALLED MSOLVE, DHELS, DAXPY, DCOPY
+C***END PROLOGUE DXLCAL
+C         The following is for optimized compilation on LLNL/LTSS Crays.
+CLLL. OPTIMIZE
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      EXTERNAL MSOLVE
+      INTEGER N, LGMR, MAXLP1, JSCAL, JPRE, IPAR(*), NMSL, NELT
+      INTEGER IA(NELT), JA(NELT), ISYM
+      DOUBLE PRECISION R0NRM, X(N), XL(N), ZL(N), HES(MAXLP1,*)
+      DOUBLE PRECISION Q(*), V(N,*), WK(N), SZ(*), RPAR(*), A(NELT)
+C
+C         Internal variables.
+C
+      INTEGER I, K, LL, LLP1
+C
+C***FIRST EXECUTABLE STATEMENT DXLCAL
+      LL = LGMR
+      LLP1 = LL + 1
+C
+C         Build the right-hand side (R0NRM, 0, ..., 0) in WK(1:LL+1)
+C         and solve with DHELS using the QR factors of HES; the
+C         expansion coefficients come back in WK(1:LL).
+C
+      DO 10 K = 1,LLP1
+         WK(K) = 0.0D0
+ 10   CONTINUE
+      WK(1) = R0NRM
+      CALL DHELS(HES, MAXLP1, LL, Q, WK)
+C
+C         Form ZL = sum of WK(I)*V(*,I), I = 1,...,LL.
+C
+      DO 20 K = 1,N
+         ZL(K) = 0.0D0
+ 20   CONTINUE
+      DO 30 I = 1,LL
+         CALL DAXPY(N, WK(I), V(1,I), 1, ZL, 1)
+ 30   CONTINUE
+C
+C         Undo the Z scaling when SZ is in use (JSCAL = 1 or 3).
+C
+      IF ((JSCAL .EQ. 1) .OR.(JSCAL .EQ. 3)) THEN
+         DO 40 K = 1,N
+            ZL(K) = ZL(K)/SZ(K)
+ 40      CONTINUE
+      ENDIF
+C
+C         When JPRE > 0, pass ZL through the preconditioner solve
+C         MSOLVE; WK is scratch holding the input vector.
+C
+      IF (JPRE .GT. 0) THEN
+         CALL DCOPY(N, ZL, 1, WK, 1)
+         CALL MSOLVE(N, WK, ZL, NELT, IA, JA, A, ISYM, RPAR, IPAR)
+         NMSL = NMSL + 1
+      ENDIF
+C         calculate XL from X and ZL.
+      DO 50 K = 1,N
+         XL(K) = X(K) + ZL(K)
+ 50   CONTINUE
+      RETURN
+C------------- LAST LINE OF DXLCAL FOLLOWS ----------------------------
+      END
+*DECK ISDGMR
+      FUNCTION ISDGMR(N, B, X, XL, NELT, IA, JA, A, ISYM, MSOLVE,
+     $     NMSL, ITOL, TOL, ITMAX, ITER, ERR, IUNIT, R, Z, DZ,
+     $     RWORK, IWORK, RNRM, BNRM, SB, SX, JSCAL,
+     $     KMP, LGMR, MAXL, MAXLP1, V, Q, SNORMW, PROD, R0NRM,
+     $     HES, JPRE)
+C***BEGIN PROLOGUE ISDGMR
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C             TYPE=INTEGER(ISDGMR-I)
+C             Linear system, Sparse, Stop Test, GMRES
+C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
+C           Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
+C           Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
+C             Lawrence Livermore National Laboratory
+C             PO BOX 808, L-300
+C             Livermore, CA 94550 (415) 423-3141
+C***PURPOSE Generalized Minimum Residual Stop Test.
+C         This routine calculates the stop test for the Generalized
+C         Minimum RESidual (GMRES) iteration scheme. It returns a
+C         nonzero if the error estimate (the type of which is
+C         determined by ITOL) is less than the user specified
+C         tolerance TOL.
+C***DESCRIPTION
+C *Usage:
+C      INTEGER KMP, LGMR, MAXL, MAXLP1, JPRE, NMSL
+C      DOUBLE PRECISION DXNRM, RNRM, R0NRM, SNORMW, SOLNRM, PROD
+C      INTEGER IA(1), JA(1), IWORK(1)
+C      DOUBLE PRECISION B(1), X(1), A(1), R(1), Z(1)
+C      DOUBLE PRECISION DZ(1), RWORK(1), SB(1), SX(1)
+C      DOUBLE PRECISION Q(1), V(N,1), HES(MAXLP1,MAXL), XL(1)
+C      EXTERNAL MSOLVE
+C
+C      IF (ISDGMR(N, B, X, XL, NELT, IA, JA, A, ISYM, MSOLVE,
+C     $     NMSL, ITOL, TOL, ITMAX, ITER, ERR, IUNIT, R, Z, DZ,
+C     $     RWORK, IWORK, RNRM, BNRM, SB, SX, JSCAL,
+C     $     KMP, LGMR, MAXL, MAXLP1, V, Q, SNORMW, PROD, R0NRM,
+C     $     HES, JPRE) .NE. 0) THEN ITERATION DONE
+C
+C *Arguments:
+C N      :IN       Integer.
+C         Order of the Matrix.
+C B      :IN       Double Precision B(N).
+C         Right-hand-side vector.
+C X      :IN       Double Precision X(N).
+C         Approximate solution vector as of the last restart.
+C XL     :OUT      Double Precision XL(N)
+C         An array of length N used to hold the approximate
+C         solution as of the current iteration. Only computed by
+C         this routine when ITOL=11.
+C NELT   :IN       Integer.
+C         Number of Non-Zeros stored in A.
+C IA     :IN       Integer IA(NELT).
+C JA     :IN       Integer JA(NELT).
+C A      :IN       Double Precision A(NELT).
+C         These arrays contain the matrix data structure for A.
+C         It could take any form. See "Description", in the DGMRES,
+C         DSLUGM and DSDGMR routines for more late breaking details...
+C ISYM   :IN       Integer.
+C         Flag to indicate symmetric storage format.
+C         If ISYM=0, all nonzero entries of the matrix are stored.
+C         If ISYM=1, the matrix is symmetric, and only the upper
+C         or lower triangle of the matrix is stored.
+C MSOLVE :EXT      External.
+C         Name of a routine which solves a linear system Mz = r for z
+C         given r with the preconditioning matrix M (M is supplied via
+C         RWORK and IWORK arrays. The name of the MSOLVE routine must
+C         be declared external in the calling program. The calling
+C         sequence to MSOLVE is:
+C             CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C         Where N is the number of unknowns, R is the right-hand side
+C         vector, and z is the solution upon return. RWORK is a
+C         double precision
+C         array that can be used to pass necessary preconditioning
+C         information and/or workspace to MSOLVE. IWORK is an integer
+C         work array for the same purpose as RWORK.
+C NMSL   :INOUT    Integer.
+C         A counter for the number of calls to MSOLVE.
+C ITOL   :IN       Integer.
+C         Flag to indicate the type of convergence criterion used.
+C         ITOL=0 Means the iteration stops when the test described
+C         below on the residual RL is satisfied. This is
+C         the "Natural Stopping Criteria" for this routine.
+C         Other values of ITOL cause extra, otherwise
+C         unnecessary, computation per iteration and are
+C         therefore much less efficient. See ISDGMR (the
+C         stop test routine) for more information.
+C         ITOL=1 Means the iteration stops when the first test
+C         described below on the residual RL is satisfied,
+C         and there is either right or no preconditioning
+C         being used.
+C         ITOL=2 Implies that the user is using left
+C         preconditioning, and the second stopping criterion
+C         below is used.
+C         ITOL=3 Means the iteration stops when the third test
+C         described below on Minv*Residual is satisfied, and
+C         there is either left or no preconditioning being
+C         used.
+C         ITOL=11 is often useful for checking and comparing
+C         different routines. For this case, the user must
+C         supply the "exact" solution or a very accurate
+C         approximation (one with an error much less than
+C         TOL) through a common block,
+C         COMMON /SOLBLK/ SOLN(1)
+C         if ITOL=11, iteration stops when the 2-norm of the
+C         difference between the iterative approximation and
+C         the user-supplied solution divided by the 2-norm
+C         of the user-supplied solution is less than TOL.
+C         Note that this requires the user to set up the
+C         "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling
+C         routine. The routine with this declaration should
+C         be loaded before the stop test so that the correct
+C         length is used by the loader. This procedure is
+C         not standard Fortran and may not work correctly on
+C         your system (although it has worked on every
+C         system the authors have tried). If ITOL is not 11
+C         then this common block is indeed standard Fortran.
+C TOL    :IN       Double Precision.
+C         Convergence criterion, as described above.
+C ITMAX  :IN       Integer.
+C         Maximum number of iterations.
+C ITER   :IN       Integer.
+C         The iteration for which to check for convergence.
+C ERR    :OUT      Double Precision.
+C         Error estimate of error in final approximate solution, as
+C         defined by ITOL. Letting norm() denote the Euclidean
+C         norm, ERR is defined as follows..
+C
+C         If ITOL=0, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
+C         for right or no preconditioning, and
+C         ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
+C         norm(SB*(M-inverse)*B),
+C         for left preconditioning.
+C         If ITOL=1, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
+C         since right or no preconditioning
+C         being used.
+C         If ITOL=2, then ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
+C         norm(SB*(M-inverse)*B),
+C         since left preconditioning is being
+C         used.
+C         If ITOL=3, then ERR = Max |(Minv*(B-A*X(L)))(i)/x(i)|
+C         i=1,n
+C         If ITOL=11, then ERR = norm(SB*(X(L)-SOLN))/norm(SB*SOLN).
+C IUNIT  :IN       Integer.
+C         Unit number on which to write the error at each iteration,
+C         if this is desired for monitoring convergence. If unit
+C         number is 0, no writing will occur.
+C R      :INOUT    Double Precision R(N).
+C         Work array used in calling routine. It contains
+C         information necessary to compute the residual RL = B-A*XL.
+C Z      :WORK     Double Precision Z(N).
+C         Workspace used to hold the pseudo-residual M z = r.
+C DZ     :WORK     Double Precision DZ(N).
+C         Workspace used to hold temporary vector(s).
+C RWORK  :WORK     Double Precision RWORK(USER DEFINABLE).
+C         Double Precision array that can be used by MSOLVE.
+C IWORK  :WORK     Integer IWORK(USER DEFINABLE).
+C         Integer array that can be used by MSOLVE.
+C RNRM   :IN       Double Precision.
+C         Norm of the current residual. Type of norm depends on ITOL.
+C BNRM   :IN       Double Precision.
+C         Norm of the right hand side. Type of norm depends on ITOL.
+C SB     :IN       Double Precision SB(N).
+C         Scaling vector for B.
+C SX     :IN       Double Precision SX(N).
+C         Scaling vector for X.
+C JSCAL  :IN       Integer.
+C         Flag indicating if scaling arrays SB and SX are being
+C         used in the calling routine DPIGMR.
+C         JSCAL=0 means SB and SX are not used and the
+C         algorithm will perform as if all
+C         SB(i) = 1 and SX(i) = 1.
+C         JSCAL=1 means only SX is used, and the algorithm
+C         performs as if all SB(i) = 1.
+C         JSCAL=2 means only SB is used, and the algorithm
+C         performs as if all SX(i) = 1.
+C         JSCAL=3 means both SB and SX are used.
+C KMP    :IN       Integer
+C         The number of previous vectors the new vector VNEW
+C         must be made orthogonal to. (KMP .le. MAXL)
+C LGMR   :IN       Integer
+C         The number of GMRES iterations performed on the current call
+C         to DPIGMR (i.e., # iterations since the last restart) and
+C         the current order of the upper hessenberg
+C         matrix HES.
+C MAXL   :IN       Integer
+C         The maximum allowable order of the matrix H.
+C MAXLP1 :IN       Integer
+C         MAXLP1 = MAXL + 1, used for dynamic dimensioning of HES.
+C V      :IN       Double Precision V(N,MAXLP1)
+C         The N by (LGMR+1) array containing the LGMR
+C         orthogonal vectors V(*,1) to V(*,LGMR).
+C Q      :IN       Double Precision Q(2*MAXL)
+C         A double precision array of length 2*MAXL containing the
+C         components of the Givens rotations used in the QR
+C         decomposition
+C         of HES.
+C SNORMW :IN       Double Precision
+C         A scalar containing the scaled norm of VNEW before it
+C         is renormalized in DPIGMR.
+C PROD   :IN       Double Precision
+C         The product s1*s2*...*sl = the product of the sines of the
+C         givens rotations used in the QR factorization of
+C         the hessenberg matrix HES.
+C R0NRM  :IN       Double Precision
+C         The scaled norm of initial residual R0.
+C HES    :IN       Double Precision HES(MAXLP1,MAXL)
+C         The upper triangular factor of the QR decomposition
+C         of the (LGMR+1) by LGMR upper Hessenberg matrix whose
+C         entries are the scaled inner-products of A*V(*,I)
+C         and V(*,K).
+C JPRE   :IN       Integer
+C         Preconditioner type flag.
+C
+C *Description
+C         When using the GMRES solver, the preferred value for ITOL
+C         is 0. This is due to the fact that when ITOL=0 the norm of
+C         the residual required in the stopping test is obtained for
+C         free, since this value is already calculated in the GMRES
+C         algorithm. The variable RNRM contains the appropriate
+C         norm, which is equal to norm(SB*(RL - A*XL)) when right or
+C         no preconditioning is being performed, and equal to
+C         norm(SB*Minv*(RL - A*XL)) when using left preconditioning.
+C         Here, norm() is the Euclidean norm. Nonzero values of ITOL
+C         require additional work to calculate the actual scaled
+C         residual or its scaled/preconditioned form, and/or the
+C         approximate solution XL. Hence, these values of ITOL will
+C         not be as efficient as ITOL=0.
+C
+C***ROUTINES CALLED MSOLVE, DNRM2, DCOPY, DSCAL, DRLCAL, DXLCAL,
+C                   D1MACH
+C***END PROLOGUE ISDGMR
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER KMP, LGMR, MAXL, MAXLP1, JPRE, NMSL
+      DOUBLE PRECISION DXNRM, RNRM, R0NRM, SNORMW, SOLNRM, PROD
+C         IA, JA and IWORK carry integer data that is passed straight
+C         through to MSOLVE; they were previously (incorrectly)
+C         declared DOUBLE PRECISION, mismatching the caller's
+C         INTEGER actual arguments.
+      INTEGER IA(*), JA(*), IWORK(*)
+      DOUBLE PRECISION B(*), X(*), A(*), R(*), Z(*), DZ(*)
+      DOUBLE PRECISION RWORK(*), SB(*), SX(*), Q(*), V(N,*)
+      DOUBLE PRECISION HES(MAXLP1,MAXL), XL(*)
+      EXTERNAL MSOLVE
+      COMMON /SOLBLK/ SOLN(1)
+      SAVE SOLNRM
+C
+C***FIRST EXECUTABLE STATEMENT ISDGMR
+      ISDGMR = 0
+      IF ( ITOL.EQ.0 ) THEN
+C
+C       Use input from DPIGMR to determine if stop conditions are met.
+C
+         ERR = RNRM/BNRM
+      ENDIF
+      IF ( (ITOL.GT.0) .AND. (ITOL.LE.3) ) THEN
+C
+C       Use DRLCAL to calculate the scaled residual vector.
+C       Store answer in R.
+C
+         IF ( LGMR.NE.0 ) CALL DRLCAL(N, KMP, LGMR, MAXL, V, Q, R,
+     $        SNORMW, PROD, R0NRM)
+         IF ( ITOL.LE.2 ) THEN
+C         err = ||Residual||/||RightHandSide||(2-Norms).
+            ERR = DNRM2(N, R, 1)/BNRM
+C
+C         Unscale R by R0NRM*PROD when KMP < MAXL.
+C
+            IF ( (KMP.LT.MAXL) .AND. (LGMR.NE.0) ) THEN
+               TEM = 1.0D0/(R0NRM*PROD)
+               CALL DSCAL(N, TEM, R, 1)
+            ENDIF
+         ELSEIF ( ITOL.EQ.3 ) THEN
+C         err = Max |(Minv*Residual)(i)/x(i)|
+C         When jpre .lt. 0, r already contains Minv*Residual.
+            IF ( JPRE.GT.0 ) THEN
+               CALL MSOLVE(N, R, DZ, NELT, IA, JA, A, ISYM, RWORK,
+     $              IWORK)
+               NMSL = NMSL + 1
+            ELSE
+C         Fix: DZ was previously left undefined on this path.  R
+C         already holds Minv*Residual (JPRE < 0) or the residual
+C         itself (JPRE = 0), so copy it into DZ for the element-
+C         wise test below.
+               CALL DCOPY(N, R, 1, DZ, 1)
+            ENDIF
+C
+C         Unscale R by R0NRM*PROD when KMP < MAXL.
+C
+            IF ( (KMP.LT.MAXL) .AND. (LGMR.NE.0) ) THEN
+               TEM = 1.0D0/(R0NRM*PROD)
+               CALL DSCAL(N, TEM, R, 1)
+            ENDIF
+C
+C         NOTE(review): DZ was formed from the still-scaled R; verify
+C         whether the R0NRM*PROD unscaling above should also be
+C         applied to DZ before the ratio test.
+            FUZZ = D1MACH(1)
+            IELMAX = 1
+            RATMAX = ABS(DZ(1))/MAX(ABS(X(1)),FUZZ)
+            DO 25 I = 2, N
+               RAT = ABS(DZ(I))/MAX(ABS(X(I)),FUZZ)
+               IF( RAT.GT.RATMAX ) THEN
+                  IELMAX = I
+                  RATMAX = RAT
+               ENDIF
+ 25         CONTINUE
+            ERR = RATMAX
+            IF( RATMAX.LE.TOL ) ISDGMR = 1
+            IF( IUNIT.GT.0 ) WRITE(IUNIT,1020) ITER, IELMAX, RATMAX
+            RETURN
+         ENDIF
+      ENDIF
+      IF ( ITOL.EQ.11 ) THEN
+C
+C       Use DXLCAL to calculate the approximate solution XL.
+C
+         IF ( (LGMR.NE.0) .AND. (ITER.GT.0) ) THEN
+            CALL DXLCAL(N, LGMR, X, XL, XL, HES, MAXLP1, Q, V, R0NRM,
+     $           DZ, SX, JSCAL, JPRE, MSOLVE, NMSL, RWORK, IWORK,
+     $           NELT, IA, JA, A, ISYM)
+         ELSEIF ( ITER.EQ.0 ) THEN
+C         Copy X to XL to check if initial guess is good enough.
+            CALL DCOPY(N, X, 1, XL, 1)
+         ELSE
+C         Return since this is the first call to DPIGMR on a restart.
+            RETURN
+         ENDIF
+C
+         IF ((JSCAL .EQ. 0) .OR.(JSCAL .EQ. 2)) THEN
+C         err = ||x-TrueSolution||/||TrueSolution||(2-Norms).
+            IF ( ITER.EQ.0 ) SOLNRM = DNRM2(N, SOLN, 1)
+            DO 30 I = 1, N
+               DZ(I) = XL(I) - SOLN(I)
+ 30         CONTINUE
+            ERR = DNRM2(N, DZ, 1)/SOLNRM
+         ELSE
+C         SX scaling in use: compute the scaled norms directly.
+            IF (ITER .EQ. 0) THEN
+               SOLNRM = 0.D0
+               DO 40 I = 1,N
+                  SOLNRM = SOLNRM + (SX(I)*SOLN(I))**2
+ 40            CONTINUE
+               SOLNRM = DSQRT(SOLNRM)
+            ENDIF
+            DXNRM = 0.D0
+            DO 50 I = 1,N
+               DXNRM = DXNRM + (SX(I)*(XL(I)-SOLN(I)))**2
+ 50         CONTINUE
+            DXNRM = DSQRT(DXNRM)
+C         err = ||SX*(x-TrueSolution)||/||SX*TrueSolution|| (2-Norms).
+            ERR = DXNRM/SOLNRM
+         ENDIF
+      ENDIF
+C
+      IF( IUNIT.NE.0 ) THEN
+         IF( ITER.EQ.0 ) THEN
+            WRITE(IUNIT,1000) N, ITOL, MAXL, KMP
+         ENDIF
+         WRITE(IUNIT,1010) ITER, RNRM/BNRM, ERR
+      ENDIF
+      IF ( ERR.LE.TOL ) ISDGMR = 1
+C
+      RETURN
+ 1000 FORMAT(' Generalized Minimum Residual(',I3,I3,') for ',
+     $     'N, ITOL = ',I5, I5,
+     $     /' ITER','   Natural Err Est','   Error Estimate')
+ 1010 FORMAT(1X,I4,1X,E16.7,1X,E16.7)
+ 1020 FORMAT(1X,' ITER = ',I5, ' IELMAX = ',I5,
+     $     ' |R(IELMAX)/X(IELMAX)| = ',E12.5)
+C------------- LAST LINE OF ISDGMR FOLLOWS ----------------------------
+      END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dir.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dir.f
new file mode 100644
index 0000000000..8860000a02
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dir.f
@@ -0,0 +1,1283 @@
+*DECK DIR
+ SUBROUTINE DIR(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, DZ,
+ $ RWORK, IWORK)
+C***BEGIN PROLOGUE DIR
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DIR-D),
+C Linear system, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Preconditioned Iterative Refinement sparse Ax = b solver.
+C Routine to solve a general linear system Ax = b using
+C iterative refinement with a matrix splitting.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, IWORK(USER DEFINABLE)
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N), DZ(N)
+C DOUBLE PRECISION RWORK(USER DEFINABLE)
+C EXTERNAL MATVEC, MSOLVE
+C
+C CALL DIR(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE, ITOL,
+C $ TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, DZ, RWORK, IWORK)
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays contain the matrix data structure for A.
+C It could take any form. See "Description", below
+C for more late breaking details...
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MATVEC :EXT External.
+C Name of a routine which performs the matrix vector multiply
+C Y = A*X given A and X. The name of the MATVEC routine must
+C be declared external in the calling program. The calling
+C sequence to MATVEC is:
+C CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C Where N is the number of unknowns, Y is the product A*X
+C upon return, X is an input vector, NELT is the number of
+C non-zeros in the SLAP IA, JA, A storage for the matrix A.
+C ISYM is a flag which, if non-zero, denotes that A is
+C symmetric and only the lower or upper triangle is stored.
+C MSOLVE :EXT External.
+C Name of a routine which solves a linear system MZ = R for
+C Z given R with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays). The name of the MSOLVE routine must
+C be declared external in the calling program. The calling
+C sequence to MSOLVE is:
+C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and Z is the solution upon return. IA, JA, A and
+C ISYM are defined as above. RWORK is a double precision array
+C that can be used to pass necessary preconditioning information
+C and/or workspace to MSOLVE. IWORK is an integer work array
+C for the same purpose as RWORK.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C R :WORK Double Precision R(N).
+C Z :WORK Double Precision Z(N).
+C DZ :WORK Double Precision DZ(N).
+C RWORK :WORK Double Precision RWORK(USER DEFINABLE).
+C Double Precision array that can be used by MSOLVE.
+C IWORK :WORK Integer IWORK(USER DEFINABLE).
+C Integer array that can be used by MSOLVE.
+C
+C *Description:
+C The basic algorithm for iterative refinement (also known as
+C iterative improvement) is:
+C
+C n+1 n -1 n
+C X = X + M (B - AX ).
+C
+C -1 -1
+C If M = A then this is the standard iterative refinement
+C algorithm and the "subtraction" in the residual calculation
+C should be done in double precision (which it is not in this
+C routine). If M = DIAG(A), the diagonal of A, then iterative
+C refinement is known as Jacobi's method. The SLAP routine
+C DSJAC implements this iterative strategy. If M = L, the
+C lower triangle of A, then iterative refinement is known as
+C Gauss-Seidel. The SLAP routine DSGS implements this
+C iterative strategy.
+C
+C This routine does not care what matrix data structure is
+C used for A and M. It simply calls the MATVEC and MSOLVE
+C routines, with the arguments as described above. The user
+C could write any type of structure and the appropriate MATVEC
+C and MSOLVE routines. It is assumed that A is stored in the
+C IA, JA, A arrays in some fashion and that M (or INV(M)) is
+C stored in IWORK and RWORK) in some fashion. The SLAP
+C routines DSJAC and DSGS are examples of this procedure.
+C
+C Two examples of matrix data structures are the: 1) SLAP
+C Triad format and 2) SLAP Column format.
+C
+C =================== S L A P Triad format ===================
+C
+C In this format only the non-zeros are stored. They may
+C appear in *ANY* order. The user supplies three arrays of
+C length NELT, where NELT is the number of non-zeros in the
+C matrix: (IA(NELT), JA(NELT), A(NELT)). For each non-zero
+C the user puts the row and column index of that matrix
+C element in the IA and JA arrays. The value of the non-zero
+C matrix element is placed in the corresponding location of
+C the A array. This is an extremely easy data structure to
+C generate. On the other hand it is not too efficient on
+C vector computers for the iterative solution of linear
+C systems. Hence, SLAP changes this input data structure to
+C the SLAP Column format for the iteration (but does not
+C change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Examples:
+C See the SLAP routines DSJAC, DSGS
+C
+C *Precision: Double Precision
+C *See Also:
+C DSJAC, DSGS
+C***REFERENCES 1. Gene Golub \& Charles Van Loan, "Matrix
+C Computations", Johns Hopkins University Press; 3
+C (1983) ISBN 0-8018-3010-9.
+C***ROUTINES CALLED MATVEC, MSOLVE, ISDIR.
+C***END PROLOGUE DIR
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+ INTEGER ITOL, ITMAX, ITER, IERR, IUNIT, IWORK(*)
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N)
+ DOUBLE PRECISION DZ(N), RWORK(*)
+ EXTERNAL MSOLVE, MATVEC, ISDIR
+C
+C Check some of the input data.
+C***FIRST EXECUTABLE STATEMENT DIR
+ ITER = 0
+ IERR = 0
+ IF( N.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ TOLMIN = 500.0*D1MACH(3)
+ IF( TOL.LT.TOLMIN ) THEN
+ TOL = TOLMIN
+ IERR = 4
+ ENDIF
+C
+C Calculate initial residual and pseudo-residual, and check
+C stopping criterion.
+ CALL MATVEC(N, X, R, NELT, IA, JA, A, ISYM)
+ DO 10 I = 1, N
+ R(I) = B(I) - R(I)
+ 10 CONTINUE
+ CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+ IF( ISDIR(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, DZ, RWORK,
+ $ IWORK, BNRM, SOLNRM) .NE. 0 ) GO TO 200
+ IF( IERR.NE.0 ) RETURN
+C
+C ***** iteration loop *****
+C
+ DO 100 K=1,ITMAX
+ ITER = K
+C
+C Calculate new iterate x, new residual r, and new
+C pseudo-resid z.
+ DO 20 I = 1, N
+ X(I) = X(I) + Z(I)
+ 20 CONTINUE
+ CALL MATVEC(N, X, R, NELT, IA, JA, A, ISYM)
+ DO 30 I = 1, N
+ R(I) = B(I) - R(I)
+ 30 CONTINUE
+ CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+C check stopping criterion.
+ IF( ISDIR(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, DZ, RWORK,
+ $ IWORK, BNRM, SOLNRM) .NE. 0 ) GO TO 200
+C
+ 100 CONTINUE
+C
+C ***** end of loop *****
+C Stopping criterion not satisfied.
+ ITER = ITMAX + 1
+ IERR = 2
+C
+ 200 RETURN
+C------------- LAST LINE OF DIR FOLLOWS -------------------------------
+ END
+*DECK DSJAC
+ SUBROUTINE DSJAC(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSJAC
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSJAC-D),
+C Linear system, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Jacobi's method iterative sparse Ax = b solver.
+C Routine to solve a general linear system Ax = b using
+C Jacobi iteration.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+C
+C CALL DSJAC(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "Description",
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace.
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK. LENW >= 4*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Used to hold pointers into the double precision workspace,
+C RWORK. Upon return the following locations of IWORK hold
+C information which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >= 10.
+C
+C *Description:
+C Jacobi's method solves the linear system Ax=b with the
+C basic iterative method (where A = L + D + U):
+C
+C n+1 -1 n n
+C X = D (B - LX - UX )
+C
+C n -1 n
+C = X + D (B - AX )
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either of the
+C of these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to be
+C the SLAP Column format. See above.
+C
+C *See Also:
+C DSGS, DIR
+C
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DCHKW, DSDS, DIR, DSMV, DSDI
+C***END PROLOGUE DSJAC
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+ INTEGER ITER, IUNIT, LENW, IWORK(LENIW), LENIW
+ DOUBLE PRECISION B(N), X(N), A(NELT), RWORK(LENW)
+ EXTERNAL DSMV, DSDI
+ PARAMETER(LOCRB=1,LOCIB=11)
+C
+C Compute the inverse of the diagonal of the matrix. This
+C will be used as the preconditioner.
+C***FIRST EXECUTABLE STATEMENT DSJAC
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ LOCIW = LOCIB
+ LOCD = LOCRB
+ LOCR = LOCD + N
+ LOCZ = LOCR + N
+ LOCDZ = LOCZ + N
+ LOCW = LOCDZ + N
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSJAC', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+ IWORK(4) = LOCD
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+ CALL DS2Y(N, NELT, IA, JA, A, ISYM )
+ CALL DSDS(N, NELT, IA, JA, A, ISYM, RWORK(LOCD))
+C
+C Set up the work array and perform the iterative refinement.
+ CALL DIR(N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSDI, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK(LOCR), RWORK(LOCZ),
+ $ RWORK(LOCDZ), RWORK, IWORK )
+ RETURN
+C------------- LAST LINE OF DSJAC FOLLOWS -----------------------------
+ END
+*DECK DSGS
+ SUBROUTINE DSGS(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSGS
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSGS-D),
+C Linear system, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Gauss-Seidel method iterative sparse Ax = b solver.
+C Routine to solve a general linear system Ax = b using
+C Gauss-Seidel iteration.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(NEL+2*N+1), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(NEL+3*N)
+C
+C CALL DSGS(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "Description",
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the
+C lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace. NEL is the number
+C of non-zeros in the lower triangle of the matrix (including
+C the diagonal).
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= NEL+3*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Integer array used for workspace. NEL is the number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal).
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >=
+C NEL+N+11.
+C
+C *Description
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either of the
+C of these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to be
+C the SLAP Column format. See above.
+C
+C *See Also:
+C DSJAC, DIR
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DCHKW, DS2LT, DIR, DSMV, DSLI
+C***END PROLOGUE DSGS
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+ INTEGER ITER, IERR, IUNIT, LENW, LENIW, IWORK(LENIW)
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+ EXTERNAL DSMV, DSLI
+ PARAMETER(LOCRB=1,LOCIB=11)
+C
+C Modify the SLAP matrix data structure to YSMP-Column.
+C***FIRST EXECUTABLE STATEMENT DSGS
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Count number of elements in lower triangle of the matrix.
+C In SLAP Column format IA holds row indices, so an element
+C with IA(J).GE.ICOL lies on or below the diagonal.
+ IF( ISYM.EQ.0 ) THEN
+ NEL = 0
+ DO 20 ICOL = 1, N
+ JBGN = JA(ICOL)
+ JEND = JA(ICOL+1)-1
+ DO 10 J = JBGN, JEND
+ IF( IA(J).GE.ICOL ) NEL = NEL + 1
+ 10 CONTINUE
+ 20 CONTINUE
+ ELSE
+ NEL = JA(N+1)-1
+ ENDIF
+C
+C Set up the work arrays. Then store the lower triangle of
+C the matrix.
+C
+ LOCJEL = LOCIB
+ LOCIEL = LOCJEL + N+1
+ LOCIW = LOCIEL + NEL
+C
+ LOCEL = LOCRB
+ LOCR = LOCEL + NEL
+ LOCZ = LOCR + N
+ LOCDZ = LOCZ + N
+ LOCW = LOCDZ + N
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSGS', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+ IWORK(1) = NEL
+ IWORK(2) = LOCIEL
+ IWORK(3) = LOCJEL
+ IWORK(4) = LOCEL
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+ CALL DS2LT( N, NELT, IA, JA, A, ISYM, NEL, IWORK(LOCIEL),
+ $ IWORK(LOCJEL), RWORK(LOCEL) )
+C
+C Call iterative refinement routine.
+ CALL DIR(N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSLI,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK(LOCR),
+ $ RWORK(LOCZ), RWORK(LOCDZ), RWORK, IWORK )
+C
+C Set the amount of Integer and Double Precision Workspace used.
+ IWORK(9) = LOCIW+N+NELT
+ IWORK(10) = LOCW+NELT
+ RETURN
+C------------- LAST LINE OF DSGS FOLLOWS ------------------------------
+ END
+*DECK DSILUR
+ SUBROUTINE DSILUR(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSILUR
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSILUR-S),
+C Linear system, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Incomplete LU Iterative Refinement sparse Ax = b solver.
+C Routine to solve a general linear system Ax = b using
+C the incomplete LU decomposition with iterative refinement.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(NEL+NU+4*N+2), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(NEL+NU+4*N)
+C
+C CALL DSILUR(N, B, X, NELT, IA, JA, A, ISYM, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW)
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "Description",
+C below. If the SLAP Triad format is chosen it is changed
+C internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Matrix A is not Positive Definite.
+C $(p,Ap) < 0.0$.
+C IERR = 7 => Incomplete factorization broke down
+C and was fudged. Resulting preconditioning may
+C be less than the best.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace. NEL is the number
+C of non-zeros in the lower triangle of the matrix (including
+C the diagonal). NU is the number of nonzeros in the upper
+C triangle of the matrix (including the diagonal).
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= NEL+NU+4*N.
+C IWORK :WORK Integer IWORK(LENIW).
+C Integer array used for workspace. NEL is the number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal). NU is the number of nonzeros in the upper
+C triangle of the matrix (including the diagonal).
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >=
+C NEL+NU+4*N+10.
+C
+C *Description
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either one
+C of these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to be
+C the SLAP Column format. See above.
+C
+C *See Also:
+C DSJAC, DSGS, DIR
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DCHKW, DSILUS, DIR, DSMV, DSLUI
+C***END PROLOGUE DSILUR
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+ INTEGER IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+ EXTERNAL DSMV, DSLUI
+C
+C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSILUR
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Count number of Non-Zero elements in preconditioner ILU
+C matrix. Then set up the work arrays.
+ NL = 0
+ NU = 0
+ DO 20 ICOL = 1, N
+C Don't count diagonal.
+ JBGN = JA(ICOL)+1
+ JEND = JA(ICOL+1)-1
+ IF( JBGN.LE.JEND ) THEN
+CVD$ NOVECTOR
+ DO 10 J = JBGN, JEND
+ IF( IA(J).GT.ICOL ) THEN
+ NL = NL + 1
+ IF( ISYM.NE.0 ) NU = NU + 1
+ ELSE
+ NU = NU + 1
+ ENDIF
+ 10 CONTINUE
+ ENDIF
+ 20 CONTINUE
+C
+C Lay out the integer workspace: row/column index arrays for the
+C L and U factors, then two N-long scratch arrays for DSILUS.
+ LOCIL = LOCIB
+ LOCJL = LOCIL + N+1
+ LOCIU = LOCJL + NL
+ LOCJU = LOCIU + NU
+ LOCNR = LOCJU + N+1
+ LOCNC = LOCNR + N
+ LOCIW = LOCNC + N
+C
+C Lay out the real workspace: L, inverse diagonal, U, then the
+C three N-long vectors (R, Z, DZ) used by the DIR iteration.
+ LOCL = LOCRB
+ LOCDIN = LOCL + NL
+ LOCU = LOCDIN + N
+ LOCR = LOCU + NU
+ LOCZ = LOCR + N
+ LOCDZ = LOCZ + N
+ LOCW = LOCDZ + N
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSILUR', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+C Record the workspace layout in IWORK so DSLUI can find the
+C ILU factors when called from inside DIR.
+ IWORK(1) = LOCIL
+ IWORK(2) = LOCJL
+ IWORK(3) = LOCIU
+ IWORK(4) = LOCJU
+ IWORK(5) = LOCL
+ IWORK(6) = LOCDIN
+ IWORK(7) = LOCU
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+C Compute the Incomplete LU decomposition.
+ CALL DSILUS( N, NELT, IA, JA, A, ISYM, NL, IWORK(LOCIL),
+ $ IWORK(LOCJL), RWORK(LOCL), RWORK(LOCDIN), NU, IWORK(LOCIU),
+ $ IWORK(LOCJU), RWORK(LOCU), IWORK(LOCNR), IWORK(LOCNC) )
+C
+C Do the Preconditioned Iterative Refinement iteration.
+ CALL DIR(N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSLUI,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK(LOCR),
+ $ RWORK(LOCZ), RWORK(LOCDZ), RWORK, IWORK)
+ RETURN
+C------------- LAST LINE OF DSILUR FOLLOWS ----------------------------
+ END
+*DECK ISDIR
+ FUNCTION ISDIR(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+ $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, DZ, RWORK, IWORK,
+ $ BNRM, SOLNRM)
+C***BEGIN PROLOGUE ISDIR
+C***REFER TO DIR, DSJAC, DSGS
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 880320 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(ISDIR-S),
+C Linear system, Sparse, Stop Test
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Preconditioned Iterative Refinement Stop Test.
+C This routine calculates the stop test for the iterative
+C refinement iteration scheme. It returns a nonzero if the
+C error estimate (the type of which is determined by ITOL)
+C is less than the user specified tolerance TOL.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+C INTEGER IERR, IUNIT, IWORK(USER DEFINED)
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N), DZ(N)
+C DOUBLE PRECISION RWORK(USER DEFINED), BNRM, SOLNRM
+C EXTERNAL MSOLVE
+C
+C IF( ISDIR(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, R, Z, DZ, RWORK, IWORK,
+C $ BNRM, SOLNRM) .NE. 0 ) THEN ITERATION DONE
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :IN Double Precision X(N).
+C The current approximate solution vector.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays contain the matrix data structure for A.
+C It could take any form. See "C *Description" in the
+C DIR routine.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MSOLVE :EXT External.
+C Name of a routine which solves a linear system Mz = r for
+C z given r with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays. The name of the MSOLVE routine must
+C be declared external in the calling program. The calling
+C sequence to MSOLVE is:
+C CALL MSOLVE(N, R, Z, RWORK, IWORK)
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and z is the solution upon return. RWORK is a double
+C precision array that can be used to pass necessary
+C preconditioning information and/or workspace to MSOLVE.
+C IWORK is an integer work array for the same purpose as RWORK.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITER :IN Integer.
+C Current iteration count.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in the X(N) approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Error flag. IERR is set to 3 if ITOL is not one of the
+C acceptable values, see above.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C R :IN Double Precision R(N).
+C The residual R = B-AX.
+C Z :WORK Double Precision Z(N).
+C Workspace used to hold the pseudo-residual M z = r.
+C DZ :WORK Double Precision DZ(N).
+C Workspace used to hold temporary vector(s).
+C RWORK :WORK Double Precision RWORK(USER DEFINABLE).
+C Double Precision array that can be used by MSOLVE.
+C IWORK :WORK Integer IWORK(USER DEFINABLE).
+C Integer array that can be used by MSOLVE.
+C BNRM :INOUT Double Precision.
+C Norm of the right hand side. Type of norm depends on ITOL.
+C Calculated only on the first call.
+C SOLNRM :INOUT Double Precision.
+C 2-Norm of the true solution, SOLN. Only computed and used
+C if ITOL = 11.
+C
+C *Function Return Values:
+C 0 : Error estimate (determined by ITOL) is *NOT* less than the
+C specified tolerance, TOL. The iteration must continue.
+C 1 : Error estimate (determined by ITOL) is less than the
+C specified tolerance, TOL. The iteration can be considered
+C complete.
+C
+C *Precision: Double Precision
+C *See Also:
+C DIR, DSJAC, DSGS
+C
+C *Cautions:
+C This routine will attempt to write to the fortran logical output
+C unit IUNIT, if IUNIT .ne. 0. Thus, the user must make sure that
+C this logical unit must be attached to a file or terminal
+C before calling this routine with a non-zero value for IUNIT.
+C This routine does not check for the validity of a non-zero IUNIT
+C unit number.
+C
+C***REFERENCES (NONE)
+C***ROUTINES CALLED MSOLVE, DNRM2
+C***COMMON BLOCKS SOLBLK
+C***END PROLOGUE ISDIR
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
+ INTEGER IUNIT, IWORK(*)
+ DOUBLE PRECISION B(N), X(N), A(NELT), R(N), Z(N), DZ(N), RWORK(*)
+ EXTERNAL MSOLVE
+ COMMON /SOLBLK/ SOLN(1)
+C
+C***FIRST EXECUTABLE STATEMENT ISDIR
+ ISDIR = 0
+ IF( ITOL.EQ.1 ) THEN
+C err = ||Residual||/||RightHandSide|| (2-Norms).
+ IF(ITER .EQ. 0) BNRM = DNRM2(N, B, 1)
+ ERR = DNRM2(N, R, 1)/BNRM
+ ELSE IF( ITOL.EQ.2 ) THEN
+C -1 -1
+C err = ||M Residual||/||M RightHandSide|| (2-Norms).
+ IF(ITER .EQ. 0) THEN
+ CALL MSOLVE(N, B, DZ, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+ BNRM = DNRM2(N, DZ, 1)
+ ENDIF
+ ERR = DNRM2(N, Z, 1)/BNRM
+ ELSE IF( ITOL.EQ.11 ) THEN
+C err = ||x-TrueSolution||/||TrueSolution|| (2-Norms).
+ IF( ITER.EQ.0 ) SOLNRM = DNRM2(N, SOLN, 1)
+ DO 10 I = 1, N
+ DZ(I) = X(I) - SOLN(I)
+ 10 CONTINUE
+ ERR = DNRM2(N, DZ, 1)/SOLNRM
+ ELSE
+C
+C If we get here ITOL is not one of the acceptable values.
+C Use a double precision constant so ERR keeps full precision.
+ ERR = 1.0D10
+ IERR = 3
+ ENDIF
+C
+ IF( IUNIT.NE.0 ) THEN
+ WRITE(IUNIT,1000) ITER,ERR
+ ENDIF
+C
+ IF( ERR.LE.TOL ) ISDIR = 1
+C
+ RETURN
+ 1000 FORMAT(5X,'ITER = ',I4,' Error Estimate = ',E16.7)
+C------------- LAST LINE OF ISDIR FOLLOWS -----------------------------
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dlapqc.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dlapqc.f
new file mode 100644
index 0000000000..b7200ec8c2
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dlapqc.f
@@ -0,0 +1,794 @@
+ PROGRAM DLAPQC
+C***BEGIN PROLOGUE DLAPQC
+C***SUBSIDIARY
+C***PURPOSE Driver for testing SLATEC Sparse Linear Algebra Package
+C (SLAP) Version 2.0.
+C***LIBRARY SLATEC(SLAP)
+C***CATEGORY D2A4, D2B4
+C***TYPE SINGLE (DLAPQC-S)
+C***KEYWORDS QUICK CHECK DRIVER, SLAP
+C***AUTHOR Mark K. Seager (LLNL)
+C seager@lll-crg.llnl.gov
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550
+C (415)423-3141
+C***DESCRIPTION
+C
+C *Usage:
+C One input data record is required
+C READ (ISTDI,990) KPRINT
+C 990 FORMAT (I1)
+C
+C *Arguments:
+C KPRINT = 0 Quick checks - No printing.
+C Driver - Short pass or fail message printed.
+C 1 Quick checks - No message printed for passed tests,
+C short message printed for failed tests.
+C Driver - Short pass or fail message printed.
+C 2 Quick checks - Print short message for passed tests,
+C fuller information for failed tests.
+C Driver - Pass or fail message printed.
+C 3 Quick checks - Print complete quick check results.
+C Driver - Pass or fail message printed.
+C 4 Quick checks - Print complete quick check results.
+C Prints matrices, etc. Very verbose.
+C Driver - Pass or fail message printed.
+C
+C *Description:
+C The
+C Sparse Linear Algebra Package
+C
+C @@@@@@@ @ @@@ @@@@@@@@
+C @ @ @ @ @ @ @
+C @ @ @ @ @ @
+C @@@@@@@ @ @ @ @@@@@@@@
+C @ @ @@@@@@@@@ @
+C @ @ @ @ @ @
+C @@@@@@@ @@@@@@@@@ @ @ @
+C
+C @ @ @@@@@@@ @@@@@
+C @ @ @ @ @ @@
+C @ @ @@@@@@@ @ @@ @ @ @ @
+C @ @ @ @ @@ @ @@@@@@ @ @ @
+C @ @ @@@@@@@@@ @ @ @ @ @
+C @ @ @ @ @ @@@ @@ @
+C @@@ @@@@@@@ @ @@@@@@@@@ @@@ @@@@@
+C
+C----------------------------------------------------------------------
+C Written By
+C Mark K. Seager (LLNL)
+C Lawrence Livermore National Lab.
+C PO Box 808, L-300
+C Livermore, CA 94550
+C (415) 423-3141
+C seager@lll-crg.llnl.gov
+C----------------------------------------------------------------------
+C This is a SLATEC Quick Checks program to test the *SLAP*
+C Version 2.0 package. It generates a "random" matrix (See
+C DRMGEN) and then runs all the various methods with all the
+C various preconditioners and all the various stop tests.
+C
+C It is assumed that the test is being run interactively and
+C that STDIN (STANDARD INPUT) is Fortran I/O unit I1MACH(1)
+C and STDOUT (STANDARD OUTPUT) is unit I1MACH(2).
+C
+C *************************************************************
+C **** WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+C *************************************************************
+C **** THIS PROGRAM WILL NOT FUNCTION PROPERLY IF THE FORTRAN
+C **** I/O UNITS I1MACH(1) and I1MACH(2) are not connected
+C **** to the program for I/O.
+C *************************************************************
+C
+C All errors in the driver are handled with the SLATEC error
+C handler (revision date 851111).
+C
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DCPPLT, DSJAC, DSGS, DSILUR, DSDCG, DSICCG,
+C DSDCGN, DSLUCN, DSDBCG, DSLUBC, DSDCGS, DSLUCS,
+C DSDOMN, DSLUOM, DSDCMR, DSLUCM
+C***REVISION HISTORY (YYMMDD)
+C 880601 DATE WRITTEN
+C 881213 Revised to meet the new SLATEC prologue standards.
+C***END PROLOGUE DLAPQC
+ PARAMETER(MAXN=441, MXNELT=50000, MAXIW=50000, MAXRW=50000)
+C$$$ PARAMETER(MAXN=25, MXNELT=50000, MAXIW=50000, MAXRW=50000)
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ CHARACTER*72 MESG
+ DOUBLE PRECISION A(MXNELT), F(MAXN), XITER(MAXN), RWORK(MAXRW)
+ INTEGER IA(MXNELT), JA(MXNELT), IWORK(MAXIW)
+ COMMON /SOLBLK/ SOLN(MAXN)
+C
+C The following lines are for the brain-damaged Sun FPE handler.
+C
+C$$$ integer oldmode, fpmode
+C$$$ oldmode = fpmode( 62464 )
+C
+C READ KPRINT PARAMETER
+C
+C***FIRST EXECUTABLE STATEMENT DLAPQC
+C
+ ISTDI = I1MACH(1)
+ ISTDO = I1MACH(2)
+ NFAIL = 0
+ READ(ISTDI,990) KPRINT
+ 990 FORMAT(I1)
+C Send SLATEC error-handler output to the standard output unit.
+C (The original code passed the never-assigned variable LUN here.)
+ CALL XSETUN(ISTDO)
+ IF( KPRINT.LE.1 ) THEN
+ CALL XSETF(0)
+ ELSE
+ CALL XSETF(1)
+ ENDIF
+ CALL XERMAX(1000)
+C
+C Maximum problem sizes.
+C
+ NELTMX = MXNELT
+ NMAX = MAXN
+ LENIW = MAXIW
+ LENW = MAXRW
+C
+C Set some input data.
+C
+ N = NMAX
+ ITMAX = N
+ IOUT = KPRINT
+ FACTOR = 1.2
+ IF( IOUT.LT.3 ) THEN
+ IUNIT = 0
+ ELSE
+ IUNIT = ISTDO
+ ENDIF
+C
+C Set the Error tolerance to depend on the machine epsilon.
+C
+ TOL = MAX(1.0D3*D1MACH(3),1.0D-6)
+C
+C Test routines using various convergence criteria.
+C
+ DO 10 KASE = 3, 3
+ IF(KASE .EQ. 1 .OR. KASE .EQ. 2) ITOL = KASE
+ IF(KASE .EQ. 3) ITOL = 11
+C
+C Test routines using nonsymmetric (ISYM=0) and symmetric
+C storage (ISYM=1). For ISYM=0 a really non-symmetric matrix
+C is generated. The amount of non-symmetry is controlled by
+C user.
+C
+ DO 20 ISYM = 0, 1
+C
+C Set up a random matrix.
+C
+ CALL DRMGEN( NELTMX, FACTOR, IERR, N, NELT,
+ $ ISYM, IA, JA, A, F, SOLN, RWORK, IWORK, IWORK(N+1) )
+ IF( IERR.NE.0 ) THEN
+ MESG = 'DLAPQC -- Fatal error (i1) generating '//
+ $ '*RANDOM* Matrix.'
+ CALL XERRWV( MESG,LEN(MESG),IERR,2,1,IERR,0,
+ $ 0,0.0,0.0 )
+ ENDIF
+ IF( ISYM.EQ.0 ) THEN
+ DENS = FLOAT(NELT)/FLOAT(N*N)
+ ELSE
+ DENS = FLOAT(2*NELT)/FLOAT(N*N)
+ ENDIF
+ IF( IOUT.GE.2 ) THEN
+ WRITE(ISTDO,1020) N, NELT, DENS
+ WRITE(ISTDO,1030) TOL
+ ENDIF
+C
+C Convert to the SLAP-Column format and
+C write out matrix in SLAP-Column format, if desired.
+C
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+ IF( IOUT.GE.4 ) THEN
+ WRITE(ISTDO,1040) (K,IA(K),JA(K),A(K),K=1,NELT)
+ CALL DCPPLT( N, NELT, IA, JA, A, ISYM, ISTDO )
+ ENDIF
+C
+C**********************************************************************
+C BEGINNING OF SLAP QUICK TESTS
+C**********************************************************************
+C
+C * * * * * * DSJAC * * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSJAC ', ITOL, ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSJAC(N, F, XITER, NELT, IA, JA, A, ISYM,
+ $ ITOL, TOL, 2*ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSJAC ',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * DSGS * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSGS ',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSGS(N, F, XITER, NELT, IA, JA, A, ISYM,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSGS  ',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * * DSILUR * * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSILUR',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSILUR(N, F, XITER, NELT, IA, JA, A, ISYM,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSILUR',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * * DSDCG * * * * * *
+C
+ IF( ISYM.EQ.1 ) THEN
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSDCG ',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSDCG(N, F, XITER, NELT, IA, JA, A, ISYM,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSDCG ',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+ ENDIF
+C
+C * * * * * * DSICCG * * * * * *
+C
+ IF( ISYM.EQ.1 ) THEN
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSICCG',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSICCG(N, F, XITER, NELT, IA, JA, A, ISYM,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK,
+ $ LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSICCG',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+ ENDIF
+C
+C * * * * * * DSDCGN * * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSDCGN',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSDCGN(N, F, XITER, NELT, IA, JA, A, ISYM, ITOL,
+ $ TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW,
+ $ IWORK, LENIW )
+C
+ CALL DUTERR( 'DSDCGN',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * * DSLUCN * * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSLUCN',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSLUCN(N, F, XITER, NELT, IA, JA, A, ISYM, ITOL,
+ $ TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW,
+ $ IWORK, LENIW )
+C
+ CALL DUTERR( 'DSLUCN',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * * DSDBCG * * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSDBCG',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSDBCG(N, F, XITER, NELT, IA, JA, A, ISYM, ITOL,
+ $ TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW,
+ $ IWORK, LENIW )
+C
+ CALL DUTERR( 'DSDBCG',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * * DSLUBC * * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSLUBC',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSLUBC(N, F, XITER, NELT, IA, JA, A, ISYM,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSLUBC',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * * DSDCGS * * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSDCGS',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSDCGS(N, F, XITER, NELT, IA, JA, A, ISYM, ITOL,
+ $ TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW,
+ $ IWORK, LENIW )
+C
+ CALL DUTERR( 'DSDCGS',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * * DSLUCS * * * * * *
+C
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1000) 'DSLUCS',ITOL,ISYM
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSLUCS(N, F, XITER, NELT, IA, JA, A, ISYM,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSLUCS',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+C
+C * * * * * * DSDOMN * * * * * *
+C
+CVD$ NOVECTOR
+ DO 30 NSAVE = 0, 3
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1010) 'DSDOMN',ITOL, ISYM, NSAVE
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSDOMN(N, F, XITER, NELT, IA, JA, A,
+ $ ISYM, NSAVE, ITOL, TOL, ITMAX, ITER, ERR, IERR,
+ $ IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSDOMN',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+ 30 CONTINUE
+C
+C * * * * * * DSLUOM * * * * * *
+C
+CVD$ NOVECTOR
+ DO 40 NSAVE=0,3
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1010) 'DSLUOM',ITOL, ISYM, NSAVE
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSLUOM(N, F, XITER, NELT, IA, JA, A,
+ $ ISYM, NSAVE, ITOL, TOL, ITMAX, ITER, ERR, IERR,
+ $ IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSLUOM',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+ 40 CONTINUE
+C
+C * * * * * * DSDGMR * * * * * *
+C
+CVD$ NOVECTOR
+ DO 50 NSAVE = 5, 12
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1010) 'DSDGMR',ITOL, ISYM, NSAVE
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+ ITOLGM = 0
+C
+ CALL DSDGMR(N, F, XITER, NELT, IA, JA, A,
+ $ ISYM, NSAVE, ITOLGM, TOL, ITMAX, ITER, ERR, IERR,
+ $ IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSDGMR',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+ 50 CONTINUE
+C
+C * * * * * * DSLUGM * * * * * *
+C
+CVD$ NOVECTOR
+ DO 60 NSAVE = 5, 12
+ IF( IOUT.GE.3 ) THEN
+ WRITE(ISTDO,1010) 'DSLUGM',ITOL, ISYM, NSAVE
+ ENDIF
+ CALL DFILL( N, XITER, 0.0D0 )
+C
+ CALL DSLUGM(N, F, XITER, NELT, IA, JA, A,
+ $ ISYM, NSAVE, ITOL, TOL, ITMAX, ITER, ERR, IERR,
+ $ IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+ CALL DUTERR( 'DSLUGM',IERR,IOUT,NFAIL,ISTDO,ITER,ERR )
+ 60 CONTINUE
+ 20 CONTINUE
+ 10 CONTINUE
+C
+ IF( NFAIL.EQ.0 ) THEN
+ WRITE(ISTDO,1050)
+ ELSE
+ WRITE(ISTDO,1060) NFAIL
+ ENDIF
+C
+ STOP 'All Done'
+C
+ 1000 FORMAT(/1X,A6,' : ITOL = ',I2,' ISYM = ',I1)
+ 1010 FORMAT(/1X,A6,' : ITOL = ',I2,' ISYM = ',I1,' NSAVE = ',I2)
+ 1020 FORMAT(/' * RANDOM Matrix of size',I5,'*'
+ $ /' ',
+ $ 'Number of non-zeros & Density = ', I5,E16.7)
+ 1030 FORMAT(' Error tolerance = ',E16.7)
+ 1040 FORMAT(/' ***** SLAP Column Matrix *****'/
+ $ ' Indx ia ja a'/(1X,I4,1X,I4,1X,I4,1X,E16.7))
+ 1050 FORMAT(//
+ $ '*******************************************************'/
+ $ '**** All SLAP Double Precision Quick Checks Passed ****'/
+ $ '**** No Errors ****'/
+ $ '*******************************************************')
+ 1060 FORMAT(//
+ $ '************************************************'/
+ $ '** ===>',I3,' Failures detected <=== **'/
+ $ '** SLAP Double Precision Quick Checks **'/
+ $ '** Set KPRINT = 3 for DEBUG information and **'/
+ $ '** rerun the tests to determine the problem. **'/
+ $ '************************************************')
+ END
+*DECK DUTERR
+ SUBROUTINE DUTERR( METHOD, IERR, IOUT, NFAIL, ISTDO, ITER, ERR )
+C***BEGIN PROLOGUE DUTERR
+C***SUBSIDIARY
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DUTERR-D),
+C Linear system, Sparse, Iterative Precondition
+C***AUTHOR Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Output error messages for the SLAP Quick Check.
+C Bumps the failure counter NFAIL when IERR is non-zero
+C and reports the result on unit ISTDO with a verbosity
+C selected by IOUT (0 = silent, 1 = failures only,
+C 2 = short pass/fail, >=3 = full report).
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DUTERR
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ CHARACTER*6 METHOD
+ INTEGER IERR, IOUT, NFAIL, ISTDO, ITER
+ DOUBLE PRECISION ERR
+C
+C***FIRST EXECUTABLE STATEMENT DUTERR
+C Any non-zero IERR counts as a failed quick check.
+ IF( IERR.NE.0 ) NFAIL = NFAIL + 1
+C
+C Pick the report style from the (mutually exclusive) IOUT cases.
+ IF( IOUT.EQ.1 ) THEN
+C Failures only, one-line message.
+ IF( IERR.NE.0 ) WRITE(ISTDO,1000) METHOD
+ ELSE IF( IOUT.EQ.2 ) THEN
+C Short pass/fail message.
+ IF( IERR.EQ.0 ) THEN
+ WRITE(ISTDO,1010) METHOD
+ ELSE
+ WRITE(ISTDO,1020) METHOD,IERR,ITER,ERR
+ ENDIF
+ ELSE IF( IOUT.GE.3 ) THEN
+C Full banner for both passed and failed tests.
+ IF( IERR.EQ.0 ) THEN
+ WRITE(ISTDO,1030) METHOD,IERR,ITER,ERR
+ ELSE
+ WRITE(ISTDO,1020) METHOD,IERR,ITER,ERR
+ ENDIF
+ ENDIF
+ RETURN
+ 1000 FORMAT( 1X,A6,' : **** FAILURE ****')
+ 1010 FORMAT( 1X,A6,' : **** PASSED ****')
+ 1020 FORMAT(' **************** WARNING ***********************'/
+ $ ' **** ',A6,' Quick Test FAILED: IERR = ',I5,' ****'/
+ $ ' **************** WARNING ***********************'/
+ $ ' Iteration Count = ',I3,' Stop Test = ',E12.6)
+ 1030 FORMAT(' ***************** PASSED ***********************'/
+ $ ' **** ',A6,' Quick Test PASSED: IERR = ',I5,' ****'/
+ $ ' ***************** PASSED ***********************'/
+ $ ' Iteration Count = ',I3,' Stop Test = ',E12.6)
+C------------- LAST LINE OF DUTERR FOLLOWS ----------------------------
+ END
+*DECK DRMGEN
+ SUBROUTINE DRMGEN( NELTMX, FACTOR, IERR, N, NELT, ISYM,
+ $ IA, JA, A, F, SOLN, DSUM, ITMP, IDIAG )
+C***BEGIN PROLOGUE DRMGEN
+C***SUBSIDIARY
+C***PURPOSE This routine generates a "Random" symmetric or
+C non-symmetric matrix of size N for use in the SLAP
+C Quick Checks.
+C***LIBRARY SLATEC(SLAP)
+C***AUTHOR Seager, Mark K., (LLNL)
+C seager@lll-crg.llnl.gov
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550
+C (415)423-3141
+C***DESCRIPTION
+C
+C NOTE(review): this program unit is corrupted.  The DRMGEN
+C description below breaks off mid-sentence at "(0.0" and resumes
+C with argument text and executable code that belong to DBHIN, the
+C Boeing-Harwell format reader (the END PROLOGUE marker further
+C down names DBHIN).  The executable body references IUNIT, JOB,
+C RHS and SOLN, none of which appear in the DRMGEN argument list
+C above; under IMPLICIT typing they compile but are undefined on
+C entry.  As committed this routine cannot work -- the original
+C DRMGEN body and a separate SUBROUTINE DBHIN statement appear to
+C have been lost and should be restored from upstream SLATEC.
+C
+C *Usage:
+C INTEGER NELTMX, IERR, N, NELT, ISYM,
+C INTEGER IA(NELTMX), JA(NELTMX), ITMP(N), IDIAG(N)
+C DOUBLE PRECISION FACTOR, A(NELTMX), F(N), SOLN(N), DSUM(N)
+C
+C CALL DRMGEN( NELTMX, FACTOR, IERR, N, NELT, ISYM,
+C $ IA, JA, A, F, SOLN, DSUM, ITMP, IDIAG )
+C
+C *Arguments:
+C
+C NELTMX :IN Integer.
+C Maximum number of non-zeros that can be created by this
+C routine for storage in the IA, JA, A arrays, see below.
+C FACTOR :IN Double Precision.
+C Non-zeros in the upper triangle are set to FACTOR times
+C the coresponding entry in the lower triangle when a non-
+C symmetric matrix is requested (See ISYM, below).
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => everything went OK.
+C = 1 => Ran out of space trying to create matrix.
+C Set NELTMX to something larger and retry.
+C N :IN Integer.
+C Size of the linear system to generate (number of unknowns).
+C NELT :OUT Integer.
+C Number of non-zeros stored in the IA, JA, A arrays, see below.
+C ISYM :IN Integer.
+C Flag to indicate the type of matrix to generate:
+C ISYM = 0 => Non-Symmetric Matrix (See FACTOR, above).
+C = 1 => Symmetric Matrix.
+C IA :OUT Integer IA(NELTMX).
+C Stores the row indicies for the non-zeros.
+C JA :OUT Integer JA(NELTMX).
+C Stores the column indicies for the non-zeros.
+C A :OUT Double Precision A(NELTMX).
+C Stores the values of the non-zeros.
+C F :OUT Double Precision F(N).
+C The right hand side of the linear system. Obtained by mult-
+C iplying the matrix time SOLN, see below.
+C SOLN :OUT Double Precision SOLN(N).
+C The true solution to the linear system. Each component is
+C chosen at random (0.0 Read only the matrix.
+C = 1 => Read matrix and RHS (if present).
+C = 2 => Read matrix and SOLN (if present).
+C = 3 => Read matrix, RHS and SOLN (if present).
+C On output JOB indicates what operations were actually
+C performed.
+C -3 => Unable to parse matrix "CODE" from input file
+C to determine if only the lower triangle of matrix
+C is stored.
+C -2 => Number of non-zeros (NELT) too large.
+C -1 => System size (N) too large.
+C JOB = 0 => Read in only the matrix.
+C = 1 => Read in the matrix and RHS.
+C = 2 => Read in the matrix and SOLN.
+C = 3 => Read in the matrix, RHS and SOLN.
+C = 10 => Read in only the matrix *STRUCTURE*, but no
+C non-zero entries. Hence, A(*) is not referenced
+C and has the return values the same as the input.
+C = 11 => Read in the matrix *STRUCTURE* and RHS.
+C = 12 => Read in the matrix *STRUCTURE* and SOLN.
+C = 13 => Read in the matrix *STRUCTURE*, RHS and SOLN.
+C
+C *Precision: Double Precision
+C *Portability:
+C You must make sure that IUNIT is a valid Fortran logical
+C I/O device unit number and that the unit number has been
+C associated with a file or the console. This is a system
+C dependent function.
+C
+C***LONG DESCRIPTION
+C The format for the output is as follows. On the first line
+C are counters and flags: N, NELT, ISYM, IRHS, ISOLN. N, NELT
+C and ISYM are described above. IRHS is a flag indicating if
+C the RHS was written out (1 is yes, 0 is no). ISOLN is a
+C flag indicating if the SOLN was written out (1 is yes, 0 is
+C no). The format for the fist line is: 5i10. Then comes the
+C NELT Triad's IA(I), JA(I) and A(I), I = 1, NELT. The format
+C for these lines is : 1X,I5,1X,I5,1X,E16.7. Then comes
+C RHS(I), I = 1, N, if IRHS = 1. Then comes SOLN(I), I = 1,
+C N, if ISOLN = 1. The format for these lines is: 1X,E16.7.
+C
+C =================== S L A P Triad format ===================
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DBHIN
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, JOB
+ DOUBLE PRECISION A(NELT), RHS(N), SOLN(N)
+C
+C Local Variables
+C
+ CHARACTER*80 TITLE
+ CHARACTER*3 CODE
+ CHARACTER*16 PNTFMT, RINFMT
+ CHARACTER*20 NVLFMT, RHSFMT
+C
+ INTEGER NLINE, NPLS, NRILS, NNVLS, NRHSLS, NROW, NCOL, NIND, NELE
+C
+C Read Matrices In BOEING-HARWELL format.
+C
+C NLINE Number of Data (after the header) lines in the file.
+C NPLS Number of lines for the Column Pointer data in the file.
+C NRILS Number of lines for the Row indicies in the data file.
+C NNVLS Number of lines for the Matrix elements in the data file.
+C NRHSLS Number of lines for the RHS in the data file.
+C
+C***FIRST EXECUTABLE STATEMENT DBHIN
+C NOTE(review): IUNIT and JOB are used below but are neither dummy
+C arguments of DRMGEN nor initialized locals -- they are undefined
+C here.  This is further evidence the DBHIN body was spliced into
+C the wrong subroutine (see note at the top of the prologue).
+ READ(IUNIT,9000) TITLE
+ READ(IUNIT,9010) NLINE, NPLS, NRILS, NNVLS, NRHSLS
+ READ(IUNIT,9020) CODE, NROW, NCOL, NIND, NELE
+ READ(IUNIT,9030) PNTFMT, RINFMT, NVLFMT, RHSFMT
+C
+ IF( NROW.GT.N ) THEN
+ N = NROW
+ JOBRET = -1
+ GOTO 999
+ ENDIF
+ IF( NIND.GT.NELT ) THEN
+ NELT = NIND
+ JOBRET = -2
+ GOTO 999
+ ENDIF
+C
+C Set the parameters.
+C
+ N = NROW
+ NELT = NIND
+ IF( CODE.EQ.'RUA' ) THEN
+ ISYM = 0
+ ELSE IF( CODE.EQ.'RSA' ) THEN
+ ISYM = 1
+ ELSE
+ JOBRET = -3
+ GOTO 999
+ ENDIF
+ READ(IUNIT,PNTFMT) (JA(I), I = 1, N+1)
+ READ(IUNIT,RINFMT) (IA(I), I = 1, NELT)
+ JOBRET = 10
+ IF( NNVLS.GT.0 ) THEN
+ READ(IUNIT,NVLFMT) (A(I), I = 1, NELT)
+ JOBRET = 0
+ ENDIF
+ IF( NRHSLS.GT.0 .AND. MOD(JOB,2).EQ.1 ) THEN
+C NOTE(review): hard-coded unit 5 is inconsistent with every other
+C READ in this routine, which uses IUNIT; upstream SLATEC dbhin.f
+C reads the RHS from IUNIT as well -- confirm and make consistent.
+ READ(5,RHSFMT) (RHS(I), I = 1, N)
+ JOBRET = JOBRET + 1
+ ENDIF
+C
+C Now loop thru the IA(i) array making sure that the Diagonal
+C matrix element appears first in the column. Then sort the
+C rest of the column in ascending order.
+C
+CVD$R NOCONCUR
+CVD$R NOVECTOR
+ DO 70 ICOL = 1, N
+ IBGN = JA(ICOL)
+ IEND = JA(ICOL+1)-1
+ DO 30 I = IBGN, IEND
+ IF( IA(I).EQ.ICOL ) THEN
+C Swap the diag element with the first element in the column.
+ ITEMP = IA(I)
+ IA(I) = IA(IBGN)
+ IA(IBGN) = ITEMP
+ TEMP = A(I)
+ A(I) = A(IBGN)
+ A(IBGN) = TEMP
+ GOTO 40
+ ENDIF
+ 30 CONTINUE
+ 40 IBGN = IBGN + 1
+ IF( IBGN.LT.IEND ) THEN
+ DO 60 I = IBGN, IEND
+ DO 50 J = I+1, IEND
+ IF( IA(I).GT.IA(J) ) THEN
+ ITEMP = IA(I)
+ IA(I) = IA(J)
+ IA(J) = ITEMP
+ TEMP = A(I)
+ A(I) = A(J)
+ A(J) = TEMP
+ ENDIF
+ 50 CONTINUE
+ 60 CONTINUE
+ ENDIF
+ 70 CONTINUE
+C
+C Set return flag.
+ 999 JOB = JOBRET
+ RETURN
+ 9000 FORMAT( A80 )
+ 9010 FORMAT( 5I14 )
+ 9020 FORMAT( A3, 11X, 4I14 )
+ 9030 FORMAT( 2A16, 2A20 )
+C------------- LAST LINE OF DBHIN FOLLOWS ------------------------------
+ END
+*DECK DCHKW
+ SUBROUTINE DCHKW( NAME, LOCIW, LENIW, LOCW, LENW,
+ $ IERR, ITER, ERR )
+C***BEGIN PROLOGUE DCHKW
+C***DATE WRITTEN 880225 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. R2
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DCHKW-D),
+C SLAP, Error Checking, Workspace Checking
+C***AUTHOR Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP WORK/IWORK Array Bounds Checker.
+C This routine checks the work array lengths and inter-
+C faces to the SLATEC error handler if a problem is
+C found.
+C***DESCRIPTION
+C *Usage:
+C CHARACTER*(*) NAME
+C INTEGER LOCIW, LENIW, LOCW, LENW, IERR, ITER
+C DOUBLE PRECISION ERR
+C
+C CALL DCHKW( NAME, LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+C
+C *Arguments:
+C NAME :IN Character*(*).
+C Name of the calling routine. This is used in the output
+C message, if an error is detected.
+C LOCIW :IN Integer.
+C Location of the first free element in the integer workspace
+C array.
+C LENIW :IN Integer.
+C Length of the integer workspace array.
+C LOCW :IN Integer.
+C Location of the first free element in the double precision
+C workspace array.
+C LENW :IN Integer.
+C Length of the double precision workspace array.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated for
+C WORK or IWORK.
+C ITER :OUT Integer.
+C Set to 0 if an error is detected.
+C ERR :OUT Double Precision.
+C Set to a very large number if an error is detected.
+C
+C *Precision: Double Precision
+C
+C***REFERENCES (NONE)
+C***ROUTINES CALLED D1MACH, XERRWV
+C***END PROLOGUE DCHKW
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ CHARACTER*(*) NAME
+ CHARACTER*72 MESG
+ INTEGER LOCIW, LENIW, LOCW, LENW, IERR, ITER
+ DOUBLE PRECISION ERR, D1MACH
+ EXTERNAL D1MACH, XERRWV
+C
+C NOTE(review): MESG is fixed at 72 characters, so a long NAME
+C silently truncates the message text built below -- confirm all
+C callers pass short routine names.
+C
+C Check the Integer workspace situation.
+C***FIRST EXECUTABLE STATEMENT DCHKW
+ IERR = 0
+ IF( LOCIW.GT.LENIW ) THEN
+ IERR = 1
+ ITER = 0
+C ERR is set to "machine infinity" (largest double) so iterative
+C drivers see an unambiguous failure value.
+ ERR = D1MACH(2)
+C The 'i1'/'i2' placeholders in the text are substituted with the
+C two integers (LOCIW, LENIW) by the SLATEC handler XERRWV.
+ MESG = NAME // ': INTEGER work array too short. '//
+ $ ' IWORK needs i1: have allocated i2.'
+ CALL XERRWV( MESG, LEN(MESG), 1, 1, 2, LOCIW, LENIW,
+ $ 0, 0.0, 0.0 )
+ ENDIF
+C
+C Check the Double Precision workspace situation.
+ IF( LOCW.GT.LENW ) THEN
+ IERR = 1
+ ITER = 0
+ ERR = D1MACH(2)
+ MESG = NAME // ': DOUBLE PRECISION work array too short. '//
+ $ ' RWORK needs i1: have allocated i2.'
+ CALL XERRWV( MESG, LEN(MESG), 1, 1, 2, LOCW, LENW,
+ $ 0, 0.0, 0.0 )
+ ENDIF
+ RETURN
+C------------- LAST LINE OF DCHKW FOLLOWS ----------------------------
+ END
+*DECK QS2I1D
+ SUBROUTINE QS2I1D( IA, JA, A, N, KFLAG )
+C***BEGIN PROLOGUE QS2I1D
+C***DATE WRITTEN 761118 (YYMMDD)
+C***REVISION DATE 890125 (YYMMDD)
+C***CATEGORY NO. N6A2A
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=INTEGER(QS2I1D-I),
+C QUICKSORT,DOUBLETON QUICKSORT,SORT,SORTING
+C***AUTHOR Jones, R. E., (SNLA)
+C Kahaner, D. K., (NBS)
+C Seager, M. K., (LLNL) seager@lll-crg.llnl.gov
+C Wisniewski, J. A., (SNLA)
+C***PURPOSE Sort an integer array also moving an integer and DP array
+C This routine sorts the integer array IA and makes the
+C same interchanges in the integer array JA and the
+C double precision array A. The array IA may be sorted
+C in increasing order or decreas- ing order. A slightly
+C modified QUICKSORT algorithm is used.
+C
+C***DESCRIPTION
+C Written by Rondall E Jones
+C Modified by John A. Wisniewski to use the Singleton QUICKSORT
+C algorithm. date 18 November 1976.
+C
+C Further modified by David K. Kahaner
+C National Bureau of Standards
+C August, 1981
+C
+C Even further modification made to bring the code up to the
+C Fortran 77 level and make it more readable and to carry
+C along one integer array and one double precision array during
+C the sort by
+C Mark K. Seager
+C Lawrence Livermore National Laboratory
+C November, 1987
+C This routine was adapted from the ISORT routine.
+C
+C ABSTRACT
+C This routine sorts an integer array IA and makes the same
+C interchanges in the integer array JA and the double precision
+C array A.
+C The array a may be sorted in increasing order or decreasing
+C order. A slightly modified quicksort algorithm is used.
+C
+C DESCRIPTION OF PARAMETERS
+C IA - Integer array of values to be sorted.
+C JA - Integer array to be carried along.
+C A - Double Precision array to be carried along.
+C N - Number of values in integer array IA to be sorted.
+C KFLAG - Control parameter
+C = 1 means sort IA in INCREASING order.
+C =-1 means sort IA in DECREASING order.
+C
+C***REFERENCES
+C Singleton, R. C., Algorithm 347, "An Efficient Algorithm for
+C Sorting with Minimal Storage", cacm, Vol. 12, No. 3, 1969,
+C Pp. 185-187.
+C***ROUTINES CALLED XERROR
+C***END PROLOGUE QS2I1D
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+CVD$R NOVECTOR
+CVD$R NOCONCUR
+ DIMENSION IL(21),IU(21)
+ INTEGER IA(N),JA(N),IT,IIT,JT,JJT
+ DOUBLE PRECISION A(N), TA, TTA
+C
+C IL/IU form an explicit stack of subarray bounds replacing
+C recursion (fixed depth 21 -- presumably sufficient for any
+C practical N with this always-recurse-on-smaller-half scheme;
+C TODO confirm bound). R is a pseudo-random fraction used only
+C to pick the pivot index, so default REAL precision is adequate.
+C
+C***FIRST EXECUTABLE STATEMENT QS2I1D
+ NN = N
+ IF (NN.LT.1) THEN
+ CALL XERROR ( 'QS2I1D- the number of values to be sorted was no
+ $T POSITIVE.',59,1,1)
+ RETURN
+ ENDIF
+ IF( N.EQ.1 ) RETURN
+ KK = IABS(KFLAG)
+ IF ( KK.NE.1 ) THEN
+ CALL XERROR ( 'QS2I1D- the sort control parameter, k, was not 1
+ $ OR -1.',55,2,1)
+ RETURN
+ ENDIF
+C
+C Alter array IA to get decreasing order if needed.
+C (Negate the keys, sort ascending, then negate back at 300.)
+C
+ IF( KFLAG.LT.1 ) THEN
+ DO 20 I=1,NN
+ IA(I) = -IA(I)
+ 20 CONTINUE
+ ENDIF
+C
+C Sort IA and carry JA and A along.
+C And now...Just a little black magic...
+ M = 1
+ I = 1
+ J = NN
+ R = .375
+C Update R with a fixed pattern so successive pivots are spread
+C pseudo-randomly through each partition (Singleton's scheme).
+ 210 IF( R.LE.0.5898437 ) THEN
+ R = R + 3.90625E-2
+ ELSE
+ R = R-.21875
+ ENDIF
+ 225 K = I
+C
+C Select a central element of the array and save it in location
+C it, jt, at.
+C
+ IJ = I + IDINT( DBLE(J-I)*R )
+ IT = IA(IJ)
+ JT = JA(IJ)
+ TA = A(IJ)
+C
+C If first element of array is greater than it, interchange with it.
+C
+ IF( IA(I).GT.IT ) THEN
+ IA(IJ) = IA(I)
+ IA(I) = IT
+ IT = IA(IJ)
+ JA(IJ) = JA(I)
+ JA(I) = JT
+ JT = JA(IJ)
+ A(IJ) = A(I)
+ A(I) = TA
+ TA = A(IJ)
+ ENDIF
+ L=J
+C
+C If last element of array is less than it, swap with it.
+C
+ IF( IA(J).LT.IT ) THEN
+ IA(IJ) = IA(J)
+ IA(J) = IT
+ IT = IA(IJ)
+ JA(IJ) = JA(J)
+ JA(J) = JT
+ JT = JA(IJ)
+ A(IJ) = A(J)
+ A(J) = TA
+ TA = A(IJ)
+C
+C If first element of array is greater than it, swap with it.
+C
+ IF ( IA(I).GT.IT ) THEN
+ IA(IJ) = IA(I)
+ IA(I) = IT
+ IT = IA(IJ)
+ JA(IJ) = JA(I)
+ JA(I) = JT
+ JT = JA(IJ)
+ A(IJ) = A(I)
+ A(I) = TA
+ TA = A(IJ)
+ ENDIF
+ ENDIF
+C
+C Find an element in the second half of the array which is
+C smaller than it.
+C
+ 240 L=L-1
+ IF( IA(L).GT.IT ) GO TO 240
+C
+C Find an element in the first half of the array which is
+C greater than it.
+C
+ 245 K=K+1
+ IF( IA(K).LT.IT ) GO TO 245
+C
+C Interchange these elements.
+C
+ IF( K.LE.L ) THEN
+ IIT = IA(L)
+ IA(L) = IA(K)
+ IA(K) = IIT
+ JJT = JA(L)
+ JA(L) = JA(K)
+ JA(K) = JJT
+ TTA = A(L)
+ A(L) = A(K)
+ A(K) = TTA
+ GOTO 240
+ ENDIF
+C
+C Save upper and lower subscripts of the array yet to be sorted.
+C (Push the larger partition; iterate on the smaller one.)
+C
+ IF( L-I.GT.J-K ) THEN
+ IL(M) = I
+ IU(M) = L
+ I = K
+ M = M+1
+ ELSE
+ IL(M) = K
+ IU(M) = J
+ J = L
+ M = M+1
+ ENDIF
+ GO TO 260
+C
+C Begin again on another portion of the unsorted array.
+C (Pop the next pending partition off the IL/IU stack.)
+C
+ 255 M = M-1
+ IF( M.EQ.0 ) GO TO 300
+ I = IL(M)
+ J = IU(M)
+ 260 IF( J-I.GE.1 ) GO TO 225
+ IF( I.EQ.J ) GO TO 255
+ IF( I.EQ.1 ) GO TO 210
+C
+C Finish short segments with a straight insertion sort.
+ I = I-1
+ 265 I = I+1
+ IF( I.EQ.J ) GO TO 255
+ IT = IA(I+1)
+ JT = JA(I+1)
+ TA = A(I+1)
+ IF( IA(I).LE.IT ) GO TO 265
+ K=I
+ 270 IA(K+1) = IA(K)
+ JA(K+1) = JA(K)
+ A(K+1) = A(K)
+ K = K-1
+ IF( IT.LT.IA(K) ) GO TO 270
+ IA(K+1) = IT
+ JA(K+1) = JT
+ A(K+1) = TA
+ GO TO 265
+C
+C Clean up, if necessary.
+C (Undo the key negation done for KFLAG = -1.)
+C
+ 300 IF( KFLAG.LT.1 ) THEN
+ DO 310 I=1,NN
+ IA(I) = -IA(I)
+ 310 CONTINUE
+ ENDIF
+ RETURN
+C------------- LAST LINE OF QS2I1D FOLLOWS ----------------------------
+ END
+*DECK DS2Y
+ SUBROUTINE DS2Y(N, NELT, IA, JA, A, ISYM )
+C***BEGIN PROLOGUE DS2Y
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DS2Y-D),
+C Linear system, SLAP Sparse
+C***AUTHOR Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP Triad to SLAP Column Format Converter.
+C Routine to convert from the SLAP Triad to SLAP Column
+C format.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+C DOUBLE PRECISION A(NELT)
+C
+C CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C NELT :IN Integer.
+C Number of non-zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "LONG
+C DESCRIPTION", below. If the SLAP Triad format is used
+C this format is translated to the SLAP Column format by
+C this routine.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the lower
+C triangle of the matrix is stored.
+C
+C *Precision: Double Precision
+C
+C***LONG DESCRIPTION
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either of the
+C of these data structures. If the SLAP Triad format is give
+C as input then this routine transforms it into SLAP Column
+C format. The way this routine tells which format is given as
+C input is to look at JA(N+1). If JA(N+1) = NELT+1 then we
+C have the SLAP Column format. If that equality does not hold
+C then it is assumed that the IA, JA, A arrays contain the
+C SLAP Triad format.
+C
+C =================== S L A P Triad format ===================
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C***REFERENCES (NONE)
+C***ROUTINES CALLED QS2I1D
+C***END PROLOGUE DS2Y
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+ DOUBLE PRECISION A(NELT)
+C
+C NOTE(review): although JA is declared JA(NELT), the code reads
+C JA(N+1) and assigns JA(N+1) and JA(N+2) below, so callers must
+C supply JA with at least MAX(NELT,N+2) elements -- TODO confirm
+C against the SLAP drivers that call this converter.
+C
+C Check to see if the (IA,JA,A) arrays are in SLAP Column
+C format. If it's not then transform from SLAP Triad.
+C***FIRST EXECUTABLE STATEMENT DS2Y
+ IF( JA(N+1).EQ.NELT+1 ) RETURN
+C
+C Sort into ascending order by COLUMN (on the ja array).
+C This will line up the columns.
+C
+ CALL QS2I1D( JA, IA, A, NELT, 1 )
+C
+C Loop over each column to see where the column indicies change
+C in the column index array ja. This marks the beginning of the
+C next column.
+C
+CVD$R NOVECTOR
+ JA(1) = 1
+ DO 20 ICOL = 1, N-1
+ DO 10 J = JA(ICOL)+1, NELT
+ IF( JA(J).NE.ICOL ) THEN
+ JA(ICOL+1) = J
+ GOTO 20
+ ENDIF
+ 10 CONTINUE
+ 20 CONTINUE
+ JA(N+1) = NELT+1
+C
+C Mark the n+2 element so that future calls to a SLAP routine
+C utilizing the YSMP-Column storage format will be able to tell.
+C
+ JA(N+2) = 0
+C
+C Now loop thru the ia(i) array making sure that the Diagonal
+C matrix element appears first in the column. Then sort the
+C rest of the column in ascending order.
+C
+ DO 70 ICOL = 1, N
+ IBGN = JA(ICOL)
+ IEND = JA(ICOL+1)-1
+ DO 30 I = IBGN, IEND
+ IF( IA(I).EQ.ICOL ) THEN
+C Swap the diag element with the first element in the column.
+ ITEMP = IA(I)
+ IA(I) = IA(IBGN)
+ IA(IBGN) = ITEMP
+ TEMP = A(I)
+ A(I) = A(IBGN)
+ A(IBGN) = TEMP
+ GOTO 40
+ ENDIF
+ 30 CONTINUE
+C Simple O(len**2) exchange sort of the rest of the column --
+C columns are assumed short, so this is adequate.
+ 40 IBGN = IBGN + 1
+ IF( IBGN.LT.IEND ) THEN
+ DO 60 I = IBGN, IEND
+ DO 50 J = I+1, IEND
+ IF( IA(I).GT.IA(J) ) THEN
+ ITEMP = IA(I)
+ IA(I) = IA(J)
+ IA(J) = ITEMP
+ TEMP = A(I)
+ A(I) = A(J)
+ A(J) = TEMP
+ ENDIF
+ 50 CONTINUE
+ 60 CONTINUE
+ ENDIF
+ 70 CONTINUE
+ RETURN
+C------------- LAST LINE OF DS2Y FOLLOWS ----------------------------
+ END
+*DECK DCPPLT
+ SUBROUTINE DCPPLT( N, NELT, IA, JA, A, ISYM, IUNIT )
+C***BEGIN PROLOGUE DCPPLT
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DCPPLT-D),
+C Linear system, SLAP Sparse, Diagnostics
+C***AUTHOR Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Printer Plot of SLAP Column Format Matrix.
+C Routine to print out a SLAP Column format matrix in
+C a "printer plot" graphical representation.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(N+1), ISYM, IUNIT
+C DOUBLE PRECISION A(NELT)
+C
+C CALL DCPPLT( N, NELT, IA, JA, A, ISYM, IUNIT )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C NELT :IN Integer.
+C Number of non-zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(N+1).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP
+C Column format. See "LONG DESCRIPTION", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the lower
+C triangle of the matrix is stored.
+C IUNIT :IN Integer.
+C Fortran logical I/O device unit number to write the matrix
+C to. This unit must be connected in a system dependent fashion
+C to a file or the console or you will get a nasty message
+C from the Fortran I/O libraries.
+C
+C *Precision: Double Precision
+C *Portability:
+C You must make sure that IUNIT is a valid Fortran logical
+C I/O device unit number and that the unit number has been
+C associated with a file or the console. This is a system
+C dependent function.
+C
+C***LONG DESCRIPTION
+C This routine prints out a SLAP Column format matrix to the
+C Fortran logical I/O unit number IUNIT. The numbers them
+C selves are not printed out, but rather a one character
+C representation of the numbers. Elements of the matrix that
+C are not represented in the (IA,JA,A) arrays are denoted by
+C ' ' character (a blank). Elements of A that are *ZERO* (and
+C hence should really not be stored) are denoted by a '0'
+C character. Elements of A that are *POSITIVE* are denoted by
+C 'D' if they are Diagonal elements and '#' if they are off
+C Diagonal elements. Elements of A that are *NEGATIVE* are
+C denoted by 'N' if they are Diagonal elements and '*' if
+C they are off Diagonal elements.
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DCPPLT
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+ DOUBLE PRECISION A(NELT)
+ CHARACTER*225 CHMAT(225)
+C
+C NOTE(review): CHMAT caps the plot at the leading 225x225
+C submatrix (NMAX below); rows/columns beyond 225 are silently
+C omitted from the picture.
+C
+C Set up the character matrix...
+C***FIRST EXECUTABLE STATEMENT DCPPLT
+ NMAX = MIN( 225, N)
+ DO 10 I = 1, NMAX
+ CHMAT(I)(1:NMAX) = ' '
+ 10 CONTINUE
+ DO 30 ICOL = 1, NMAX
+ JBGN = JA(ICOL)
+ JEND = JA(ICOL+1)-1
+ DO 20 J = JBGN, JEND
+ IROW = IA(J)
+ IF( IROW.LE.NMAX ) THEN
+ IF( ISYM.NE.0 ) THEN
+C Put in non-sym part as well...
+C NOTE(review): this branch writes CHMAT(IROW)(ICOL:ICOL), the
+C very element the unconditional diagonal/off-diagonal block
+C below always rewrites, so it has no effect on the output.
+C The intent was presumably to mark the mirrored element
+C CHMAT(ICOL)(IROW:IROW) for symmetric (lower-triangle-only)
+C storage -- confirm against upstream SLATEC dcpplt.f.
+ IF( A(J).EQ.0.0D0 ) THEN
+ CHMAT(IROW)(ICOL:ICOL) = '0'
+ ELSEIF( A(J).GT.0.0D0 ) THEN
+ CHMAT(IROW)(ICOL:ICOL) = '#'
+ ELSE
+ CHMAT(IROW)(ICOL:ICOL) = '*'
+ ENDIF
+ ENDIF
+ IF( IROW.EQ.ICOL ) THEN
+C Diagonal entry.
+ IF( A(J).EQ.0.0D0 ) THEN
+ CHMAT(IROW)(ICOL:ICOL) = '0'
+ ELSEIF( A(J).GT.0.0D0 ) THEN
+ CHMAT(IROW)(ICOL:ICOL) = 'D'
+ ELSE
+ CHMAT(IROW)(ICOL:ICOL) = 'N'
+ ENDIF
+ ELSE
+C Off-Diagonal entry
+ IF( A(J).EQ.0.0D0 ) THEN
+ CHMAT(IROW)(ICOL:ICOL) = '0'
+ ELSEIF( A(J).GT.0.0D0 ) THEN
+ CHMAT(IROW)(ICOL:ICOL) = '#'
+ ELSE
+ CHMAT(IROW)(ICOL:ICOL) = '*'
+ ENDIF
+ ENDIF
+ ENDIF
+ 20 CONTINUE
+ 30 CONTINUE
+C
+C Write out the heading.
+C (Density is computed in single precision; N*N may overflow
+C default INTEGER for very large N -- TODO confirm callers.)
+ WRITE(IUNIT,1000) N, NELT, FLOAT(NELT)/FLOAT(N*N)
+ WRITE(IUNIT,1010) (MOD(I,10),I=1,NMAX)
+C
+C Write out the character representations matrix elements.
+ DO 40 IROW = 1, NMAX
+ WRITE(IUNIT,1020) IROW, CHMAT(IROW)(1:NMAX)
+ 40 CONTINUE
+ RETURN
+ 1000 FORMAT(/'**** Picture of Column SLAP matrix follows ****'/
+ $ ' N, NELT and Density = ',2I10,E16.7)
+C NOTE(review): the repeat count 255 below disagrees with the
+C 225-column cap used above (CHMAT/NMAX); harmless at runtime
+C but inconsistent.
+ 1010 FORMAT(4X,255(I1))
+ 1020 FORMAT(1X,I3,A)
+C------------- LAST LINE OF DCPPLT FOLLOWS ----------------------------
+ END
+*DECK DTOUT
+      SUBROUTINE DTOUT( N, NELT, IA, JA, A, ISYM, SOLN, RHS,
+     $     IUNIT, JOB )
+C***BEGIN PROLOGUE  DTOUT
+C***DATE WRITTEN   871119   (YYMMDD)
+C***REVISION DATE  881213   (YYMMDD)
+C***CATEGORY NO.  D2A4, D2B4
+C***KEYWORDS  LIBRARY=SLATEC(SLAP),
+C             TYPE=DOUBLE PRECISION(DTOUT-D),
+C             Linear system, SLAP Sparse, Diagnostics
+C***AUTHOR  Seager, Mark K., (LLNL)
+C***PURPOSE  Write out SLAP Triad Format Linear System.
+C***DESCRIPTION
+C     Writes an N x N sparse matrix held in SLAP Triad format
+C     (IA(k), JA(k), A(k) = row index, column index and value of
+C     the k-th stored non-zero, k = 1..NELT) to Fortran unit
+C     IUNIT, optionally followed by the right hand side and/or
+C     the known solution.  The layout written is the one DTIN
+C     expects to read back:
+C        line 1:  N, NELT, ISYM, IRHS, ISOLN         (5I10)
+C        then NELT triad lines                       (1X,I5,1X,I5,1X,E16.7)
+C        then RHS(1..N)  when IRHS  = 1              (1X,E16.7)
+C        then SOLN(1..N) when ISOLN = 1              (1X,E16.7)
+C
+C     N     :IN  Integer.  Order of the matrix.
+C     NELT  :IN  Integer.  Number of stored non-zeros.
+C     IA    :IN  Integer IA(NELT).  Row indices.
+C     JA    :IN  Integer JA(NELT).  Column indices.
+C     A     :IN  Double Precision A(NELT).  Non-zero values.
+C     ISYM  :IN  Integer.  0 => all non-zeros stored; 1 => matrix
+C            symmetric, only the lower triangle stored.
+C     SOLN  :IN  Double Precision SOLN(N).  Solution vector;
+C            referenced only when JOB = 2 or 3.
+C     RHS   :IN  Double Precision RHS(N).  Right hand side;
+C            referenced only when JOB = 1 or 3.
+C     IUNIT :IN  Integer.  Output unit; must already be connected
+C            to a file or the console (system dependent).
+C     JOB   :IN  Integer.  0 => matrix only, 1 => matrix and RHS,
+C            2 => matrix and SOLN, 3 => matrix, RHS and SOLN.
+C***REFERENCES  (NONE)
+C***ROUTINES CALLED  (NONE)
+C***END PROLOGUE  DTOUT
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, JOB
+      DOUBLE PRECISION A(NELT), RHS(N), SOLN(N)
+C
+C         Local variables.
+C
+      INTEGER IRHS, ISOLN, K
+C
+C***FIRST EXECUTABLE STATEMENT  DTOUT
+C         Decode JOB into the IRHS/ISOLN flags recorded in the
+C         header line.
+      IF( JOB.EQ.1 .OR. JOB.EQ.3 ) THEN
+         IRHS = 1
+      ELSE
+         IRHS = 0
+      ENDIF
+      IF( JOB.GT.1 ) THEN
+         ISOLN = 1
+      ELSE
+         ISOLN = 0
+      ENDIF
+C
+C         Header line, then one (row, column, value) triad per line.
+      WRITE(IUNIT,1000) N, NELT, ISYM, IRHS, ISOLN
+      DO 10 K = 1, NELT
+         WRITE(IUNIT,1010) IA(K), JA(K), A(K)
+ 10   CONTINUE
+C
+C         Optional right hand side and solution vectors.
+      IF( IRHS.EQ.1 ) WRITE(IUNIT,1020) (RHS(K),K=1,N)
+      IF( ISOLN.EQ.1 ) WRITE(IUNIT,1020) (SOLN(K),K=1,N)
+      RETURN
+ 1000 FORMAT(5I10)
+ 1010 FORMAT(1X,I5,1X,I5,1X,E16.7)
+ 1020 FORMAT(1X,E16.7)
+C------------- LAST LINE OF DTOUT FOLLOWS ----------------------------
+      END
+*DECK DTIN
+ SUBROUTINE DTIN( N, NELT, IA, JA, A, ISYM, SOLN, RHS,
+ $ IUNIT, JOB )
+C***BEGIN PROLOGUE DTIN
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DTIN-D),
+C Linear system, SLAP Sparse, Diagnostics
+C***AUTHOR Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Read in SLAP Triad Format Linear System.
+C Routine to read in a SLAP Triad format matrix and
+C right hand side and solution to the system, if known.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, IUNIT, JOB
+C DOUBLE PRECISION A(NELT), SOLN(N), RHS(N)
+C
+C CALL DTIN( N, NELT, IA, JA, A, ISYM, SOLN, RHS, IUNIT, JOB )
+C
+C *Arguments:
+C N :OUT Integer
+C Order of the Matrix.
+C NELT :INOUT Integer.
+C On input NELT is the maximum number of non-zeros that
+C can be stored in the IA, JA, A arrays.
+C On output NELT is the number of non-zeros stored in A.
+C IA :OUT Integer IA(NELT).
+C JA :OUT Integer JA(NELT).
+C A :OUT Double Precision A(NELT).
+C On output these arrays hold the matrix A in the SLAP
+C Triad format. See "LONG DESCRIPTION", below.
+C ISYM :OUT Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the lower
+C triangle of the matrix is stored.
+C SOLN :OUT Double Precision SOLN(N).
+C The solution to the linear system, if present. This array
+C         is accessed if and only if JOB is set to read it in, see below.
+C If the user requests that SOLN be read in, but it is not in
+C the file, then it is simply zeroed out.
+C RHS :OUT Double Precision RHS(N).
+C The right hand side vector. This array is accessed if and
+C only if JOB is set to read it in, see below.
+C If the user requests that RHS be read in, but it is not in
+C the file, then it is simply zeroed out.
+C IUNIT :IN Integer.
+C         Fortran logical I/O device unit number to read the matrix
+C         from.  This unit must be connected in a system dependent fashion
+C         to a file or the console or you will get a nasty message
+C         from the Fortran I/O libraries.
+C JOB :INOUT Integer.
+C Flag indicating what I/O operations to perform.
+C On input JOB indicates what Input operations to try to
+C perform.
+C JOB = 0 => Read only the matrix.
+C = 1 => Read matrix and RHS (if present).
+C = 2 => Read matrix and SOLN (if present).
+C = 3 => Read matrix, RHS and SOLN (if present).
+C On output JOB indicates what operations were actually
+C performed.
+C JOB = 0 => Read in only the matrix.
+C = 1 => Read in the matrix and RHS.
+C = 2 => Read in the matrix and SOLN.
+C = 3 => Read in the matrix, RHS and SOLN.
+C
+C *Precision: Double Precision
+C *Portability:
+C You must make sure that IUNIT is a valid Fortran logical
+C I/O device unit number and that the unit number has been
+C associated with a file or the console. This is a system
+C dependent function.
+C
+C***LONG DESCRIPTION
+C The format for the output is as follows. On the first line
+C are counters and flags: N, NELT, ISYM, IRHS, ISOLN. N, NELT
+C and ISYM are described above. IRHS is a flag indicating if
+C the RHS was written out (1 is yes, 0 is no). ISOLN is a
+C flag indicating if the SOLN was written out (1 is yes, 0 is
+C       no).  The format for the first line is: 5i10.  Then comes the
+C NELT Triad's IA(I), JA(I) and A(I), I = 1, NELT. The format
+C for these lines is : 1X,I5,1X,I5,1X,E16.7. Then comes
+C RHS(I), I = 1, N, if IRHS = 1. Then comes SOLN(I), I = 1,
+C N, if ISOLN = 1. The format for these lines is: 1X,E16.7.
+C
+C =================== S L A P Triad format ===================
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DTIN
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, JOB
+ DOUBLE PRECISION A(NELT), RHS(N), SOLN(N)
+C
+C Local variables.
+C
+ INTEGER IRHS, ISOLN, I, NELTMAX
+C
+C Read in the information heading.
+C***FIRST EXECUTABLE STATEMENT DTIN
+ NELTMAX = NELT
+ READ(IUNIT,1000) N, NELT, ISYM, IRHS, ISOLN
+ NELT = MIN( NELT, NELTMAX )
+C
+C Read in the matrix non-zeros in Triad format.
+ DO 10 I = 1, NELT
+ READ(IUNIT,1010) IA(I), JA(I), A(I)
+ 10 CONTINUE
+C
+C If requested, read in the rhs.
+ JOBRET = 0
+ IF( JOB.EQ.1 .OR. JOB.EQ.3 ) THEN
+C
+C Check to see if rhs is in the file.
+ IF( IRHS.EQ.1 ) THEN
+ JOBRET = 1
+ READ(IUNIT,1020) (RHS(I),I=1,N)
+ ELSE
+ DO 20 I = 1, N
+ RHS(I) = 0.0D0
+ 20 CONTINUE
+ ENDIF
+ ENDIF
+C
+C If requested, read in the soln.
+ IF( JOB.GT.1 ) THEN
+C
+C Check to see if soln is in the file.
+ IF( ISOLN.EQ.1 ) THEN
+ JOBRET = JOBRET + 2
+ READ(IUNIT,1020) (SOLN(I),I=1,N)
+ ELSE
+ DO 30 I = 1, N
+ SOLN(I) = 0.0D0
+ 30 CONTINUE
+ ENDIF
+ ENDIF
+C
+ JOB = JOBRET
+ RETURN
+ 1000 FORMAT(5I10)
+ 1010 FORMAT(1X,I5,1X,I5,1X,E16.7)
+ 1020 FORMAT(1X,E16.7)
+C------------- LAST LINE OF DTIN FOLLOWS ----------------------------
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dmset.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dmset.f
new file mode 100644
index 0000000000..42b1bd19a7
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dmset.f
@@ -0,0 +1,1222 @@
+*DECK DSDS
+ SUBROUTINE DSDS(N, NELT, IA, JA, A, ISYM, DINV)
+C***BEGIN PROLOGUE DSDS
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSDS-D),
+C SLAP Sparse, Diagonal
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Diagonal Scaling Preconditioner SLAP Set Up.
+C Routine to compute the inverse of the diagonal of a matrix
+C stored in the SLAP Column format.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+C DOUBLE PRECISION A(NELT), DINV(N)
+C
+C CALL DSDS( N, NELT, IA, JA, A, ISYM, DINV )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C NELT :IN Integer.
+C Number of elements in arrays IA, JA, and A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP Column
+C format. See "Description", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C DINV :OUT Double Precision DINV(N).
+C Upon return this array holds 1./DIAG(A).
+C
+C *Description
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format all of the "inner loops" of this
+C routine should vectorize on machines with hardware support
+C for vector gather/scatter operations. Your compiler may
+C require a compiler directive to convince it that there are
+C no implicit vector dependencies. Compiler directives for
+C the Alliant FX/Fortran and CRI CFT/CFT77 compilers are
+C supplied with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C
+C *Cautions:
+C This routine assumes that the diagonal of A is all non-zero
+C and that the operation DINV = 1.0/DIAG(A) will not underflow
+C or overflow. This is done so that the loop vectorizes.
+C       Matrices with zero or near zero or very large entries will
+C have numerical difficulties and must be fixed before this
+C routine is called.
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSDS
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+ DOUBLE PRECISION A(NELT), DINV(N)
+C
+C Assume the Diagonal elements are the first in each column.
+C This loop should *VECTORIZE*. If it does not you may have
+C to add a compiler directive. We do not check for a zero
+C (or near zero) diagonal element since this would interfere
+C with vectorization. If this makes you nervous put a check
+C in! It will run much slower.
+C***FIRST EXECUTABLE STATEMENT DSDS
+ 1 CONTINUE
+ DO 10 ICOL = 1, N
+ DINV(ICOL) = 1.0D0/A(JA(ICOL))
+ 10 CONTINUE
+C
+ RETURN
+C------------- LAST LINE OF DSDS FOLLOWS ----------------------------
+ END
+*DECK DSDSCL
+ SUBROUTINE DSDSCL( N, NELT, IA, JA, A, ISYM, X, B, DINV, JOB,
+ $ ITOL )
+C***BEGIN PROLOGUE DSDSCL
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSDSCL-D),
+C SLAP Sparse, Diagonal
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Diagonal Scaling of system Ax = b.
+C This routine scales (and unscales) the system Ax = b
+C by symmetric diagonal scaling. The new system is:
+C -1/2 -1/2 1/2 -1/2
+C D AD (D x) = D b
+C when scaling is selected with the JOB parameter. When
+C unscaling is selected this process is reversed.
+C The true solution is also scaled or unscaled if ITOL is set
+C appropriately, see below.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, JOB, ITOL
+C DOUBLE PRECISION A(NELT), DINV(N)
+C
+C CALL DSDSCL( N, NELT, IA, JA, A, ISYM, X, B, DINV, JOB, ITOL )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C NELT :IN Integer.
+C Number of elements in arrays IA, JA, and A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP Column
+C format. See "Description", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C X :INOUT Double Precision X(N).
+C Initial guess that will be later used in the iterative
+C solution.
+C of the scaled system.
+C B :INOUT Double Precision B(N).
+C Right hand side vector.
+C DINV :OUT Double Precision DINV(N).
+C Upon return this array holds 1./DIAG(A).
+C JOB :IN Integer.
+C         Flag indicating whether to scale or not.  JOB nonzero means
+C do scaling. JOB = 0 means do unscaling.
+C ITOL :IN Integer.
+C Flag indicating what type of error estimation to do in the
+C iterative method. When ITOL = 11 the exact solution from
+C common block solblk will be used. When the system is scaled
+C then the true solution must also be scaled. If ITOL is not
+C 11 then this vector is not referenced.
+C
+C *Common Blocks:
+C SOLN :INOUT Double Precision SOLN(N). COMMON BLOCK /SOLBLK/
+C The true solution, SOLN, is scaled (or unscaled) if ITOL is
+C set to 11, see above.
+C
+C *Description
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format all of the "inner loops" of this
+C routine should vectorize on machines with hardware support
+C for vector gather/scatter operations. Your compiler may
+C require a compiler directive to convince it that there are
+C no implicit vector dependencies. Compiler directives for
+C the Alliant FX/Fortran and CRI CFT/CFT77 compilers are
+C supplied with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C
+C *Cautions:
+C This routine assumes that the diagonal of A is all non-zero
+C and that the operation DINV = 1.0/DIAG(A) will not under-
+C flow or overflow. This is done so that the loop vectorizes.
+C       Matrices with zero or near zero or very large entries will
+C have numerical difficulties and must be fixed before this
+C routine is called.
+C
+C *See Also:
+C DSDCG
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSDSCL
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, JOB, ITOL
+ DOUBLE PRECISION A(NELT), X(N), B(N), DINV(N)
+ COMMON /SOLBLK/ SOLN(1)
+C
+C SCALING...
+C
+ IF( JOB.NE.0 ) THEN
+ DO 10 ICOL = 1, N
+ DINV(ICOL) = 1.0D0/SQRT( A(JA(ICOL)) )
+ 10 CONTINUE
+ ELSE
+C
+C UNSCALING...
+C
+ DO 15 ICOL = 1, N
+ DINV(ICOL) = 1.0D0/DINV(ICOL)
+ 15 CONTINUE
+ ENDIF
+C
+ DO 30 ICOL = 1, N
+ JBGN = JA(ICOL)
+ JEND = JA(ICOL+1)-1
+ DI = DINV(ICOL)
+ DO 20 J = JBGN, JEND
+ A(J) = DINV(IA(J))*A(J)*DI
+ 20 CONTINUE
+ 30 CONTINUE
+C
+ DO 40 ICOL = 1, N
+ B(ICOL) = B(ICOL)*DINV(ICOL)
+ X(ICOL) = X(ICOL)/DINV(ICOL)
+ 40 CONTINUE
+C
+C Check to see if we need to scale the "true solution" as well.
+C
+ IF( ITOL.EQ.11 ) THEN
+ DO 50 ICOL = 1, N
+ SOLN(ICOL) = SOLN(ICOL)/DINV(ICOL)
+ 50 CONTINUE
+ ENDIF
+C
+ RETURN
+ END
+*DECK DSD2S
+ SUBROUTINE DSD2S(N, NELT, IA, JA, A, ISYM, DINV)
+C***BEGIN PROLOGUE DSD2S
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSD2S-D),
+C SLAP Sparse, Diagonal
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Diagonal Scaling Preconditioner SLAP Normal Eqns Set Up.
+C Routine to compute the inverse of the diagonal of the
+C matrix A*A'. Where A is stored in SLAP-Column format.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+C DOUBLE PRECISION A(NELT), DINV(N)
+C
+C CALL DSD2S( N, NELT, IA, JA, A, ISYM, DINV )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C NELT :IN Integer.
+C Number of elements in arrays IA, JA, and A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP Column
+C format. See "Description", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C DINV :OUT Double Precision DINV(N).
+C Upon return this array holds 1./DIAG(A*A').
+C
+C *Description
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format all of the "inner loops" of this
+C routine should vectorize on machines with hardware support
+C for vector gather/scatter operations. Your compiler may
+C require a compiler directive to convince it that there are
+C no implicit vector dependencies. Compiler directives for
+C the Alliant FX/Fortran and CRI CFT/CFT77 compilers are
+C supplied with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C
+C *Cautions:
+C This routine assumes that the diagonal of A is all non-zero
+C and that the operation DINV = 1.0/DIAG(A*A') will not under-
+C flow or overflow. This is done so that the loop vectorizes.
+C       Matrices with zero or near zero or very large entries will
+C have numerical difficulties and must be fixed before this
+C routine is called.
+C
+C *See Also:
+C DSDCGN
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSD2S
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+ DOUBLE PRECISION A(NELT), DINV(N)
+C
+C***FIRST EXECUTABLE STATEMENT DSD2S
+ DO 10 I = 1, N
+ DINV(I) = 0.
+ 10 CONTINUE
+C
+C Loop over each column.
+CVD$R NOCONCUR
+ DO 40 I = 1, N
+ KBGN = JA(I)
+ KEND = JA(I+1) - 1
+C
+C Add in the contributions for each row that has a non-zero
+C in this column.
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+ DO 20 K = KBGN, KEND
+ DINV(IA(K)) = DINV(IA(K)) + A(K)**2
+ 20 CONTINUE
+ IF( ISYM.EQ.1 ) THEN
+C
+C Lower triangle stored by columns => upper triangle stored by
+C rows with Diagonal being the first entry. Loop across the
+C rest of the row.
+ KBGN = KBGN + 1
+ IF( KBGN.LE.KEND ) THEN
+ DO 30 K = KBGN, KEND
+ DINV(I) = DINV(I) + A(K)**2
+ 30 CONTINUE
+ ENDIF
+ ENDIF
+ 40 CONTINUE
+ DO 50 I=1,N
+ DINV(I) = 1./DINV(I)
+ 50 CONTINUE
+C
+ RETURN
+C------------- LAST LINE OF DSD2S FOLLOWS ----------------------------
+ END
+*DECK DS2LT
+ SUBROUTINE DS2LT( N, NELT, IA, JA, A, ISYM, NEL, IEL, JEL, EL )
+C***BEGIN PROLOGUE DS2LT
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DS2LT-D),
+C Linear system, SLAP Sparse, Lower Triangle
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Lower Triangle Preconditioner SLAP Set Up.
+C Routine to store the lower triangle of a matrix stored
+C in the Slap Column format.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+C INTEGER NEL, IEL(N+1), JEL(NEL), NROW(N)
+C DOUBLE PRECISION A(NELT), EL(NEL)
+C
+C CALL DS2LT( N, NELT, IA, JA, A, ISYM, NEL, IEL, JEL, EL )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C NELT :IN Integer.
+C Number of non-zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP Column
+C format. See "Description", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the lower
+C triangle of the matrix is stored.
+C NEL :OUT Integer.
+C Number of non-zeros in the lower triangle of A. Also
+C         corresponds to the length of the JEL, EL arrays.
+C IEL :OUT Integer IEL(N+1).
+C JEL :OUT Integer JEL(NEL).
+C EL :OUT Double Precision EL(NEL).
+C IEL, JEL, EL contain the lower triangle of the A matrix
+C stored in SLAP Column format. See "Description", below
+C         for more details about the SLAP Column format.
+C
+C *Description
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DS2LT
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+ INTEGER NEL, IEL(NEL), JEL(NEL)
+ DOUBLE PRECISION A(NELT), EL(NELT)
+C***FIRST EXECUTABLE STATEMENT DS2LT
+ IF( ISYM.EQ.0 ) THEN
+C
+C         The matrix is stored non-symmetrically.  Pick out the lower
+C triangle.
+C
+ NEL = 0
+ DO 20 ICOL = 1, N
+ JEL(ICOL) = NEL+1
+ JBGN = JA(ICOL)
+ JEND = JA(ICOL+1)-1
+CVD$ NOVECTOR
+ DO 10 J = JBGN, JEND
+ IF( IA(J).GE.ICOL ) THEN
+ NEL = NEL + 1
+ IEL(NEL) = IA(J)
+ EL(NEL) = A(J)
+ ENDIF
+ 10 CONTINUE
+ 20 CONTINUE
+ JEL(N+1) = NEL+1
+ ELSE
+C
+C The matrix is symmetric and only the lower triangle is
+C stored. Copy it to IEL, JEL, EL.
+C
+ NEL = NELT
+ DO 30 I = 1, NELT
+ IEL(I) = IA(I)
+ EL(I) = A(I)
+ 30 CONTINUE
+ DO 40 I = 1, N+1
+ JEL(I) = JA(I)
+ 40 CONTINUE
+ ENDIF
+ RETURN
+C------------- LAST LINE OF DS2LT FOLLOWS ----------------------------
+ END
+*DECK DSICS
+ SUBROUTINE DSICS(N, NELT, IA, JA, A, ISYM, NEL, IEL, JEL,
+ $ EL, D, R, IWARN )
+C***BEGIN PROLOGUE DSICS
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSICS-D),
+C Linear system, SLAP Sparse, Iterative Precondition
+C Incomplete Cholesky Factorization.
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Incompl Cholesky Decomposition Preconditioner SLAP Set Up.
+C Routine to generate the Incomplete Cholesky decomposition,
+C L*D*L-trans, of a symmetric positive definite matrix, A,
+C which is stored in SLAP Column format. The unit lower
+C triangular matrix L is stored by rows, and the inverse of
+C the diagonal matrix D is stored.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+C INTEGER NEL, IEL(NEL), JEL(N+1), IWARN
+C DOUBLE PRECISION A(NELT), EL(NEL), D(N), R(N)
+C
+C CALL DSICS( N, NELT, IA, JA, A, ISYM, NEL, IEL, JEL, EL, D, R,
+C $ IWARN )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C NELT :IN Integer.
+C Number of elements in arrays IA, JA, and A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP Column
+C format. See "Description", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the lower
+C triangle of the matrix is stored.
+C NEL :OUT Integer.
+C Number of non-zeros in the lower triangle of A. Also
+C         corresponds to the length of the JEL, EL arrays.
+C IEL :OUT Integer IEL(N+1).
+C JEL :OUT Integer JEL(NEL).
+C EL :OUT Double Precision EL(NEL).
+C IEL, JEL, EL contain the unit lower triangular factor of the
+C incomplete decomposition of the A matrix stored in SLAP
+C Row format. The Diagonal of ones *IS* stored. See
+C "Description", below for more details about the SLAP Row fmt.
+C D :OUT Double Precision D(N)
+C Upon return this array holds D(I) = 1./DIAG(A).
+C R :WORK Double Precision R(N).
+C Temporary double precision workspace needed for the
+C factorization.
+C IWARN :OUT Integer.
+C This is a warning variable and is zero if the IC factoriza-
+C tion goes well. It is set to the row index corresponding to
+C the last zero pivot found. See "Description", below.
+C
+C *Description
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C ==================== S L A P Row format ====================
+C This routine requires that the matrix A be stored in the
+C SLAP Row format. In this format the non-zeros are stored
+C counting across rows (except for the diagonal entry, which
+C must appear first in each "row") and are stored in the
+C double precision
+C array A. In other words, for each row in the matrix put the
+C diagonal entry in A. Then put in the other non-zero
+C elements going across the row (except the diagonal) in
+C order. The JA array holds the column index for each
+C non-zero. The IA array holds the offsets into the JA, A
+C arrays for the beginning of each row. That is,
+C JA(IA(IROW)), A(IA(IROW)) points to the beginning of the
+C IROW-th row in JA and A. JA(IA(IROW+1)-1), A(IA(IROW+1)-1)
+C points to the end of the IROW-th row. Note that we always
+C have IA(N+1) = NELT+1, where N is the number of rows in
+C the matrix and NELT is the number of non-zeros in the
+C matrix.
+C
+C Here is an example of the SLAP Row storage format for a 5x5
+C Matrix (in the A and JA arrays '|' denotes the end of a row):
+C
+C 5x5 Matrix SLAP Row format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 12 15 | 22 21 | 33 35 | 44 | 55 51 53
+C |21 22 0 0 0| JA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| IA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format some of the "inner loops" of this
+C routine should vectorize on machines with hardware support
+C for vector gather/scatter operations. Your compiler may
+C require a compiler directive to convince it that there are
+C no implicit vector dependencies. Compiler directives for
+C the Alliant FX/Fortran and CRI CFT/CFT77 compilers are
+C supplied with the standard SLAP distribution.
+C
+C         The IC factorization does not always exist for SPD matrices.
+C         In the event that a zero pivot is found it is set to be 1.0
+C         and the factorization proceeds.  The integer variable IWARN
+C         is set to the last row where the Diagonal was fudged.  This
+C         eventuality hardly ever occurs in practice.
+C
+C *Precision: Double Precision
+C
+C *See Also:
+C SCG, DSICCG
+C***REFERENCES 1. Gene Golub & Charles Van Loan, "Matrix Computations",
+C               Johns Hopkins University Press; 3 (1983) ISBN
+C 0-8018-3010-9.
+C***ROUTINES CALLED XERRWV
+C***END PROLOGUE DSICS
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+ INTEGER NEL, IEL(NEL), JEL(NEL)
+ DOUBLE PRECISION A(NELT), EL(NEL), D(N), R(N)
+C
+C Set the lower triangle in IEL, JEL, EL
+C***FIRST EXECUTABLE STATEMENT DSICS
+ IWARN = 0
+C
+C All matrix elements stored in IA, JA, A. Pick out the lower
+C triangle (making sure that the Diagonal of EL is one) and
+C store by rows.
+C
+ NEL = 1
+ IEL(1) = 1
+ JEL(1) = 1
+ EL(1) = 1.0D0
+ D(1) = A(1)
+CVD$R NOCONCUR
+ DO 30 IROW = 2, N
+C Put in the Diagonal.
+ NEL = NEL + 1
+ IEL(IROW) = NEL
+ JEL(NEL) = IROW
+ EL(NEL) = 1.0D0
+ D(IROW) = A(JA(IROW))
+C
+C Look in all the lower triangle columns for a matching row.
+C Since the matrix is symmetric, we can look across the
+C irow-th row by looking down the irow-th column (if it is
+C stored ISYM=0)...
+ IF( ISYM.EQ.0 ) THEN
+ ICBGN = JA(IROW)
+ ICEND = JA(IROW+1)-1
+ ELSE
+ ICBGN = 1
+ ICEND = IROW-1
+ ENDIF
+ DO 20 IC = ICBGN, ICEND
+ IF( ISYM.EQ.0 ) THEN
+ ICOL = IA(IC)
+ IF( ICOL.GE.IROW ) GOTO 20
+ ELSE
+ ICOL = IC
+ ENDIF
+ JBGN = JA(ICOL)+1
+ JEND = JA(ICOL+1)-1
+ IF( JBGN.LE.JEND .AND. IA(JEND).GE.IROW ) THEN
+CVD$ NOVECTOR
+ DO 10 J = JBGN, JEND
+ IF( IA(J).EQ.IROW ) THEN
+ NEL = NEL + 1
+ JEL(NEL) = ICOL
+ EL(NEL) = A(J)
+ GOTO 20
+ ENDIF
+ 10 CONTINUE
+ ENDIF
+ 20 CONTINUE
+ 30 CONTINUE
+ IEL(N+1) = NEL+1
+C
+C Sort ROWS of lower triangle into descending order (count out
+C along rows out from Diagonal).
+C
+ DO 60 IROW = 2, N
+ IBGN = IEL(IROW)+1
+ IEND = IEL(IROW+1)-1
+ IF( IBGN.LT.IEND ) THEN
+ DO 50 I = IBGN, IEND-1
+CVD$ NOVECTOR
+ DO 40 J = I+1, IEND
+ IF( JEL(I).GT.JEL(J) ) THEN
+ JELTMP = JEL(J)
+ JEL(J) = JEL(I)
+ JEL(I) = JELTMP
+ ELTMP = EL(J)
+ EL(J) = EL(I)
+ EL(I) = ELTMP
+ ENDIF
+ 40 CONTINUE
+ 50 CONTINUE
+ ENDIF
+ 60 CONTINUE
+C
+C Perform the Incomplete Cholesky decomposition by looping
+C over the rows.
+C Scale the first column. Use the structure of A to pick out
+C the rows with something in column 1.
+C
+ IRBGN = JA(1)+1
+ IREND = JA(2)-1
+ DO 65 IRR = IRBGN, IREND
+ IR = IA(IRR)
+C Find the index into EL for EL(1,IR).
+C Hint: it's the second entry.
+ I = IEL(IR)+1
+ EL(I) = EL(I)/D(1)
+ 65 CONTINUE
+C
+ DO 110 IROW = 2, N
+C
+C Update the IROW-th diagonal.
+C
+ DO 66 I = 1, IROW-1
+ R(I) = 0.0D0
+ 66 CONTINUE
+ IBGN = IEL(IROW)+1
+ IEND = IEL(IROW+1)-1
+ IF( IBGN.LE.IEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+ DO 70 I = IBGN, IEND
+ R(JEL(I)) = EL(I)*D(JEL(I))
+ D(IROW) = D(IROW) - EL(I)*R(JEL(I))
+ 70 CONTINUE
+C
+C         Check to see if we have a problem with the diagonal.
+C
+ IF( D(IROW).LE.0.0D0 ) THEN
+ IF( IWARN.EQ.0 ) IWARN = IROW
+ D(IROW) = 1.0D0
+ ENDIF
+ ENDIF
+C
+C Update each EL(IROW+1:N,IROW), if there are any.
+C Use the structure of A to determine the Non-zero elements
+C of the IROW-th column of EL.
+C
+ IRBGN = JA(IROW)
+ IREND = JA(IROW+1)-1
+ DO 100 IRR = IRBGN, IREND
+ IR = IA(IRR)
+ IF( IR.LE.IROW ) GOTO 100
+C Find the index into EL for EL(IR,IROW)
+ IBGN = IEL(IR)+1
+ IEND = IEL(IR+1)-1
+ IF( JEL(IBGN).GT.IROW ) GOTO 100
+ DO 90 I = IBGN, IEND
+ IF( JEL(I).EQ.IROW ) THEN
+ ICEND = IEND
+ 91 IF( JEL(ICEND).GE.IROW ) THEN
+ ICEND = ICEND - 1
+ GOTO 91
+ ENDIF
+C Sum up the EL(IR,1:IROW-1)*R(1:IROW-1) contributions.
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+ DO 80 IC = IBGN, ICEND
+ EL(I) = EL(I) - EL(IC)*R(JEL(IC))
+ 80 CONTINUE
+ EL(I) = EL(I)/D(IROW)
+ GOTO 100
+ ENDIF
+ 90 CONTINUE
+C
+C If we get here, we have real problems...
+
+C WHL: Usually this means the matrix A is not symmetric.
+C The preconditioner is fragile in the sense that it can fail with
+C very small departures from symmetry (due to roundoff errors).
+
+ CALL XERRWV('DSICS -- A and EL data structure mismatch'//
+ $ ' in row (i1)',53,1,2,1,IROW,0,0,0.0,0.0)
+ 100 CONTINUE
+ 110 CONTINUE
+C
+C Replace diagonals by their inverses.
+C
+CVD$ CONCUR
+ DO 120 I =1, N
+ D(I) = 1.0D0/D(I)
+ 120 CONTINUE
+ RETURN
+C------------- LAST LINE OF DSICS FOLLOWS ----------------------------
+ END
+*DECK DSILUS
+      SUBROUTINE DSILUS(N, NELT, IA, JA, A, ISYM, NL, IL, JL,
+     $     L, DINV, NU, IU, JU, U, NROW, NCOL)
+C***BEGIN PROLOGUE DSILUS
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSILUS-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition, Incomplete LU Factorization
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Incomplete LU Decomposition Preconditioner SLAP Set Up.
+C Routine to generate the incomplete LDU decomposition of a
+C matrix. The unit lower triangular factor L is stored by
+C rows and the unit upper triangular factor U is stored by
+C columns. The inverse of the diagonal matrix D is stored.
+C No fill in is allowed.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+C INTEGER NL, IL(N+1), JL(NL), NU, IU(N+1), JU(NU)
+C INTEGER NROW(N), NCOL(N)
+C DOUBLE PRECISION A(NELT), L(NL), U(NU), DINV(N)
+C
+C CALL DSILUS( N, NELT, IA, JA, A, ISYM, NL, IL, JL, L,
+C $ DINV, NU, IU, JU, U, NROW, NCOL )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C NELT :IN Integer.
+C Number of elements in arrays IA, JA, and A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP Column
+C format. See "Description", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the lower
+C triangle of the matrix is stored.
+C NL :OUT Integer.
+C Number of non-zeros in the L array.
+C IL :OUT Integer IL(N+1).
+C JL :OUT Integer JL(NL).
+C L :OUT Double Precision L(NL).
+C IL, JL, L contain the unit lower triangular factor of the
+C incomplete decomposition of some matrix stored in SLAP
+C Row format. The Diagonal of ones *IS* stored. See
+C "DESCRIPTION", below for more details about the SLAP format.
+C NU :OUT Integer.
+C Number of non-zeros in the U array.
+C IU :OUT Integer IU(N+1).
+C JU :OUT Integer JU(NU).
+C U :OUT Double Precision U(NU).
+C IU, JU, U contain the unit upper triangular factor of the
+C incomplete decomposition of some matrix stored in SLAP
+C Column format. The Diagonal of ones *IS* stored. See
+C "Description", below for more details about the SLAP
+C format.
+C NROW :WORK Integer NROW(N).
+C NROW(I) is the number of non-zero elements in the I-th row
+C of L.
+C NCOL :WORK Integer NCOL(N).
+C NCOL(I) is the number of non-zero elements in the I-th
+C column of U.
+C
+C *Description
+C IL, JL, L should contain the unit lower triangular factor of
+C the incomplete decomposition of the A matrix stored in SLAP
+C Row format. IU, JU, U should contain the unit upper factor
+C of the incomplete decomposition of the A matrix stored in
+C SLAP Column format. This ILU factorization can be computed
+C by the DSILUS routine. The diagonals (which is all one's) are
+C stored.
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C ==================== S L A P Row format ====================
+C This routine requires that the matrix A be stored in the
+C SLAP Row format. In this format the non-zeros are stored
+C counting across rows (except for the diagonal entry, which
+C must appear first in each "row") and are stored in the
+C double precision
+C array A. In other words, for each row in the matrix put the
+C diagonal entry in A. Then put in the other non-zero
+C elements going across the row (except the diagonal) in
+C order. The JA array holds the column index for each
+C non-zero. The IA array holds the offsets into the JA, A
+C arrays for the beginning of each row. That is,
+C JA(IA(IROW)), A(IA(IROW)) points to the beginning of the
+C IROW-th row in JA and A. JA(IA(IROW+1)-1), A(IA(IROW+1)-1)
+C points to the end of the IROW-th row. Note that we always
+C have IA(N+1) = NELT+1, where N is the number of rows in
+C the matrix and NELT is the number of non-zeros in the
+C matrix.
+C
+C Here is an example of the SLAP Row storage format for a 5x5
+C Matrix (in the A and JA arrays '|' denotes the end of a row):
+C
+C 5x5 Matrix SLAP Row format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 12 15 | 22 21 | 33 35 | 44 | 55 51 53
+C |21 22 0 0 0| JA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| IA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *See Also:
+C SILUR
+C***REFERENCES 1. Gene Golub & Charles Van Loan, "Matrix Computations",
+C Johns Hopkins University Press; 3 (1983) ISBN
+C 0-8018-3010-9.
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSILUS
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NL, IL(NL), JL(NL)
+      INTEGER NU, IU(NU), JU(NU), NROW(N), NCOL(N)
+      DOUBLE PRECISION A(NELT), L(NL), DINV(N), U(NU)
+C
+C Count number of elements in each row of the lower triangle.
+C***FIRST EXECUTABLE STATEMENT DSILUS
+      DO 10 I=1,N
+         NROW(I) = 0
+         NCOL(I) = 0
+ 10   CONTINUE
+CVD$R NOCONCUR
+CVD$R NOVECTOR
+      DO 30 ICOL = 1, N
+         JBGN = JA(ICOL)+1
+         JEND = JA(ICOL+1)-1
+         IF( JBGN.LE.JEND ) THEN
+            DO 20 J = JBGN, JEND
+               IF( IA(J).LT.ICOL ) THEN
+                  NCOL(ICOL) = NCOL(ICOL) + 1
+               ELSE
+                  NROW(IA(J)) = NROW(IA(J)) + 1
+                  IF( ISYM.NE.0 ) NCOL(IA(J)) = NCOL(IA(J)) + 1
+               ENDIF
+ 20         CONTINUE
+         ENDIF
+ 30   CONTINUE
+      JU(1) = 1
+      IL(1) = 1
+      DO 40 ICOL = 1, N
+         IL(ICOL+1) = IL(ICOL) + NROW(ICOL)
+         JU(ICOL+1) = JU(ICOL) + NCOL(ICOL)
+         NROW(ICOL) = IL(ICOL)
+         NCOL(ICOL) = JU(ICOL)
+ 40   CONTINUE
+C
+C Copy the matrix A into the L and U structures.
+      DO 60 ICOL = 1, N
+         DINV(ICOL) = A(JA(ICOL))
+         JBGN = JA(ICOL)+1
+         JEND = JA(ICOL+1)-1
+         IF( JBGN.LE.JEND ) THEN
+            DO 50 J = JBGN, JEND
+               IROW = IA(J)
+               IF( IROW.LT.ICOL ) THEN
+C Part of the upper triangle.
+                  IU(NCOL(ICOL)) = IROW
+                  U(NCOL(ICOL)) = A(J)
+                  NCOL(ICOL) = NCOL(ICOL) + 1
+               ELSE
+C Part of the lower triangle (stored by row).
+                  JL(NROW(IROW)) = ICOL
+                  L(NROW(IROW)) = A(J)
+                  NROW(IROW) = NROW(IROW) + 1
+                  IF( ISYM.NE.0 ) THEN
+C Symmetric...Copy lower triangle into upper triangle as well.
+                     IU(NCOL(IROW)) = ICOL
+                     U(NCOL(IROW)) = A(J)
+                     NCOL(IROW) = NCOL(IROW) + 1
+                  ENDIF
+               ENDIF
+ 50         CONTINUE
+         ENDIF
+ 60   CONTINUE
+C
+C Sort the rows of L and the columns of U.
+      DO 110 K = 2, N
+         JBGN = JU(K)
+         JEND = JU(K+1)-1
+         IF( JBGN.LT.JEND ) THEN
+            DO 80 J = JBGN, JEND-1
+               DO 70 I = J+1, JEND
+                  IF( IU(J).GT.IU(I) ) THEN
+                     ITEMP = IU(J)
+                     IU(J) = IU(I)
+                     IU(I) = ITEMP
+                     TEMP = U(J)
+                     U(J) = U(I)
+                     U(I) = TEMP
+                  ENDIF
+ 70            CONTINUE
+ 80         CONTINUE
+         ENDIF
+         IBGN = IL(K)
+         IEND = IL(K+1)-1
+         IF( IBGN.LT.IEND ) THEN
+            DO 100 I = IBGN, IEND-1
+               DO 90 J = I+1, IEND
+                  IF( JL(I).GT.JL(J) ) THEN
+C Bug fix: swap the column-index array JL (the array that was
+C compared above), not the pointer array JU.  Swapping JU here
+C would corrupt U's column pointers and leave JL unsorted.
+                     JLTMP = JL(J)
+                     JL(J) = JL(I)
+                     JL(I) = JLTMP
+                     TEMP = L(I)
+                     L(I) = L(J)
+                     L(J) = TEMP
+                  ENDIF
+ 90            CONTINUE
+ 100        CONTINUE
+         ENDIF
+ 110  CONTINUE
+C
+C Perform the incomplete LDU decomposition.
+      DO 300 I=2,N
+C
+C I-th row of L
+         INDX1 = IL(I)
+         INDX2 = IL(I+1) - 1
+         IF(INDX1 .GT. INDX2) GO TO 200
+         DO 190 INDX=INDX1,INDX2
+            IF(INDX .EQ. INDX1) GO TO 180
+            INDXR1 = INDX1
+            INDXR2 = INDX - 1
+            INDXC1 = JU(JL(INDX))
+            INDXC2 = JU(JL(INDX)+1) - 1
+            IF(INDXC1 .GT. INDXC2) GO TO 180
+ 160        KR = JL(INDXR1)
+ 170        KC = IU(INDXC1)
+            IF(KR .GT. KC) THEN
+               INDXC1 = INDXC1 + 1
+               IF(INDXC1 .LE. INDXC2) GO TO 170
+            ELSEIF(KR .LT. KC) THEN
+               INDXR1 = INDXR1 + 1
+               IF(INDXR1 .LE. INDXR2) GO TO 160
+            ELSEIF(KR .EQ. KC) THEN
+               L(INDX) = L(INDX) - L(INDXR1)*DINV(KC)*U(INDXC1)
+               INDXR1 = INDXR1 + 1
+               INDXC1 = INDXC1 + 1
+               IF(INDXR1 .LE. INDXR2 .AND. INDXC1 .LE. INDXC2) GO TO 160
+            ENDIF
+ 180        L(INDX) = L(INDX)/DINV(JL(INDX))
+ 190     CONTINUE
+C
+C ith column of u
+ 200     INDX1 = JU(I)
+         INDX2 = JU(I+1) - 1
+         IF(INDX1 .GT. INDX2) GO TO 260
+         DO 250 INDX=INDX1,INDX2
+            IF(INDX .EQ. INDX1) GO TO 240
+            INDXC1 = INDX1
+            INDXC2 = INDX - 1
+            INDXR1 = IL(IU(INDX))
+            INDXR2 = IL(IU(INDX)+1) - 1
+            IF(INDXR1 .GT. INDXR2) GO TO 240
+ 210        KR = JL(INDXR1)
+ 220        KC = IU(INDXC1)
+            IF(KR .GT. KC) THEN
+               INDXC1 = INDXC1 + 1
+               IF(INDXC1 .LE. INDXC2) GO TO 220
+            ELSEIF(KR .LT. KC) THEN
+               INDXR1 = INDXR1 + 1
+               IF(INDXR1 .LE. INDXR2) GO TO 210
+            ELSEIF(KR .EQ. KC) THEN
+               U(INDX) = U(INDX) - L(INDXR1)*DINV(KC)*U(INDXC1)
+               INDXR1 = INDXR1 + 1
+               INDXC1 = INDXC1 + 1
+               IF(INDXR1 .LE. INDXR2 .AND. INDXC1 .LE. INDXC2) GO TO 210
+            ENDIF
+ 240        U(INDX) = U(INDX)/DINV(IU(INDX))
+ 250     CONTINUE
+C
+C ith diagonal element
+ 260     INDXR1 = IL(I)
+         INDXR2 = IL(I+1) - 1
+         IF(INDXR1 .GT. INDXR2) GO TO 300
+         INDXC1 = JU(I)
+         INDXC2 = JU(I+1) - 1
+         IF(INDXC1 .GT. INDXC2) GO TO 300
+ 270     KR = JL(INDXR1)
+ 280     KC = IU(INDXC1)
+         IF(KR .GT. KC) THEN
+            INDXC1 = INDXC1 + 1
+            IF(INDXC1 .LE. INDXC2) GO TO 280
+         ELSEIF(KR .LT. KC) THEN
+            INDXR1 = INDXR1 + 1
+            IF(INDXR1 .LE. INDXR2) GO TO 270
+         ELSEIF(KR .EQ. KC) THEN
+            DINV(I) = DINV(I) - L(INDXR1)*DINV(KC)*U(INDXC1)
+            INDXR1 = INDXR1 + 1
+            INDXC1 = INDXC1 + 1
+            IF(INDXR1 .LE. INDXR2 .AND. INDXC1 .LE. INDXC2) GO TO 270
+         ENDIF
+C
+ 300  CONTINUE
+C
+C replace diagonal lts by their inverses.
+CVD$ VECTOR
+      DO 430 I=1,N
+         DINV(I) = 1.0D0/DINV(I)
+ 430  CONTINUE
+C
+      RETURN
+C------------- LAST LINE OF DSILUS FOLLOWS ----------------------------
+      END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/dmvops.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dmvops.f
new file mode 100644
index 0000000000..610c9cc7a5
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/dmvops.f
@@ -0,0 +1,1476 @@
+*DECK DSMV
+      SUBROUTINE DSMV( N, X, Y, NELT, IA, JA, A, ISYM )
+C***BEGIN PROLOGUE DSMV
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSMV-S),
+C Matrix Vector Multiply, Sparse
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP Column Format Sparse Matrix Vector Product.
+C Routine to calculate the sparse matrix vector product:
+C Y = A*X.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(N+1), ISYM
+C DOUBLE PRECISION X(N), Y(N), A(NELT)
+C
+C CALL DSMV(N, X, Y, NELT, IA, JA, A, ISYM )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C X :IN Double Precision X(N).
+C The vector that should be multiplied by the matrix.
+C Y :OUT Double Precision Y(N).
+C The product of the matrix and the vector.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(N+1).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP Column
+C format. See "Description", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C
+C *Description
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format the "inner loops" of this routine
+C should vectorize on machines with hardware support for
+C vector gather/scatter operations. Your compiler may require
+C a compiler directive to convince it that there are no
+C implicit vector dependencies. Compiler directives for the
+C Alliant FX/Fortran and CRI CFT/CFT77 compilers are supplied
+C with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C *Cautions:
+C This routine assumes that the matrix A is stored in SLAP
+C Column format. It does not check for this (for speed) and
+C evil, ugly, ornery and nasty things will happen if the matrix
+C data structure is, in fact, not SLAP Column. Beware of the
+C wrong data structure!!!
+C
+C *See Also:
+C DSMTV
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSMV
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+      DOUBLE PRECISION A(NELT), X(N), Y(N)
+C
+C Zero out the result vector.
+C***FIRST EXECUTABLE STATEMENT DSMV
+      DO 10 I = 1, N
+         Y(I) = 0.0D0
+ 10   CONTINUE
+C
+C Multiply by A.
+C
+CVD$R NOCONCUR
+      DO 30 ICOL = 1, N
+         IBGN = JA(ICOL)
+         IEND = JA(ICOL+1)-1
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+         DO 20 I = IBGN, IEND
+            Y(IA(I)) = Y(IA(I)) + A(I)*X(ICOL)
+ 20      CONTINUE
+ 30   CONTINUE
+C
+      IF( ISYM.EQ.1 ) THEN
+C
+C The matrix is symmetric (only one triangle is stored); need to
+C get the other half in...
+C This loop assumes that the diagonal is the first entry in
+C each column.
+C
+         DO 50 IROW = 1, N
+            JBGN = JA(IROW)+1
+            JEND = JA(IROW+1)-1
+            IF( JBGN.GT.JEND ) GOTO 50
+            DO 40 J = JBGN, JEND
+               Y(IROW) = Y(IROW) + A(J)*X(IA(J))
+ 40         CONTINUE
+ 50      CONTINUE
+      ENDIF
+      RETURN
+C------------- LAST LINE OF DSMV FOLLOWS ----------------------------
+      END
+*DECK DSMTV
+      SUBROUTINE DSMTV( N, X, Y, NELT, IA, JA, A, ISYM )
+C***BEGIN PROLOGUE DSMTV
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSMTV-S),
+C Matrix transpose Vector Multiply, Sparse
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP Column Format Sparse Matrix (transpose) Vector Prdt.
+C Routine to calculate the sparse matrix vector product:
+C Y = A'*X, where ' denotes transpose.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(N+1), ISYM
+C DOUBLE PRECISION X(N), Y(N), A(NELT)
+C
+C CALL DSMTV(N, X, Y, NELT, IA, JA, A, ISYM )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C X :IN Double Precision X(N).
+C The vector that should be multiplied by the transpose of
+C the matrix.
+C Y :OUT Double Precision Y(N).
+C The product of the transpose of the matrix and the vector.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(N+1).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in the SLAP Column
+C format. See "Description", below.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C
+C *Description
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format the "inner loops" of this routine
+C should vectorize on machines with hardware support for
+C vector gather/scatter operations. Your compiler may require
+C a compiler directive to convince it that there are no
+C implicit vector dependencies. Compiler directives for the
+C Alliant FX/Fortran and CRI CFT/CFT77 compilers are supplied
+C with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C *Cautions:
+C This routine assumes that the matrix A is stored in SLAP
+C Column format. It does not check for this (for speed) and
+C evil, ugly, ornery and nasty things will happen if the matrix
+C data structure is, in fact, not SLAP Column. Beware of the
+C wrong data structure!!!
+C
+C *See Also:
+C DSMV
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSMTV
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM
+      DOUBLE PRECISION X(N), Y(N), A(NELT)
+C
+C Zero out the result vector.
+C***FIRST EXECUTABLE STATEMENT DSMTV
+      DO 10 I = 1, N
+         Y(I) = 0.0D0
+ 10   CONTINUE
+C
+C Multiply by A-Transpose.
+C A-Transpose is stored by rows...
+CVD$R NOCONCUR
+      DO 30 IROW = 1, N
+         IBGN = JA(IROW)
+         IEND = JA(IROW+1)-1
+CVD$ ASSOC
+         DO 20 I = IBGN, IEND
+            Y(IROW) = Y(IROW) + A(I)*X(IA(I))
+ 20      CONTINUE
+ 30   CONTINUE
+C
+      IF( ISYM.EQ.1 ) THEN
+C
+C The matrix is symmetric (only one triangle is stored); need to
+C get the other half in...
+C This loop assumes that the diagonal is the first entry in
+C each column.
+C
+         DO 50 ICOL = 1, N
+            JBGN = JA(ICOL)+1
+            JEND = JA(ICOL+1)-1
+            IF( JBGN.GT.JEND ) GOTO 50
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+            DO 40 J = JBGN, JEND
+               Y(IA(J)) = Y(IA(J)) + A(J)*X(ICOL)
+ 40         CONTINUE
+ 50      CONTINUE
+      ENDIF
+      RETURN
+C------------- LAST LINE OF DSMTV FOLLOWS ----------------------------
+      END
+*DECK DSDI
+      SUBROUTINE DSDI(N, B, X, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C***BEGIN PROLOGUE DSDI
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4, D2B4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSDI-S),
+C Linear system solve, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Diagonal Matrix Vector Multiply.
+C Routine to calculate the product X = DIAG*B,
+C where DIAG is a diagonal matrix.
+C***DESCRIPTION
+C *Usage:
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Vector to multiply the diagonal by.
+C X :OUT Double Precision X(N).
+C Result of DIAG*B.
+C NELT :DUMMY Integer.
+C Retained for compatibility with SLAP MSOLVE calling sequence.
+C IA :DUMMY Integer IA(NELT).
+C Retained for compatibility with SLAP MSOLVE calling sequence.
+C JA :DUMMY Integer JA(N+1).
+C Retained for compatibility with SLAP MSOLVE calling sequence.
+C A :DUMMY Double Precision A(NELT).
+C Retained for compatibility with SLAP MSOLVE calling sequence.
+C ISYM :DUMMY Integer.
+C Retained for compatibility with SLAP MSOLVE calling sequence.
+C RWORK :IN Double Precision RWORK(USER DEFINABLE).
+C Work array holding the diagonal of some matrix to scale
+C B by. This array must be set by the user or by a call
+C to the SLAP routine DSDS or DSD2S. The length of RWORK
+C must be at least IWORK(4)+N-1 (the largest index read
+C below is RWORK(IWORK(4)-1+N)).
+C IWORK :IN Integer IWORK(10).
+C IWORK(4) holds the offset into RWORK for the diagonal matrix
+C to scale B by. This is usually set up by the SLAP pre-
+C conditioner setup routines DSDS or DSD2S.
+C
+C *Description:
+C This routine is supplied with the SLAP package to perform
+C the MSOLVE operation for iterative drivers that require
+C diagonal Scaling (e.g., DSDCG, DSDBCG). It conforms
+C to the SLAP MSOLVE CALLING CONVENTION and hence does not
+C require an interface routine as do some of the other pre-
+C conditioners supplied with SLAP.
+C
+C *Precision: Double Precision
+C *See Also:
+C DSDS, DSD2S
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSDI
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, IWORK(10)
+      DOUBLE PRECISION B(N), X(N), A(NELT), RWORK(1)
+C
+C Determine where the inverse of the diagonal
+C is in the work array and then scale by it.
+C***FIRST EXECUTABLE STATEMENT DSDI
+      LOCD = IWORK(4) - 1
+      DO 10 I = 1, N
+         X(I) = RWORK(LOCD+I)*B(I)
+ 10   CONTINUE
+      RETURN
+C------------- LAST LINE OF DSDI FOLLOWS ----------------------------
+      END
+*DECK DSLI
+      SUBROUTINE DSLI(N, B, X, NELT, IA, JA, A, ISYM, RWORK, IWORK )
+C***BEGIN PROLOGUE DSLI
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSLI-S),
+C Linear system solve, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP MSOLVE for Lower Triangle Matrix.
+C This routine acts as an interface between the SLAP generic
+C MSOLVE calling convention and the routine that actually
+C -1
+C computes L B = X.
+C
+C *Description
+C See the Description of DSLI2 for the gory details.
+C***ROUTINES CALLED DSLI2
+C***END PROLOGUE DSLI
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, IWORK(10)
+      DOUBLE PRECISION B(N), X(N), A(NELT), RWORK(1)
+C***FIRST EXECUTABLE STATEMENT DSLI
+C
+C Unpack the factor description from the integer work array:
+C the number of non-zeros NEL and the offsets of the IEL, JEL
+C (in IWORK) and EL (in RWORK) arrays, then hand off to DSLI2.
+      NEL = IWORK(1)
+      LOCIEL = IWORK(2)
+      LOCJEL = IWORK(3)
+      LOCEL = IWORK(4)
+      CALL DSLI2(N, B, X, NEL, IWORK(LOCIEL), IWORK(LOCJEL),
+     $     RWORK(LOCEL))
+C
+      RETURN
+C------------- LAST LINE OF DSLI FOLLOWS ----------------------------
+      END
+*DECK DSLI2
+      SUBROUTINE DSLI2(N, B, X, NEL, IEL, JEL, EL)
+C***BEGIN PROLOGUE DSLI2
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSLI2-S),
+C Linear system solve, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP for Lower Triangle Matrix Backsolve.
+C Routine to solve a system of the form Lx = b , where
+C L is a lower triangular matrix.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NEL, IEL(N+1), JEL(NEL)
+C DOUBLE PRECISION B(N), X(N), EL(NEL)
+C
+C CALL DSLI2( N, B, X, NEL, IEL, JEL, EL )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right hand side vector.
+C X :OUT Double Precision X(N).
+C Solution to Lx = b.
+C NEL :IN Integer.
+C Number of non-zeros in the EL array.
+C IEL :IN Integer IEL(N+1).
+C JEL :IN Integer JEL(NEL).
+C EL :IN Double Precision EL(NEL).
+C IEL, JEL, EL contain the unit lower triangular factor of
+C the incomplete decomposition of the A matrix stored in
+C SLAP Row format. The diagonal of ones *IS* stored. This
+C structure can be set up by the DS2LT routine. See "LONG
+C DESCRIPTION", below for more details about the SLAP Row
+C format.
+C
+C *Description:
+C This routine is supplied with the SLAP package as a routine
+C to perform the MSOLVE operation in the SIR for the driver
+C routine DSGS. It must be called via the SLAP MSOLVE calling
+C sequence convention interface routine DSLI.
+C **** THIS ROUTINE ITSELF DOES NOT CONFORM TO THE ****
+C **** SLAP MSOLVE CALLING CONVENTION ****
+C
+C ==================== S L A P Row format ====================
+C This routine requires that the matrix A be stored in the
+C SLAP Row format. In this format the non-zeros are stored
+C counting across rows (except for the diagonal entry, which
+C must appear first in each "row") and are stored in the
+C double precision
+C array A. In other words, for each row in the matrix put the
+C diagonal entry in A. Then put in the other non-zero
+C elements going across the row (except the diagonal) in
+C order. The JA array holds the column index for each
+C non-zero. The IA array holds the offsets into the JA, A
+C arrays for the beginning of each row. That is,
+C JA(IA(IROW)), A(IA(IROW)) points to the beginning of the
+C IROW-th row in JA and A. JA(IA(IROW+1)-1), A(IA(IROW+1)-1)
+C points to the end of the IROW-th row. Note that we always
+C have IA(N+1) = NELT+1, where N is the number of rows in
+C the matrix and NELT is the number of non-zeros in the
+C matrix.
+C
+C Here is an example of the SLAP Row storage format for a 5x5
+C Matrix (in the A and JA arrays '|' denotes the end of a row):
+C
+C 5x5 Matrix SLAP Row format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 12 15 | 22 21 | 33 35 | 44 | 55 51 53
+C |21 22 0 0 0| JA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| IA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP Row format the "inner loop" of this routine
+C should vectorize on machines with hardware support for
+C vector gather/scatter operations. Your compiler may require
+C a compiler directive to convince it that there are no
+C implicit vector dependencies. Compiler directives for the
+C Alliant FX/Fortran and CRI CFT/CFT77 compilers are supplied
+C with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C *See Also:
+C DSLI
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSLI2
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NEL, IEL(NEL), JEL(NEL)
+      DOUBLE PRECISION B(N), X(N), EL(NEL)
+C
+C Initialize the solution by copying the right-hand side
+C into it.
+C***FIRST EXECUTABLE STATEMENT DSLI2
+      DO 10 I=1,N
+         X(I) = B(I)
+ 10   CONTINUE
+C
+C Forward elimination: scale X(ICOL) by the diagonal entry
+C (stored first in its segment of EL, per the storage comment
+C above), then subtract X(ICOL)'s contribution from the
+C remaining unknowns. NOTE(review): JEL is indexed like an
+C N+1 pointer array here while IEL supplies scatter indices,
+C which is the reverse of the *Arguments description above --
+C confirm against DS2LT's output layout.
+CVD$ NOCONCUR
+      DO 30 ICOL = 1, N
+         X(ICOL) = X(ICOL)/EL(JEL(ICOL))
+         JBGN = JEL(ICOL) + 1
+         JEND = JEL(ICOL+1) - 1
+         IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NOCONCUR
+CVD$ NODEPCHK
+            DO 20 J = JBGN, JEND
+               X(IEL(J)) = X(IEL(J)) - EL(J)*X(ICOL)
+ 20         CONTINUE
+         ENDIF
+ 30   CONTINUE
+C
+      RETURN
+C------------- LAST LINE OF DSLI2 FOLLOWS ----------------------------
+      END
+*DECK DSLLTI
+ SUBROUTINE DSLLTI(N, B, X, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C***BEGIN PROLOGUE DSLLTI
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSLLTI-S),
+C Linear system solve, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP MSOLVE for LDL' (IC) Factorization.
+C This routine acts as an interface between the SLAP generic
+C MSOLVE calling convention and the routine that actually
+C -1
+C computes (LDL') B = X.
+C***DESCRIPTION
+C See the DESCRIPTION of SLLTI2 for the gory details.
+C***ROUTINES CALLED SLLTI2
+C
+C***END PROLOGUE DSLLTI
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+CWJS (1-3-13): Changing IWORK from size 1 to size 10,
+CWJS in agreement with what was done in the CESM repository.
+CWJS This is needed to avoid warnings when array bounds checking is on.
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, IWORK(10)
+ DOUBLE PRECISION B(1), X(1), A(NELT), RWORK(1)
+C NOTE(review): B and X keep the legacy extent-1 declaration; they
+C were not resized by the CWJS bounds-checking change above, so
+C consider B(N), X(N) (or B(*), X(*)) if bounds checking complains.
+C
+C Unpack the number of non-zeros in the factor (NEL) and the
+C starting locations of the IEL, JEL, EL and DINV arrays from the
+C integer work array. Note IWORK(3) holds the IEL location and
+C IWORK(2) the JEL location; this apparent swap matches the
+C upstream SLATEC source -- confirm against the DSICS setup.
+C***FIRST EXECUTABLE STATEMENT DSLLTI
+ NEL = IWORK(1)
+ LOCIEL = IWORK(3)
+ LOCJEL = IWORK(2)
+ LOCEL = IWORK(4)
+ LOCDIN = IWORK(5)
+C Perform the LDL' (incomplete Cholesky) back solve.
+ CALL SLLTI2(N, B, X, NEL, IWORK(LOCIEL), IWORK(LOCJEL),
+ $ RWORK(LOCEL), RWORK(LOCDIN))
+C
+ RETURN
+C------------- LAST LINE OF DSLLTI FOLLOWS ----------------------------
+ END
+*DECK SLLTI2
+ SUBROUTINE SLLTI2(N, B, X, NEL, IEL, JEL, EL, DINV)
+C***BEGIN PROLOGUE SLLTI2
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(SLLTI2-S),
+C Symmetric Linear system solve, Sparse,
+C Iterative Precondition, Incomplete Factorization
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP back solve routine for LDL' Factorization.
+C Routine to solve a system of the form L*D*L' X = B,
+C where L is a unit lower triangular matrix and D is a
+C diagonal matrix and ' means transpose.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NEL, IEL(N+1), JEL(NEL)
+C DOUBLE PRECISION B(N), X(N), EL(NEL), DINV(N)
+C
+C CALL SLLTI2( N, B, X, NEL, IEL, JEL, EL, DINV )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right hand side vector.
+C X :OUT Double Precision X(N).
+C Solution to L*D*L' x = b.
+C NEL :IN Integer.
+C Number of non-zeros in the EL array.
+C IEL :IN Integer IEL(N+1).
+C JEL :IN Integer JEL(NEL).
+C EL :IN Double Precision EL(NEL).
+C IEL, JEL, EL contain the unit lower triangular factor of
+C the incomplete decomposition of the A matrix stored in
+C SLAP Row format. The diagonal of ones *IS* stored. This
+C structure can be set up by the DS2LT routine. See
+C "Description", below for more details about the SLAP Row
+C format.
+C DINV :IN Double Precision DINV(N).
+C Inverse of the diagonal matrix D.
+C
+C *Description:
+C This routine is supplied with the SLAP package as a routine
+C to perform the MSOLVE operation in the SCG iteration routine
+C for the driver routine DSICCG. It must be called via the
+C SLAP MSOLVE calling sequence convention interface routine
+C DSLLI.
+C **** THIS ROUTINE ITSELF DOES NOT CONFORM TO THE ****
+C **** SLAP MSOLVE CALLING CONVENTION ****
+C
+C IEL, JEL, EL should contain the unit lower triangular factor
+C of the incomplete decomposition of the A matrix stored in
+C SLAP Row format. This IC factorization can be computed by
+C the DSICS routine. The diagonal (which is all one's) is
+C stored.
+C
+C ==================== S L A P Row format ====================
+C This routine requires that the matrix A be stored in the
+C SLAP Row format. In this format the non-zeros are stored
+C counting across rows (except for the diagonal entry, which
+C must appear first in each "row") and are stored in the
+C double precision
+C array A. In other words, for each row in the matrix put the
+C diagonal entry in A. Then put in the other non-zero
+C elements going across the row (except the diagonal) in
+C order. The JA array holds the column index for each
+C non-zero. The IA array holds the offsets into the JA, A
+C arrays for the beginning of each row. That is,
+C JA(IA(IROW)), A(IA(IROW)) points to the beginning of the
+C IROW-th row in JA and A. JA(IA(IROW+1)-1), A(IA(IROW+1)-1)
+C points to the end of the IROW-th row. Note that we always
+C have IA(N+1) = NELT+1, where N is the number of rows in
+C the matrix and NELT is the number of non-zeros in the
+C matrix.
+C
+C Here is an example of the SLAP Row storage format for a 5x5
+C Matrix (in the A and JA arrays '|' denotes the end of a row):
+C
+C 5x5 Matrix SLAP Row format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 12 15 | 22 21 | 33 35 | 44 | 55 51 53
+C |21 22 0 0 0| JA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| IA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP Row format the "inner loop" of this routine
+C should vectorize on machines with hardware support for
+C vector gather/scatter operations. Your compiler may require
+C a compiler directive to convince it that there are no
+C implicit vector dependencies. Compiler directives for the
+C Alliant FX/Fortran and CRI CFT/CFT77 compilers are supplied
+C with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C *See Also:
+C DSICCG, DSICS
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE SLLTI2
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NEL, IEL(NEL), JEL(1)
+ DOUBLE PRECISION B(N), X(N), EL(NEL), DINV(N)
+C NOTE(review): the declared extents look swapped relative to the
+C prologue -- IEL is documented as IEL(N+1) but declared IEL(NEL),
+C and JEL as JEL(NEL) but declared with legacy extent 1. The code
+C indexes IEL up to N+1 and JEL up to NEL, so consider IEL(N+1),
+C JEL(NEL) if array bounds checking is enabled.
+C
+C solve l*y = b, storing result in x.
+C***FIRST EXECUTABLE STATEMENT SLLTI2
+ DO 10 I=1,N
+ X(I) = B(I)
+ 10 CONTINUE
+ DO 30 IROW = 1, N
+ IBGN = IEL(IROW) + 1
+ IEND = IEL(IROW+1) - 1
+ IF( IBGN.LE.IEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NOCONCUR
+CVD$ NODEPCHK
+ DO 20 I = IBGN, IEND
+ X(IROW) = X(IROW) - EL(I)*X(JEL(I))
+ 20 CONTINUE
+ ENDIF
+ 30 CONTINUE
+C
+C Solve D*Z = Y, storing result in X.
+C
+ DO 40 I=1,N
+ X(I) = X(I)*DINV(I)
+ 40 CONTINUE
+C
+C Solve L-trans*X = Z.
+C
+ DO 60 IROW = N, 2, -1
+ IBGN = IEL(IROW) + 1
+ IEND = IEL(IROW+1) - 1
+ IF( IBGN.LE.IEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NOCONCUR
+CVD$ NODEPCHK
+ DO 50 I = IBGN, IEND
+ X(JEL(I)) = X(JEL(I)) - EL(I)*X(IROW)
+ 50 CONTINUE
+ ENDIF
+ 60 CONTINUE
+C
+ RETURN
+C------------- LAST LINE OF SLLTI2 FOLLOWS ---------------------------
+ END
+*DECK DSLUI
+ SUBROUTINE DSLUI(N, B, X, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C***BEGIN PROLOGUE DSLUI
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSLUI-S),
+C Non-Symmetric Linear system solve, Sparse,
+C Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP MSOLVE for LDU Factorization.
+C This routine acts as an interface between the SLAP
+C generic MSOLVE calling convention and the routine that
+C actually computes: -1
+C (LDU) B = X.
+C***DESCRIPTION
+C See the "DESCRIPTION" of DSLUI2 for the gory details.
+C***ROUTINES CALLED DSLUI2
+C***END PROLOGUE DSLUI
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+CWJS (1-2-13): Changing IWORK and RWORK from size 10,1 (respectively) to size *,
+CWJS in agreement with what was in the CESM repository. This is needed to avoid problems
+CWJS when array bounds checking is on.
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, IWORK(*)
+ DOUBLE PRECISION B(N), X(N), A(NELT), RWORK(*)
+C
+C Pull out the locations of the arrays holding the ILU
+C factorization.
+C***FIRST EXECUTABLE STATEMENT DSLUI
+ LOCIL = IWORK(1)
+ LOCJL = IWORK(2)
+ LOCIU = IWORK(3)
+ LOCJU = IWORK(4)
+ LOCL = IWORK(5)
+ LOCDIN = IWORK(6)
+ LOCU = IWORK(7)
+C
+C Solve the system LUx = b
+ CALL DSLUI2(N, B, X, IWORK(LOCIL), IWORK(LOCJL), RWORK(LOCL),
+ $ RWORK(LOCDIN), IWORK(LOCIU), IWORK(LOCJU), RWORK(LOCU) )
+C
+ RETURN
+C------------- LAST LINE OF DSLUI FOLLOWS ----------------------------
+ END
+*DECK DSLUI2
+ SUBROUTINE DSLUI2(N, B, X, IL, JL, L, DINV, IU, JU, U )
+C***BEGIN PROLOGUE DSLUI2
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSLUI2-S),
+C Non-Symmetric Linear system solve, Sparse,
+C Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP Back solve for LDU Factorization.
+C Routine to solve a system of the form L*D*U X = B,
+C where L is a unit lower triangular matrix, D is a
+C diagonal matrix, and U is a unit upper triangular matrix.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, IL(N+1), JL(NL), IU(NU), JU(N+1)
+C DOUBLE PRECISION B(N), X(N), L(NL), DINV(N), U(NU)
+C
+C CALL DSLUI2( N, B, X, IL, JL, L, DINV, IU, JU, U )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right hand side.
+C X :OUT Double Precision X(N).
+C Solution of L*D*U x = b.
+C IL :IN Integer IL(N+1).
+C JL :IN Integer JL(NL).
+C L :IN Double Precision L(NL).
+C IL, JL, L contain the unit lower triangular factor of the
+C incomplete decomposition of some matrix stored in SLAP Row
+C format. The diagonal of ones *IS* stored. This structure
+C can be set up by the DSILUS routine. See
+C "DESCRIPTION", below for more details about the SLAP
+C format.
+C DINV :IN Double Precision DINV(N).
+C Inverse of the diagonal matrix D.
+C IU :IN Integer IU(NU).
+C JU :IN Integer JU(N+1).
+C U :IN Double Precision U(NU).
+C IU, JU, U contain the unit upper triangular factor of the
+C incomplete decomposition of some matrix stored in SLAP
+C Column format. The diagonal of ones *IS* stored. This
+C structure can be set up by the DSILUS routine. See
+C "DESCRIPTION", below for more details about the SLAP
+C format.
+C
+C *Description:
+C This routine is supplied with the SLAP package as a routine
+C to perform the MSOLVE operation in the SIR and SBCG
+C iteration routines for the drivers DSILUR and DSLUBC. It
+C must be called via the SLAP MSOLVE calling sequence
+C convention interface routine DSLUI.
+C **** THIS ROUTINE ITSELF DOES NOT CONFORM TO THE ****
+C **** SLAP MSOLVE CALLING CONVENTION ****
+C
+C IL, JL, L should contain the unit lower triangular factor of
+C the incomplete decomposition of the A matrix stored in SLAP
+C Row format. IU, JU, U should contain the unit upper factor
+C of the incomplete decomposition of the A matrix stored in
+C SLAP Column format. This ILU factorization can be computed by
+C the DSILUS routine. The diagonals (which is all one's) are
+C stored.
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C ==================== S L A P Row format ====================
+C This routine requires that the matrix A be stored in the
+C SLAP Row format. In this format the non-zeros are stored
+C counting across rows (except for the diagonal entry, which
+C must appear first in each "row") and are stored in the
+C double precision
+C array A. In other words, for each row in the matrix put the
+C diagonal entry in A. Then put in the other non-zero
+C elements going across the row (except the diagonal) in
+C order. The JA array holds the column index for each
+C non-zero. The IA array holds the offsets into the JA, A
+C arrays for the beginning of each row. That is,
+C JA(IA(IROW)), A(IA(IROW)) points to the beginning of the
+C IROW-th row in JA and A. JA(IA(IROW+1)-1), A(IA(IROW+1)-1)
+C points to the end of the IROW-th row. Note that we always
+C have IA(N+1) = NELT+1, where N is the number of rows in
+C the matrix and NELT is the number of non-zeros in the
+C matrix.
+C
+C Here is an example of the SLAP Row storage format for a 5x5
+C Matrix (in the A and JA arrays '|' denotes the end of a row):
+C
+C 5x5 Matrix SLAP Row format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 12 15 | 22 21 | 33 35 | 44 | 55 51 53
+C |21 22 0 0 0| JA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| IA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format the "inner loops" of this routine
+C should vectorize on machines with hardware support for
+C vector gather/scatter operations. Your compiler may require
+C a compiler directive to convince it that there are no
+C implicit vector dependencies. Compiler directives for the
+C Alliant FX/Fortran and CRI CFT/CFT77 compilers are supplied
+C with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C *See Also:
+C DSILUS
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSLUI2
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+CWJS (1-2-13): Reimplementing changes from Jon Wolfe to make code work with array bounds checking
+CWJS INTEGER N, IL(1), JL(1), IU(1), JU(1)
+CWJS DOUBLE PRECISION B(N), X(N), L(1), DINV(N), U(1)
+ INTEGER N, IL(N+1), JL(*), IU(*), JU(N+1)
+ DOUBLE PRECISION B(N), X(N), L(*), DINV(N), U(*)
+C
+C Solve L*Y = B, storing result in X, L stored by rows.
+C***FIRST EXECUTABLE STATEMENT DSLUI2
+ DO 10 I = 1, N
+ X(I) = B(I)
+ 10 CONTINUE
+ DO 30 IROW = 2, N
+ JBGN = IL(IROW)
+ JEND = IL(IROW+1)-1
+ IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ ASSOC
+CVD$ NODEPCHK
+ DO 20 J = JBGN, JEND
+ X(IROW) = X(IROW) - L(J)*X(JL(J))
+ 20 CONTINUE
+ ENDIF
+ 30 CONTINUE
+C
+C Solve D*Z = Y, storing result in X.
+ DO 40 I=1,N
+ X(I) = X(I)*DINV(I)
+ 40 CONTINUE
+C
+C Solve U*X = Z, U stored by columns.
+ DO 60 ICOL = N, 2, -1
+ JBGN = JU(ICOL)
+ JEND = JU(ICOL+1)-1
+ IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+ DO 50 J = JBGN, JEND
+ X(IU(J)) = X(IU(J)) - U(J)*X(ICOL)
+ 50 CONTINUE
+ ENDIF
+ 60 CONTINUE
+C
+ RETURN
+C------------- LAST LINE OF DSLUI2 FOLLOWS ----------------------------
+ END
+*DECK DSLUTI
+ SUBROUTINE DSLUTI(N, B, X, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C***BEGIN PROLOGUE DSLUTI
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSLUTI-S),
+C Linear system solve, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP MTSOLV for LDU Factorization.
+C This routine acts as an interface between the SLAP
+C generic MTSOLV calling convention and the routine that
+C actually computes: -T
+C (LDU) B = X.
+C***DESCRIPTION
+C See the "DESCRIPTION" of DSLUI4 for the gory details.
+C***ROUTINES CALLED DSLUI4
+C***END PROLOGUE DSLUTI
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, IWORK(10)
+ DOUBLE PRECISION B(N), X(N), A(N), RWORK(1)
+C NOTE(review): A is declared A(N) rather than A(NELT) -- it is not
+C referenced in this routine, so this is harmless here -- and RWORK
+C keeps the legacy extent-1 declaration. Consider A(NELT) and
+C RWORK(*) (as in DSLUI) if array bounds checking is enabled.
+C
+C Pull out the pointers to the L, D and U matrices and call
+C the workhorse routine.
+C***FIRST EXECUTABLE STATEMENT DSLUTI
+ LOCIL = IWORK(1)
+ LOCJL = IWORK(2)
+ LOCIU = IWORK(3)
+ LOCJU = IWORK(4)
+ LOCL = IWORK(5)
+ LOCDIN = IWORK(6)
+ LOCU = IWORK(7)
+C
+ CALL DSLUI4(N, B, X, IWORK(LOCIL), IWORK(LOCJL), RWORK(LOCL),
+ $ RWORK(LOCDIN), IWORK(LOCIU), IWORK(LOCJU), RWORK(LOCU))
+C
+ RETURN
+C------------- LAST LINE OF DSLUTI FOLLOWS ----------------------------
+ END
+*DECK DSLUI4
+ SUBROUTINE DSLUI4(N, B, X, IL, JL, L, DINV, IU, JU, U )
+C***BEGIN PROLOGUE DSLUI4
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSLUI4-S),
+C Non-Symmetric Linear system solve, Sparse,
+C Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP back solve for LDU Factorization.
+C Routine to solve a system of the form (L*D*U)' X = B,
+C where L is a unit lower triangular matrix, D is a
+C diagonal matrix, and U is a unit upper triangular
+C matrix and ' denotes transpose.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NL, IL(N+1), JL(NL), NU, IU(NU), JU(N+1)
+C DOUBLE PRECISION B(N), X(N), L(NL), DINV(N), U(NU)
+C
+C CALL DSLUI4( N, B, X, IL, JL, L, DINV, IU, JU, U )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right hand side.
+C X :OUT Double Precision X(N).
+C Solution of (L*D*U)trans x = b.
+C IL :IN Integer IL(N+1).
+C JL :IN Integer JL(NL).
+C L :IN Double Precision L(NL).
+C IL, JL, L contain the unit lower triangular factor of the
+C incomplete decomposition of some matrix stored in SLAP Row
+C format. The diagonal of ones *IS* stored. This structure
+C can be set up by the DSILUS routine. See
+C "DESCRIPTION", below for more details about the SLAP
+C format.
+C DINV :IN Double Precision DINV(N).
+C Inverse of the diagonal matrix D.
+C IU :IN Integer IU(NU).
+C JU :IN Integer JU(N+1).
+C U :IN Double Precision U(NU).
+C IU, JU, U contain the unit upper triangular factor of the
+C incomplete decomposition of some matrix stored in SLAP
+C Column format. The diagonal of ones *IS* stored. This
+C structure can be set up by the DSILUS routine. See
+C "DESCRIPTION", below for more details about the SLAP
+C format.
+C
+C *Description:
+C This routine is supplied with the SLAP package as a routine
+C to perform the MTSOLV operation in the SBCG iteration
+C routine for the driver DSLUBC. It must be called via the
+C SLAP MTSOLV calling sequence convention interface routine
+C DSLUTI.
+C **** THIS ROUTINE ITSELF DOES NOT CONFORM TO THE ****
+C **** SLAP MSOLVE CALLING CONVENTION ****
+C
+C IL, JL, L should contain the unit lower triangular factor of
+C the incomplete decomposition of the A matrix stored in SLAP
+C Row format. IU, JU, U should contain the unit upper factor
+C of the incomplete decomposition of the A matrix stored in
+C SLAP Column format. This ILU factorization can be computed by
+C the DSILUS routine. The diagonals (which is all one's) are
+C stored.
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C ==================== S L A P Row format ====================
+C This routine requires that the matrix A be stored in the
+C SLAP Row format. In this format the non-zeros are stored
+C counting across rows (except for the diagonal entry, which
+C must appear first in each "row") and are stored in the
+C double precision
+C array A. In other words, for each row in the matrix put the
+C diagonal entry in A. Then put in the other non-zero
+C elements going across the row (except the diagonal) in
+C order. The JA array holds the column index for each
+C non-zero. The IA array holds the offsets into the JA, A
+C arrays for the beginning of each row. That is,
+C JA(IA(IROW)), A(IA(IROW)) points to the beginning of the
+C IROW-th row in JA and A. JA(IA(IROW+1)-1), A(IA(IROW+1)-1)
+C points to the end of the IROW-th row. Note that we always
+C have IA(N+1) = NELT+1, where N is the number of rows in
+C the matrix and NELT is the number of non-zeros in the
+C matrix.
+C
+C Here is an example of the SLAP Row storage format for a 5x5
+C Matrix (in the A and JA arrays '|' denotes the end of a row):
+C
+C 5x5 Matrix SLAP Row format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 12 15 | 22 21 | 33 35 | 44 | 55 51 53
+C |21 22 0 0 0| JA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| IA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format the "inner loops" of this routine
+C should vectorize on machines with hardware support for
+C vector gather/scatter operations. Your compiler may require
+C a compiler directive to convince it that there are no
+C implicit vector dependencies. Compiler directives for the
+C Alliant FX/Fortran and CRI CFT/CFT77 compilers are supplied
+C with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C *See Also:
+C DSILUS
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSLUI4
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, IL(*), JL(*), IU(*), JU(*)
+ DOUBLE PRECISION B(N), X(N), L(*), DINV(N), U(*)
+C
+C Copy the right hand side B into X.
+C***FIRST EXECUTABLE STATEMENT DSLUI4
+ DO 10 I=1,N
+ X(I) = B(I)
+ 10 CONTINUE
+C
+C Solve U'*Y = X, storing result in X, U stored by columns.
+ DO 80 IROW = 2, N
+ JBGN = JU(IROW)
+ JEND = JU(IROW+1) - 1
+ IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ ASSOC
+CVD$ NODEPCHK
+ DO 70 J = JBGN, JEND
+ X(IROW) = X(IROW) - U(J)*X(IU(J))
+ 70 CONTINUE
+ ENDIF
+ 80 CONTINUE
+C
+C Solve D*Z = Y, storing result in X.
+ DO 90 I = 1, N
+ X(I) = X(I)*DINV(I)
+ 90 CONTINUE
+C
+C Solve L'*X = Z, L stored by rows.
+ DO 110 ICOL = N, 2, -1
+ JBGN = IL(ICOL)
+ JEND = IL(ICOL+1) - 1
+ IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+ DO 100 J = JBGN, JEND
+ X(JL(J)) = X(JL(J)) - L(J)*X(ICOL)
+ 100 CONTINUE
+ ENDIF
+ 110 CONTINUE
+ RETURN
+C------------- LAST LINE OF DSLUI4 FOLLOWS ----------------------------
+ END
+*DECK DSMMTI
+ SUBROUTINE DSMMTI(N, B, X, NELT, IA, JA, A, ISYM, RWORK, IWORK )
+C***BEGIN PROLOGUE DSMMTI
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSMMTI-S),
+C Linear system solve, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP MSOLVE for LDU Factorization of Normal Equations.
+C This routine acts as an interface between the SLAP
+C generic MMTSLV calling convention and the routine that
+C actually computes: -1
+C [(LDU)*(LDU)'] B = X.
+C***DESCRIPTION
+C See the "DESCRIPTION" of DSMMI2 for the gory details.
+C***ROUTINES CALLED DSMMI2
+C***END PROLOGUE DSMMTI
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, IWORK(10)
+ DOUBLE PRECISION B(N), X(N), A(NELT), RWORK(1)
+C NOTE(review): RWORK keeps the legacy extent-1 declaration;
+C consider RWORK(*) (as in DSLUI) if array bounds checking is on.
+C
+C Pull out the locations of the arrays holding the ILU
+C factorization.
+C***FIRST EXECUTABLE STATEMENT DSMMTI
+ LOCIL = IWORK(1)
+ LOCJL = IWORK(2)
+ LOCIU = IWORK(3)
+ LOCJU = IWORK(4)
+ LOCL = IWORK(5)
+ LOCDIN = IWORK(6)
+ LOCU = IWORK(7)
+C
+ CALL DSMMI2(N, B, X, IWORK(LOCIL), IWORK(LOCJL),
+ $ RWORK(LOCL), RWORK(LOCDIN), IWORK(LOCIU),
+ $ IWORK(LOCJU), RWORK(LOCU))
+C
+ RETURN
+C------------- LAST LINE OF DSMMTI FOLLOWS ----------------------------
+ END
+*DECK DSMMI2
+ SUBROUTINE DSMMI2( N, B, X, IL, JL, L, DINV, IU, JU, U )
+C***BEGIN PROLOGUE DSMMI2
+C***DATE WRITTEN 871119 (YYMMDD)
+C***REVISION DATE 881213 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DSMMI2-S),
+C Linear system, Sparse, Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE SLAP Back solve for LDU Factorization of Normal Equations.
+C To solve a system of the form (L*D*U)*(L*D*U)' X = B,
+C where L is a unit lower triangular matrix, D is a
+C diagonal matrix, and U is a unit upper triangular
+C matrix and ' denotes transpose.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, IL(N+1), JL(NL), IU(NU), JU(N+1)
+C DOUBLE PRECISION B(N), X(N), L(NL), DINV(N), U(NU)
+C
+C CALL DSMMI2( N, B, X, IL, JL, L, DINV, IU, JU, U )
+C
+C *Arguments:
+C N :IN Integer
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right hand side.
+C X :OUT Double Precision X(N).
+C Solution of (L*D*U)(L*D*U)trans x = b.
+C IL :IN Integer IL(N+1).
+C JL :IN Integer JL(NL).
+C L :IN Double Precision L(NL).
+C IL, JL, L contain the unit lower triangular factor of the
+C incomplete decomposition of some matrix stored in SLAP Row
+C format. The diagonal of ones *IS* stored. This structure
+C can be set up by the DSILUS routine. See
+C "DESCRIPTION", below for more details about the SLAP
+C format.
+C DINV :IN Double Precision DINV(N).
+C Inverse of the diagonal matrix D.
+C IU :IN Integer IU(NU).
+C JU :IN Integer JU(N+1).
+C U :IN Double Precision U(NU).
+C IU, JU, U contain the unit upper triangular factor of the
+C incomplete decomposition of some matrix stored in SLAP
+C Column format. The diagonal of ones *IS* stored. This
+C structure can be set up by the DSILUS routine. See
+C "DESCRIPTION", below for more details about the SLAP
+C format.
+C
+C *Description:
+C This routine is supplied with the SLAP package as a routine
+C to perform the MSOLVE operation in the SBCGN iteration
+C routine for the driver DSLUCN. It must be called via the
+C SLAP MSOLVE calling sequence convention interface routine
+C DSMMTI.
+C **** THIS ROUTINE ITSELF DOES NOT CONFORM TO THE ****
+C **** SLAP MSOLVE CALLING CONVENTION ****
+C
+C IL, JL, L should contain the unit lower triangular factor of
+C the incomplete decomposition of the A matrix stored in SLAP
+C Row format. IU, JU, U should contain the unit upper factor
+C of the incomplete decomposition of the A matrix stored in
+C SLAP Column format. This ILU factorization can be computed by
+C the DSILUS routine. The diagonals (which is all one's) are
+C stored.
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C ==================== S L A P Row format ====================
+C This routine requires that the matrix A be stored in the
+C SLAP Row format. In this format the non-zeros are stored
+C counting across rows (except for the diagonal entry, which
+C must appear first in each "row") and are stored in the
+C double precision
+C array A. In other words, for each row in the matrix put the
+C diagonal entry in A. Then put in the other non-zero
+C elements going across the row (except the diagonal) in
+C order. The JA array holds the column index for each
+C non-zero. The IA array holds the offsets into the JA, A
+C arrays for the beginning of each row. That is,
+C JA(IA(IROW)), A(IA(IROW)) points to the beginning of the
+C IROW-th row in JA and A. JA(IA(IROW+1)-1), A(IA(IROW+1)-1)
+C points to the end of the IROW-th row. Note that we always
+C have IA(N+1) = NELT+1, where N is the number of rows in
+C the matrix and NELT is the number of non-zeros in the
+C matrix.
+C
+C Here is an example of the SLAP Row storage format for a 5x5
+C Matrix (in the A and JA arrays '|' denotes the end of a row):
+C
+C 5x5 Matrix SLAP Row format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 12 15 | 22 21 | 33 35 | 44 | 55 51 53
+C |21 22 0 0 0| JA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| IA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C With the SLAP format the "inner loops" of this routine
+C should vectorize on machines with hardware support for
+C vector gather/scatter operations. Your compiler may require
+C a compiler directive to convince it that there are no
+C implicit vector dependencies. Compiler directives for the
+C Alliant FX/Fortran and CRI CFT/CFT77 compilers are supplied
+C with the standard SLAP distribution.
+C
+C *Precision: Double Precision
+C *See Also:
+C DSILUS
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE DSMMI2
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, IL(1), JL(1), IU(1), JU(1)
+ DOUBLE PRECISION B(N), X(N), L(1), DINV(N), U(N)
+C NOTE(review): U is declared U(N) but is indexed by J running up
+C to JU(N+1)-1 (the number of non-zeros in U), which can exceed N;
+C the other factor arrays keep legacy extent-1 declarations. These
+C were NOT given the CWJS bounds-checking treatment applied in
+C DSLUI2 -- consider IL(N+1), JL(*), IU(*), JU(N+1), L(*), U(*)
+C if array bounds checking is enabled.
+C
+C Solve L*Y = B, storing result in X, L stored by rows.
+C***FIRST EXECUTABLE STATEMENT DSMMI2
+ DO 10 I = 1, N
+ X(I) = B(I)
+ 10 CONTINUE
+ DO 30 IROW = 2, N
+ JBGN = IL(IROW)
+ JEND = IL(IROW+1)-1
+ IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ ASSOC
+CVD$ NODEPCHK
+ DO 20 J = JBGN, JEND
+ X(IROW) = X(IROW) - L(J)*X(JL(J))
+ 20 CONTINUE
+ ENDIF
+ 30 CONTINUE
+C
+C Solve D*Z = Y, storing result in X.
+ DO 40 I=1,N
+ X(I) = X(I)*DINV(I)
+ 40 CONTINUE
+C
+C Solve U*X = Z, U stored by columns.
+ DO 60 ICOL = N, 2, -1
+ JBGN = JU(ICOL)
+ JEND = JU(ICOL+1)-1
+ IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+ DO 50 J = JBGN, JEND
+ X(IU(J)) = X(IU(J)) - U(J)*X(ICOL)
+ 50 CONTINUE
+ ENDIF
+ 60 CONTINUE
+C
+C Solve U'*Y = X, storing result in X, U stored by columns.
+ DO 80 IROW = 2, N
+ JBGN = JU(IROW)
+ JEND = JU(IROW+1) - 1
+ IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ ASSOC
+CVD$ NODEPCHK
+ DO 70 J = JBGN, JEND
+ X(IROW) = X(IROW) - U(J)*X(IU(J))
+ 70 CONTINUE
+ ENDIF
+ 80 CONTINUE
+C
+C Solve D*Z = Y, storing result in X.
+ DO 90 I = 1, N
+ X(I) = X(I)*DINV(I)
+ 90 CONTINUE
+C
+C Solve L'*X = Z, L stored by rows.
+ DO 110 ICOL = N, 2, -1
+ JBGN = IL(ICOL)
+ JEND = IL(ICOL+1) - 1
+ IF( JBGN.LE.JEND ) THEN
+CLLL. OPTION ASSERT (NOHAZARD)
+CDIR$ IVDEP
+CVD$ NODEPCHK
+ DO 100 J = JBGN, JEND
+ X(JL(J)) = X(JL(J)) - L(J)*X(ICOL)
+ 100 CONTINUE
+ ENDIF
+ 110 CONTINUE
+C
+ RETURN
+C------------- LAST LINE OF DSMMI2 FOLLOWS ----------------------------
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/domn.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/domn.f
new file mode 100644
index 0000000000..22b4aa2d7a
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/domn.f
@@ -0,0 +1,1084 @@
+*DECK DOMN
+ SUBROUTINE DOMN( N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+ $ NSAVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P,
+ $ AP, EMAP, DZ, CSAV, RWORK, IWORK )
+C***BEGIN PROLOGUE DOMN
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(DOMN-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition, Orthomin
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Preconditioned Orthomin Sparse Iterative Ax=b Solver.
+C Routine to solve a general linear system Ax = b using
+C the Preconditioned Orthomin method.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, IWORK(USER DEFINED)
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N)
+C DOUBLE PRECISION P(N,0:NSAVE), AP(N,0:NSAVE), EMAP(N,0:NSAVE)
+C DOUBLE PRECISION DZ(N), CSAV(NSAVE), RWORK(USER DEFINED)
+C EXTERNAL MATVEC, MSOLVE
+C
+C CALL DOMN(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
+C $ NSAVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R,
+C $ Z, P, AP, EMAP, DZ, CSAV, RWORK, IWORK)
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays contain the matrix data structure for A.
+C It could take any form. See "LONG DESCRIPTION", below
+C for more late breaking details...
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MATVEC :EXT External.
+C Name of a routine which performs the matrix vector multiply
+C Y = A*X given A and X. The name of the MATVEC routine must
+C be declared external in the calling program. The calling
+C sequence to MATVEC is:
+C CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
+C Where N is the number of unknowns, Y is the product A*X
+C upon return X is an input vector, NELT is the number of
+C non-zeros in the SLAP IA, JA, A storage for the matrix A.
+C ISYM is a flag which, if non-zero, denotes that A is
+C symmetric and only the lower or upper triangle is stored.
+C MSOLVE :EXT External.
+C Name of a routine which solves a linear system MZ = R for
+C Z given R with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays). The name of the MSOLVE routine must
+C be declared external in the calling program. The calling
+C sequence to MSOLVE is:
+C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and Z is the solution upon return. RWORK is a
+C double precision
+C array that can be used to pass necessary preconditioning
+C information and/or workspace to MSOLVE. IWORK is an integer
+C work array for the same purpose as RWORK.
+C NSAVE :IN Integer.
+C Number of direction vectors to save and orthogonalize
+C against. NSAVE >= 0.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Breakdown of method detected.
+C $(p,Ap) < epsilon**2$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C R :WORK Double Precision R(N).
+C Z :WORK Double Precision Z(N).
+C P :WORK Double Precision P(N,0:NSAVE).
+C AP :WORK Double Precision AP(N,0:NSAVE).
+C EMAP :WORK Double Precision EMAP(N,0:NSAVE).
+C DZ :WORK Double Precision DZ(N).
+C CSAV :WORK Double Precision CSAV(NSAVE)
+C RWORK :WORK Double Precision RWORK(USER DEFINED).
+C Double Precision array that can be used for workspace in
+C MSOLVE.
+C IWORK :WORK Integer IWORK(USER DEFINED).
+C Integer array that can be used for workspace in MSOLVE.
+C
+C *Precision: Double Precision
+C *See Also:
+C DSDOMN, DSLUOM, ISDOMN
+C
+C *Description
+C This routine does not care what matrix data structure is
+C used for A and M. It simply calls the MATVEC and MSOLVE
+C routines, with the arguments as described above. The user
+C could write any type of structure and the appropriate MATVEC
+C and MSOLVE routines. It is assumed that A is stored in the
+C IA, JA, A arrays in some fashion and that M (or INV(M)) is
+C stored in IWORK and RWORK) in some fashion. The SLAP
+C routines DSDOMN and DSLUOM are examples of this procedure.
+C
+C Two examples of matrix data structures are the: 1) SLAP
+C Triad format and 2) SLAP Column format.
+C
+C =================== S L A P Triad format ===================
+C In this format only the non-zeros are stored. They may
+C appear in *ANY* order. The user supplies three arrays of
+C length NELT, where NELT is the number of non-zeros in the
+C matrix: (IA(NELT), JA(NELT), A(NELT)). For each non-zero
+C the user puts the row and column index of that matrix
+C element in the IA and JA arrays. The value of the non-zero
+C matrix element is placed in the corresponding location of
+C the A array. This is an extremely easy data structure to
+C generate. On the other hand it is not too efficient on
+C vector computers for the iterative solution of linear
+C systems. Hence, SLAP changes this input data structure to
+C the SLAP Column format for the iteration (but does not
+C change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C***REFERENCES (NONE)
+C***ROUTINES CALLED MATVEC, MSOLVE, ISDOMN,
+C DCOPY, DDOT, DAXPY, D1MACH
+C***END PROLOGUE DOMN
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL, ITMAX
+ INTEGER ITER, IERR, IUNIT, IWORK(*)
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N)
+ DOUBLE PRECISION P(N,0:NSAVE), AP(N,0:NSAVE), EMAP(N,0:NSAVE)
+ DOUBLE PRECISION DZ(N), CSAV(NSAVE), RWORK(*)
+ EXTERNAL MATVEC, MSOLVE
+C
+C Check some of the input data.
+C***FIRST EXECUTABLE STATEMENT DOMN
+ ITER = 0
+ IERR = 0
+ IF( N.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+C Tolerances below 500*(machine epsilon) cannot be met reliably;
+C clamp TOL and report via IERR=4, then proceed.
+ EPS = D1MACH(3)
+ IF( TOL.LT.500.0*EPS ) THEN
+ TOL = 500.0*EPS
+ IERR = 4
+ ENDIF
+ FUZZ = EPS*EPS
+C
+C Calculate initial residual and pseudo-residual, and check
+C stopping criterion.
+ CALL MATVEC(N, X, R, NELT, IA, JA, A, ISYM)
+ DO 10 I = 1, N
+ R(I) = B(I) - R(I)
+ 10 CONTINUE
+ CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C
+ IF( ISDOMN(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, NSAVE,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ R, Z, P, AP, EMAP, DZ, CSAV,
+ $ RWORK, IWORK, AK, BNRM, SOLNRM) .NE. 0 ) GO TO 200
+ IF( IERR.NE.0 ) RETURN
+C
+C
+C ***** iteration loop *****
+C
+CVD$R NOVECTOR
+CVD$R NOCONCUR
+ DO 100 K = 1, ITMAX
+ ITER = K
+C IP cycles through 0..NSAVE: the slot in the circular buffers
+C P, AP, EMAP that holds the current direction vector.
+ IP = MOD( ITER-1, NSAVE+1 )
+C
+C calculate direction vector p, a*p, and (m-inv)*a*p,
+C and save if desired.
+ CALL DCOPY(N, Z, 1, P(1,IP), 1)
+ CALL MATVEC(N, P(1,IP), AP(1,IP), NELT, IA, JA, A, ISYM)
+ CALL MSOLVE(N, AP(1,IP), EMAP(1,IP), NELT, IA, JA, A, ISYM,
+ $ RWORK, IWORK)
+ IF( NSAVE.EQ.0 ) THEN
+ AKDEN = DDOT(N, EMAP, 1, EMAP, 1)
+ ELSE
+ IF( ITER.GT.1 ) THEN
+C Orthogonalize the new direction against the LMAX most
+C recently saved directions (in-place DAXPY updates).
+ LMAX = MIN( NSAVE, ITER-1 )
+ DO 20 L = 1, LMAX
+ IPO = MOD(IP+(NSAVE+1-L),NSAVE+1)
+ BKL = DDOT(N, EMAP(1,IP), 1, EMAP(1,IPO), 1)
+ BKL = BKL*CSAV(L)
+ CALL DAXPY(N, -BKL, P(1,IPO), 1, P(1,IP), 1)
+ CALL DAXPY(N, -BKL, AP(1,IPO), 1, AP(1,IP), 1)
+ CALL DAXPY(N, -BKL, EMAP(1,IPO), 1, EMAP(1,IP), 1)
+ 20 CONTINUE
+C Shift the saved inverse norms so CSAV(1) can receive the
+C value for the newest direction below.
+ IF( NSAVE.GT.1 ) THEN
+ DO 30 L = NSAVE-1, 1, -1
+ CSAV(L+1) = CSAV(L)
+ 30 CONTINUE
+ ENDIF
+ ENDIF
+ AKDEN = DDOT(N, EMAP(1,IP), 1, EMAP(1,IP), 1)
+ IF( ABS(AKDEN).LT.EPS*EPS ) THEN
+ IERR = 6
+ RETURN
+ ENDIF
+ CSAV(1) = 1./AKDEN
+ ENDIF
+C
+C calculate coefficient ak, new iterate x, new residual r, and
+C new pseudo-residual z.
+ AKNUM = DDOT(N, Z, 1, EMAP(1,IP), 1)
+ AK = AKNUM/AKDEN
+ CALL DAXPY(N, AK, P(1,IP), 1, X, 1)
+ CALL DAXPY(N, -AK, AP(1,IP), 1, R, 1)
+ CALL DAXPY(N, -AK, EMAP(1,IP), 1, Z, 1)
+C
+C check stopping criterion.
+ IF( ISDOMN(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, NSAVE,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ R, Z, P, AP, EMAP, DZ, CSAV,
+ $ RWORK, IWORK, AK, BNRM, SOLNRM) .NE. 0 ) GO TO 200
+C
+ 100 CONTINUE
+C
+C ***** end of loop *****
+C
+C Stopping criterion not satisfied.
+ ITER = ITMAX + 1
+ IERR = 2
+C
+ 200 RETURN
+C------------- LAST LINE OF DOMN FOLLOWS ----------------------------
+ END
+*DECK DSDOMN
+ SUBROUTINE DSDOMN(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSDOMN
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(SSDOMN-D),
+C Non-Symmetric Linear system solve, Sparse,
+C Iterative Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Diagonally Scaled Orthomin Sparse Iterative Ax=b Solver.
+C Routine to solve a general linear system Ax = b using
+C the Orthomin method with diagonal scaling.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(10), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR
+C DOUBLE PRECISION RWORK(7*N+3*N*NSAVE+NSAVE)
+C
+C CALL DSDOMN(N, B, X, NELT, IA, JA, A, ISYM, NSAVE, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the Matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "LONG
+C DESCRIPTION", below. If the SLAP Triad format is chosen
+C it is changed internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C NSAVE :IN Integer.
+C Number of direction vectors to save and orthogonalize against.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN( )
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Breakdown of method detected.
+C $(p,Ap) < epsilon**2$.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace.
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= 7*N+NSAVE*(3*N+1).
+C IWORK :WORK Integer IWORK(LENIW).
+C Used to hold pointers into the RWORK array.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK. LENIW >= 10.
+C
+C *Description:
+C This routine is simply a driver for the DOMN routine. It
+C calls the DSDS routine to set up the preconditioning and
+C then calls DOMN with the appropriate MATVEC and MSOLVE
+C routines.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either one
+C of these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C In this format only the non-zeros are stored. They may
+C appear in *ANY* order. The user supplies three arrays of
+C length NELT, where NELT is the number of non-zeros in the
+C matrix: (IA(NELT), JA(NELT), A(NELT)). For each non-zero
+C the user puts the row and column index of that matrix
+C element in the IA and JA arrays. The value of the non-zero
+C matrix element is placed in the corresponding location of
+C the A array. This is an extremely easy data structure to
+C generate. On the other hand it is not too efficient on
+C vector computers for the iterative solution of linear
+C systems. Hence, SLAP changes this input data structure to
+C the SLAP Column format for the iteration (but does not
+C change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to
+C be the SLAP Column format. See the "LONG DESCRIPTION",
+C below.
+C
+C *See Also:
+C DOMN, DSLUOM
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DCHKW, DSDS, DOMN, DSMV, DSDI
+C***END PROLOGUE DSDOMN
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL, ITMAX
+ INTEGER ITER, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+C A must be dimensioned NELT (all non-zeros are accessed), not N.
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+ EXTERNAL DSMV, DSDI
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSDOMN
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Set up the workspace. Compute the inverse of the
+C diagonal of the matrix.
+ LOCIW = LOCIB
+C
+C Carve RWORK into: DINV(N), R(N), Z(N), P/AP/EMAP(N,0:NSAVE),
+C DZ(N) and CSAV(NSAVE), in that order.
+ LOCDIN = LOCRB
+ LOCR = LOCDIN + N
+ LOCZ = LOCR + N
+ LOCP = LOCZ + N
+ LOCAP = LOCP + N*(NSAVE+1)
+ LOCEMA = LOCAP + N*(NSAVE+1)
+ LOCDZ = LOCEMA + N
+ LOCCSA = LOCDZ + N
+ LOCW = LOCCSA + NSAVE
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSDOMN', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+C DSDI picks up the inverse-diagonal location from IWORK(4).
+ IWORK(4) = LOCDIN
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+ CALL DSDS(N, NELT, IA, JA, A, ISYM, RWORK(LOCDIN))
+C
+C Perform the Diagonally Scaled Orthomin iteration algorithm.
+ CALL DOMN(N, B, X, NELT, IA, JA, A, ISYM, DSMV,
+ $ DSDI, NSAVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK(LOCR), RWORK(LOCZ), RWORK(LOCP), RWORK(LOCAP),
+ $ RWORK(LOCEMA), RWORK(LOCDZ), RWORK(LOCCSA),
+ $ RWORK, IWORK )
+ RETURN
+C------------- LAST LINE OF DSDOMN FOLLOWS ----------------------------
+ END
+*DECK DSLUOM
+ SUBROUTINE DSLUOM(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
+ $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK, LENW, IWORK, LENIW )
+C***BEGIN PROLOGUE DSLUOM
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(SSLUOM-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative incomplete LU Precondition
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Incomplete LU Orthomin Sparse Iterative Ax=b Solver.
+C Routine to solve a general linear system Ax = b using
+C the Orthomin method with Incomplete LU decomposition.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, LENW, IWORK(NL+NU+4*N+2), LENIW
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR
+C DOUBLE PRECISION RWORK(NL+NU+7*N+3*N*NSAVE+NSAVE)
+C
+C CALL DSLUOM(N, B, X, NELT, IA, JA, A, ISYM, NSAVE, ITOL, TOL,
+C $ ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW, IWORK, LENIW )
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :INOUT Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :INOUT Integer IA(NELT).
+C JA :INOUT Integer JA(NELT).
+C A :INOUT Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "LONG
+C DESCRIPTION", below. If the SLAP Triad format is chosen
+C it is changed internally to the SLAP Column format.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C NSAVE :IN Integer.
+C Number of direction vectors to save and orthogonalize against.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :OUT Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Return error flag.
+C IERR = 0 => All went well.
+C IERR = 1 => Insufficient storage allocated
+C for WORK or IWORK.
+C IERR = 2 => Method failed to converge in
+C ITMAX steps.
+C IERR = 3 => Error in user input. Check input
+C value of N, ITOL.
+C IERR = 4 => User error tolerance set too tight.
+C Reset to 500.0*D1MACH(3). Iteration proceeded.
+C IERR = 5 => Preconditioning matrix, M, is not
+C Positive Definite. $(r,z) < 0.0$.
+C IERR = 6 => Breakdown of the method detected.
+C $(p,Ap) < epsilon**2$.
+C IERR = 7 => Incomplete factorization broke down
+C and was fudged. Resulting preconditioning may
+C be less than the best.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C RWORK :WORK Double Precision RWORK(LENW).
+C Double Precision array used for workspace. NL is the
+C number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal). NU is the number of nonzeros in the upper
+C triangle of the matrix (including the diagonal).
+C LENW :IN Integer.
+C Length of the double precision workspace, RWORK.
+C LENW >= NL+NU+7*N+NSAVE*(3*N+1)
+C IWORK :WORK Integer IWORK(LENIW)
+C Integer array used for workspace. NL is the number of non-
+C zeros in the lower triangle of the matrix (including the
+C diagonal). NU is the number of nonzeros in the upper
+C triangle of the matrix (including the diagonal).
+C Upon return the following locations of IWORK hold information
+C which may be of use to the user:
+C IWORK(9) Amount of Integer workspace actually used.
+C IWORK(10) Amount of Double Precision workspace actually used.
+C LENIW :IN Integer.
+C Length of the integer workspace, IWORK.
+C LENIW > NL+NU+4*N+12.
+C
+C *Description:
+C This routine is simply a driver for the DOMN routine. It
+C calls the DSILUS routine to set up the preconditioning and
+C then calls DOMN with the appropriate MATVEC and MSOLVE
+C routines.
+C
+C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
+C data structures: 1) the SLAP Triad format or 2) the SLAP
+C Column format. The user can hand this routine either one
+C of these data structures and SLAP will figure out which one
+C is being used and act accordingly.
+C
+C =================== S L A P Triad format ===================
+C
+C This routine requires that the matrix A be stored in the
+C SLAP Triad format. In this format only the non-zeros are
+C stored. They may appear in *ANY* order. The user supplies
+C three arrays of length NELT, where NELT is the number of
+C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
+C each non-zero the user puts the row and column index of that
+C matrix element in the IA and JA arrays. The value of the
+C non-zero matrix element is placed in the corresponding
+C location of the A array. This is an extremely easy data
+C structure to generate. On the other hand it is not too
+C efficient on vector computers for the iterative solution of
+C linear systems. Hence, SLAP changes this input data
+C structure to the SLAP Column format for the iteration (but
+C does not change it back).
+C
+C Here is an example of the SLAP Triad storage format for a
+C 5x5 Matrix. Recall that the entries may appear in any order.
+C
+C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
+C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
+C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C =================== S L A P Column format ==================
+C This routine requires that the matrix A be stored in the
+C SLAP Column format. In this format the non-zeros are stored
+C counting down columns (except for the diagonal entry, which
+C must appear first in each "column") and are stored in the
+C double precision array A. In other words, for each column
+C in the matrix put the diagonal entry in A. Then put in the
+C other non-zero elements going down the column (except the
+C diagonal) in order. The IA array holds the row index for
+C each non-zero. The JA array holds the offsets into the IA,
+C A arrays for the beginning of each column. That is,
+C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
+C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
+C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
+C Note that we always have JA(N+1) = NELT+1, where N is the
+C number of columns in the matrix and NELT is the number of
+C non-zeros in the matrix.
+C
+C Here is an example of the SLAP Column storage format for a
+C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
+C column):
+C
+C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
+C 1 2 3 4 5 6 7 8 9 10 11
+C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
+C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
+C | 0 0 33 0 35| JA: 1 4 6 8 9 12
+C | 0 0 0 44 0|
+C |51 0 53 0 55|
+C
+C *Precision: Double Precision
+C *Side Effects:
+C The SLAP Triad format (IA, JA, A) is modified internally to
+C be the SLAP Column format. See the "LONG DESCRIPTION",
+C below.
+C
+C *See Also:
+C DOMN, DSDOMN
+C***REFERENCES (NONE)
+C***ROUTINES CALLED DS2Y, DCHKW, DSILUS, DOMN, DSMV, DSLUI
+C***END PROLOGUE DSLUOM
+ IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+ INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL, ITMAX
+ INTEGER ITER, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
+C A must be dimensioned NELT (all non-zeros are accessed), not N.
+C TOL and ERR declared explicitly for consistency with DSDOMN.
+ DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
+ EXTERNAL DSMV, DSLUI
+ PARAMETER (LOCRB=1, LOCIB=11)
+C
+C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
+C***FIRST EXECUTABLE STATEMENT DSLUOM
+ IERR = 0
+ IF( N.LT.1 .OR. NELT.LT.1 ) THEN
+ IERR = 3
+ RETURN
+ ENDIF
+ CALL DS2Y( N, NELT, IA, JA, A, ISYM )
+C
+C Count number of Non-Zero elements preconditioner ILU matrix.
+C Then set up the work arrays.
+ NL = 0
+ NU = 0
+ DO 20 ICOL = 1, N
+C Don't count diagonal.
+ JBGN = JA(ICOL)+1
+ JEND = JA(ICOL+1)-1
+ IF( JBGN.LE.JEND ) THEN
+CVD$ NOVECTOR
+ DO 10 J = JBGN, JEND
+ IF( IA(J).GT.ICOL ) THEN
+ NL = NL + 1
+ IF( ISYM.NE.0 ) NU = NU + 1
+ ELSE
+ NU = NU + 1
+ ENDIF
+ 10 CONTINUE
+ ENDIF
+ 20 CONTINUE
+C
+C Integer workspace: IL(N+1), JL(NL), IU(NU), JU(N+1), NR(N), NC(N).
+ LOCIL = LOCIB
+ LOCJL = LOCIL + N+1
+ LOCIU = LOCJL + NL
+ LOCJU = LOCIU + NU
+ LOCNR = LOCJU + N+1
+ LOCNC = LOCNR + N
+ LOCIW = LOCNC + N
+C
+C Real workspace: L(NL), DINV(N), U(NU), then the DOMN vectors
+C R, Z, P/AP/EMAP(N,0:NSAVE), DZ and CSAV(NSAVE).
+ LOCL = LOCRB
+ LOCDIN = LOCL + NL
+ LOCU = LOCDIN + N
+ LOCR = LOCU + NU
+ LOCZ = LOCR + N
+ LOCP = LOCZ + N
+ LOCAP = LOCP + N*(NSAVE+1)
+ LOCEMA = LOCAP + N*(NSAVE+1)
+ LOCDZ = LOCEMA + N*(NSAVE+1)
+ LOCCSA = LOCDZ + N
+ LOCW = LOCCSA + NSAVE
+C
+C Check the workspace allocations.
+ CALL DCHKW( 'DSLUOM', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
+ IF( IERR.NE.0 ) RETURN
+C
+C DSLUI picks up the ILU factor locations from IWORK(1..7).
+ IWORK(1) = LOCIL
+ IWORK(2) = LOCJL
+ IWORK(3) = LOCIU
+ IWORK(4) = LOCJU
+ IWORK(5) = LOCL
+ IWORK(6) = LOCDIN
+ IWORK(7) = LOCU
+ IWORK(9) = LOCIW
+ IWORK(10) = LOCW
+C
+C Compute the Incomplete LU decomposition.
+ CALL DSILUS( N, NELT, IA, JA, A, ISYM, NL, IWORK(LOCIL),
+ $ IWORK(LOCJL), RWORK(LOCL), RWORK(LOCDIN), NU, IWORK(LOCIU),
+ $ IWORK(LOCJU), RWORK(LOCU), IWORK(LOCNR), IWORK(LOCNC) )
+C
+C Perform the incomplete LU preconditioned OrthoMin algorithm.
+ CALL DOMN(N, B, X, NELT, IA, JA, A, ISYM, DSMV,
+ $ DSLUI, NSAVE, ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+ $ RWORK(LOCR), RWORK(LOCZ), RWORK(LOCP), RWORK(LOCAP),
+ $ RWORK(LOCEMA), RWORK(LOCDZ), RWORK(LOCCSA),
+ $ RWORK, IWORK )
+ RETURN
+ END
+*DECK ISDOMN
+      FUNCTION ISDOMN(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, NSAVE,
+     $     ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
+     $     R, Z, P, AP, EMAP, DZ, CSAV,
+     $     RWORK, IWORK, AK, BNRM, SOLNRM)
+C***BEGIN PROLOGUE ISDOMN
+C***REFER TO DOMN, DSDOMN, DSLUOM
+C***DATE WRITTEN 890404 (YYMMDD)
+C***REVISION DATE 890404 (YYMMDD)
+C***CATEGORY NO. D2A4
+C***KEYWORDS LIBRARY=SLATEC(SLAP),
+C TYPE=DOUBLE PRECISION(ISDOMN-D),
+C Non-Symmetric Linear system, Sparse,
+C Iterative Precondition, Stop Test, Orthomin
+C***AUTHOR Greenbaum, Anne, Courant Institute
+C Seager, Mark K., (LLNL)
+C Lawrence Livermore National Laboratory
+C PO BOX 808, L-300
+C Livermore, CA 94550 (415) 423-3141
+C seager@lll-crg.llnl.gov
+C***PURPOSE Preconditioned Orthomin Sparse Stop Test.
+C This routine calculates the stop test for the Orthomin
+C iteration scheme. It returns a nonzero if the error
+C estimate (the type of which is determined by ITOL) is
+C less than the user specified tolerance TOL.
+C***DESCRIPTION
+C *Usage:
+C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL, ITMAX
+C INTEGER ITER, IERR, IUNIT, IWORK(USER DEFINED)
+C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N)
+C DOUBLE PRECISION P(N,0:NSAVE), AP(N,0:NSAVE), EMAP(N,0:NSAVE)
+C DOUBLE PRECISION DZ(N), CSAV(NSAVE), RWORK(USER DEFINED), AK
+C DOUBLE PRECISION BNRM, SOLNRM
+C EXTERNAL MSOLVE
+C
+C IF( ISDOMN(N, B, X, NELT, IA, JA, A, ISYM, MSOLVE, NSAVE,
+C $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, R, Z, P, AP,
+C $ EMAP, DZ, CSAV, RWORK, IWORK, AK, BNRM, SOLNRM)
+C $ .NE.0 ) THEN ITERATION CONVERGED
+C
+C *Arguments:
+C N :IN Integer.
+C Order of the matrix.
+C B :IN Double Precision B(N).
+C Right-hand side vector.
+C X :IN Double Precision X(N).
+C On input X is your initial guess for solution vector.
+C On output X is the final approximate solution.
+C NELT :IN Integer.
+C Number of Non-Zeros stored in A.
+C IA :IN Integer IA(NELT).
+C JA :IN Integer JA(NELT).
+C A :IN Double Precision A(NELT).
+C These arrays should hold the matrix A in either the SLAP
+C Triad format or the SLAP Column format. See "LONG
+C DESCRIPTION" in the DSDOMN or DSLUOM.
+C ISYM :IN Integer.
+C Flag to indicate symmetric storage format.
+C If ISYM=0, all nonzero entries of the matrix are stored.
+C If ISYM=1, the matrix is symmetric, and only the upper
+C or lower triangle of the matrix is stored.
+C MSOLVE :EXT External.
+C Name of a routine which solves a linear system MZ = R for
+C Z given R with the preconditioning matrix M (M is supplied via
+C RWORK and IWORK arrays). The name of the MSOLVE routine must
+C be declared external in the calling program. The calling
+C sequence to MSOLVE is:
+C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+C Where N is the number of unknowns, R is the right-hand side
+C vector, and Z is the solution upon return. RWORK is a
+C double precision
+C array that can be used to pass necessary preconditioning
+C information and/or workspace to MSOLVE. IWORK is an integer
+C work array for the same purpose as RWORK.
+C NSAVE :IN Integer.
+C Number of direction vectors to save and orthogonalize against.
+C ITOL :IN Integer.
+C Flag to indicate type of convergence criterion.
+C If ITOL=1, iteration stops when the 2-norm of the residual
+C divided by the 2-norm of the right-hand side is less than TOL.
+C If ITOL=2, iteration stops when the 2-norm of M-inv times the
+C residual divided by the 2-norm of M-inv times the right hand
+C side is less than TOL, where M-inv is the inverse of the
+C diagonal of A.
+C ITOL=11 is often useful for checking and comparing different
+C routines. For this case, the user must supply the "exact"
+C solution or a very accurate approximation (one with an error
+C much less than TOL) through a common block,
+C COMMON /SOLBLK/ SOLN(1)
+C if ITOL=11, iteration stops when the 2-norm of the difference
+C between the iterative approximation and the user-supplied
+C solution divided by the 2-norm of the user-supplied solution
+C is less than TOL. Note that this requires the user to set up
+C the "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling routine.
+C The routine with this declaration should be loaded before the
+C stop test so that the correct length is used by the loader.
+C This procedure is not standard Fortran and may not work
+C correctly on your system (although it has worked on every
+C system the authors have tried). If ITOL is not 11 then this
+C common block is indeed standard Fortran.
+C TOL :IN Double Precision.
+C Convergence criterion, as described above.
+C ITMAX :IN Integer.
+C Maximum number of iterations.
+C ITER :IN Integer.
+C Number of iterations required to reach convergence, or
+C ITMAX+1 if convergence criterion could not be achieved in
+C ITMAX iterations.
+C ERR :OUT Double Precision.
+C Error estimate of error in final approximate solution, as
+C defined by ITOL.
+C IERR :OUT Integer.
+C Error flag. IERR is set to 3 if ITOL is not on of the
+C acceptable values, see above.
+C IUNIT :IN Integer.
+C Unit number on which to write the error at each iteration,
+C if this is desired for monitoring convergence. If unit
+C number is 0, no writing will occur.
+C R :IN Double Precision R(N).
+C The residual R = B-AX.
+C Z :WORK Double Precision Z(N).
+C P :IN Double Precision P(N,0:NSAVE).
+C Workspace used to hold the conjugate direction vector(s).
+C AP :IN Double Precision AP(N,0:NSAVE).
+C Workspace used to hold the matrix A times the P vector(s).
+C EMAP :IN Double Precision EMAP(N,0:NSAVE).
+C Workspace used to hold M-inv times the AP vector(s).
+C DZ :WORK Double Precision DZ(N).
+C Workspace.
+C CSAV :DUMMY Double Precision CSAV(NSAVE)
+C Reserved for future use.
+C RWORK :WORK Double Precision RWORK(USER DEFINED).
+C Double Precision array that can be used for workspace in
+C MSOLVE.
+C IWORK :WORK Integer IWORK(USER DEFINED).
+C Integer array that can be used for workspace in MSOLVE.
+C AK :IN Double Precision.
+C Current iterate BiConjugate Gradient iteration parameter.
+C
+C *Function Return Values:
+C 0 : Error estimate (determined by ITOL) is *NOT* less than the
+C specified tolerance, TOL. The iteration must continue.
+C 1 : Error estimate (determined by ITOL) is less than the
+C specified tolerance, TOL. The iteration can be considered
+C complete.
+C
+C *Precision: Double Precision
+C *See Also:
+C DOMN, DSDOMN, DSLUOM
+C
+C *Cautions:
+C This routine will attempt to write to the fortran logical output
+C unit IUNIT, if IUNIT .ne. 0. Thus, the user must make sure that
+C this logical unit must be attached to a file or terminal
+C before calling this routine with a non-zero value for IUNIT.
+C This routine does not check for the validity of a non-zero IUNIT
+C unit number.
+C***REFERENCES (NONE)
+C***ROUTINES CALLED MSOLVE, DNRM2
+C***COMMON BLOCKS SOLBLK
+C***END PROLOGUE ISDOMN
+      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
+      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL, ITMAX
+      INTEGER ITER, IUNIT, IWORK(*)
+      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, R(N), Z(N)
+      DOUBLE PRECISION P(N,0:NSAVE), AP(N,0:NSAVE), EMAP(N,0:NSAVE)
+      DOUBLE PRECISION DZ(N), CSAV(NSAVE), RWORK(*)
+      EXTERNAL MSOLVE
+      COMMON /SOLBLK/ SOLN(1)
+C
+C***FIRST EXECUTABLE STATEMENT ISDOMN
+C
+C     Default return value: 0 => not converged.  Set to 1 at the
+C     bottom only when ERR .LE. TOL.
+      ISDOMN = 0
+C
+      IF( ITOL.EQ.1 ) THEN
+C         err = ||Residual||/||RightHandSide|| (2-Norms).
+C         BNRM is computed only on the first call (ITER=0); since it is
+C         a dummy argument the caller preserves it between stop tests.
+         IF(ITER .EQ. 0) BNRM = DNRM2(N, B, 1)
+         ERR = DNRM2(N, R, 1)/BNRM
+      ELSE IF( ITOL.EQ.2 ) THEN
+C                  -1              -1
+C         err = ||M  Residual||/||M  RightHandSide|| (2-Norms).
+         IF(ITER .EQ. 0) THEN
+            CALL MSOLVE(N, B, DZ, NELT, IA, JA, A, ISYM, RWORK, IWORK)
+            BNRM = DNRM2(N, DZ, 1)
+         ENDIF
+         ERR = DNRM2(N, Z, 1)/BNRM
+      ELSE IF( ITOL.EQ.11 ) THEN
+C         err = ||x-TrueSolution||/||TrueSolution|| (2-Norms).
+C         SOLN is declared SOLN(1) but indexed 1..N here -- this is the
+C         legacy SLATEC COMMON-block trick described in the prologue.
+         IF(ITER .EQ. 0) SOLNRM = DNRM2(N, SOLN, 1)
+         DO 10 I = 1, N
+            DZ(I) = X(I) - SOLN(I)
+ 10      CONTINUE
+         ERR = DNRM2(N, DZ, 1)/SOLNRM
+      ELSE
+C
+C         If we get here ITOL is not one of the acceptable values.
+C         NOTE(review): 1.0E10 is a single-precision literal used as a
+C         sentinel in a double-precision context; 1.0D10 would match
+C         the declared precision.  IERR=3 flags the invalid ITOL.
+         ERR = 1.0E10
+         IERR = 3
+      ENDIF
+C
+C     Optional convergence monitoring: write a header on the first
+C     iteration, then one line per iteration (formats 1000/1010).
+      IF(IUNIT .NE. 0) THEN
+         IF( ITER.EQ.0 ) THEN
+            WRITE(IUNIT,1000) NSAVE, N, ITOL
+         ENDIF
+         WRITE(IUNIT,1010) ITER, ERR, AK
+      ENDIF
+      IF(ERR .LE. TOL) ISDOMN = 1
+C
+      RETURN
+ 1000 FORMAT(' Preconditioned Orthomin(',I3,') for ',
+     $     'N, ITOL = ',I5, I5,
+     $     /' ITER','   Error Estimate','            Alpha')
+ 1010 FORMAT(1X,I4,1X,E16.7,1X,E16.7)
+C------------- LAST LINE OF ISDOMN FOLLOWS ----------------------------
+      END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/mach.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/mach.f
new file mode 100644
index 0000000000..089d496b6f
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/mach.f
@@ -0,0 +1,1135 @@
+      REAL FUNCTION R1MACH(I)
+C
+C  SINGLE-PRECISION MACHINE CONSTANTS
+C
+C  R1MACH(1) = B**(EMIN-1), THE SMALLEST POSITIVE MAGNITUDE.
+C
+C  R1MACH(2) = B**EMAX*(1 - B**(-T)), THE LARGEST MAGNITUDE.
+C
+C  R1MACH(3) = B**(-T), THE SMALLEST RELATIVE SPACING.
+C
+C  R1MACH(4) = B**(1-T), THE LARGEST RELATIVE SPACING.
+C
+C  R1MACH(5) = LOG10(B)
+C
+C  TO ALTER THIS FUNCTION FOR A PARTICULAR ENVIRONMENT,
+C  THE DESIRED SET OF DATA STATEMENTS SHOULD BE ACTIVATED BY
+C  REMOVING THE C FROM COLUMN 1.
+C  ON RARE MACHINES A STATIC STATEMENT MAY NEED TO BE ADDED.
+C  (BUT PROBABLY MORE SYSTEMS PROHIBIT IT THAN REQUIRE IT.)
+C
+C  WHERE POSSIBLE, OCTAL OR HEXADECIMAL CONSTANTS HAVE BEEN USED
+C  TO SPECIFY THE CONSTANTS EXACTLY WHICH HAS IN SOME CASES
+C  REQUIRED THE USE OF EQUIVALENT INTEGER ARRAYS.
+C
+      INTEGER SMALL(2)
+      INTEGER LARGE(2)
+      INTEGER RIGHT(2)
+      INTEGER DIVER(2)
+      INTEGER LOG10(2)
+C
+      REAL RMACH(5)
+C
+      EQUIVALENCE (RMACH(1),SMALL(1))
+      EQUIVALENCE (RMACH(2),LARGE(1))
+      EQUIVALENCE (RMACH(3),RIGHT(1))
+      EQUIVALENCE (RMACH(4),DIVER(1))
+      EQUIVALENCE (RMACH(5),LOG10(1))
+C
+C     The EQUIVALENCEd integer arrays above exist only so that the
+C     commented-out machine-specific DATA sets below can specify the
+C     REAL constants as exact bit patterns; the active DATA set (Sun
+C     f77 / IEEE, further down) initializes RMACH directly.
+C
+C     MACHINE CONSTANTS FOR THE BURROUGHS 1700 SYSTEM.
+C
+C     DATA RMACH(1) / Z400800000 /
+C     DATA RMACH(2) / Z5FFFFFFFF /
+C     DATA RMACH(3) / Z4E9800000 /
+C     DATA RMACH(4) / Z4EA800000 /
+C     DATA RMACH(5) / Z500E730E8 /
+C
+C     MACHINE CONSTANTS FOR THE BURROUGHS 5700/6700/7700 SYSTEMS.
+C
+C     DATA RMACH(1) / O1771000000000000 /
+C     DATA RMACH(2) / O0777777777777777 /
+C     DATA RMACH(3) / O1311000000000000 /
+C     DATA RMACH(4) / O1301000000000000 /
+C     DATA RMACH(5) / O1157163034761675 /
+C
+C     MACHINE CONSTANTS FOR THE CDC 6000/7000 SERIES.
+C
+C     DATA RMACH(1) / 00014000000000000000B /
+C     DATA RMACH(2) / 37767777777777777777B /
+C     DATA RMACH(3) / 16404000000000000000B /
+C     DATA RMACH(4) / 16414000000000000000B /
+C     DATA RMACH(5) / 17164642023241175720B /
+C
+C     MACHINE CONSTANTS FOR CONVEX C-1
+C
+C     DATA RMACH(1) / '00800000'X /
+C     DATA RMACH(2) / '7FFFFFFF'X /
+C     DATA RMACH(3) / '34800000'X /
+C     DATA RMACH(4) / '35000000'X /
+C     DATA RMACH(5) / '3F9A209B'X /
+C
+C     MACHINE CONSTANTS FOR THE CRAY 1
+C
+C     DATA RMACH(1) / 200034000000000000000B /
+C     DATA RMACH(2) / 577767777777777777776B /
+C     DATA RMACH(3) / 377224000000000000000B /
+C     DATA RMACH(4) / 377234000000000000000B /
+C     DATA RMACH(5) / 377774642023241175720B /
+C
+C     MACHINE CONSTANTS FOR THE DATA GENERAL ECLIPSE S/200
+C
+C     NOTE - IT MAY BE APPROPRIATE TO INCLUDE THE FOLLOWING CARD -
+C     STATIC RMACH(5)
+C
+C     DATA SMALL/20K,0/,LARGE/77777K,177777K/
+C     DATA RIGHT/35420K,0/,DIVER/36020K,0/
+C     DATA LOG10/40423K,42023K/
+C
+C     MACHINE CONSTANTS FOR THE HARRIS SLASH 6 AND SLASH 7
+C
+C     DATA SMALL(1),SMALL(2) / '20000000, '00000201 /
+C     DATA LARGE(1),LARGE(2) / '37777777, '00000177 /
+C     DATA RIGHT(1),RIGHT(2) / '20000000, '00000352 /
+C     DATA DIVER(1),DIVER(2) / '20000000, '00000353 /
+C     DATA LOG10(1),LOG10(2) / '23210115, '00000377 /
+C
+C     MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES.
+C
+C     DATA RMACH(1) / O402400000000 /
+C     DATA RMACH(2) / O376777777777 /
+C     DATA RMACH(3) / O714400000000 /
+C     DATA RMACH(4) / O716400000000 /
+C     DATA RMACH(5) / O776464202324 /
+C
+C     MACHINE CONSTANTS FOR AT&T 3B SERIES MACHINES.
+C
+C     DATA SMALL(1) /    8388608 /
+C     DATA LARGE(1) / 2139095039 /
+C     DATA RIGHT(1) /  864026624 /
+C     DATA DIVER(1) /  872415232 /
+C     DATA LOG10(1) / 1050288283 /
+C
+C     MACHINE CONSTANTS FOR THE IBM PC AND OTHER 8087-ARITHMETIC MICROS
+C
+C     DATA SMALL(1) /    8388608 /
+C     DATA LARGE(1) / 2139095039 /
+C     DATA RIGHT(1) /  864026624 /
+C     DATA DIVER(1) /  872415232 /
+C     DATA LOG10(1) / 1050288283 /
+C
+C     MACHINE CONSTANTS FOR THE IBM 360/370 SERIES,
+C     THE XEROX SIGMA 5/7/9 AND THE SEL SYSTEMS 85/86.
+C
+C     DATA RMACH(1) / Z00100000 /
+C     DATA RMACH(2) / Z7FFFFFFF /
+C     DATA RMACH(3) / Z3B100000 /
+C     DATA RMACH(4) / Z3C100000 /
+C     DATA RMACH(5) / Z41134413 /
+C
+C     MACHINE CONSTANTS FOR THE INTERDATA 8/32
+C     WITH THE UNIX SYSTEM FORTRAN 77 COMPILER.
+C
+C     FOR THE INTERDATA FORTRAN VII COMPILER REPLACE
+C     THE Z'S SPECIFYING HEX CONSTANTS WITH Y'S.
+C
+C     DATA RMACH(1) / Z'00100000' /
+C     DATA RMACH(2) / Z'7EFFFFFF' /
+C     DATA RMACH(3) / Z'3B100000' /
+C     DATA RMACH(4) / Z'3C100000' /
+C     DATA RMACH(5) / Z'41134413' /
+C
+C     MACHINE CONSTANTS FOR THE PDP-10 (KA OR KI PROCESSOR).
+C
+C     DATA RMACH(1) / "000400000000 /
+C     DATA RMACH(2) / "377777777777 /
+C     DATA RMACH(3) / "146400000000 /
+C     DATA RMACH(4) / "147400000000 /
+C     DATA RMACH(5) / "177464202324 /
+C
+C     MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING
+C     32-BIT INTEGERS (EXPRESSED IN INTEGER AND OCTAL).
+C
+C     DATA SMALL(1) /    8388608 /
+C     DATA LARGE(1) / 2147483647 /
+C     DATA RIGHT(1) /  880803840 /
+C     DATA DIVER(1) /  889192448 /
+C     DATA LOG10(1) / 1067065499 /
+C
+C     DATA RMACH(1) / O00040000000 /
+C     DATA RMACH(2) / O17777777777 /
+C     DATA RMACH(3) / O06440000000 /
+C     DATA RMACH(4) / O06500000000 /
+C     DATA RMACH(5) / O07746420233 /
+C
+C     MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING
+C     16-BIT INTEGERS (EXPRESSED IN INTEGER AND OCTAL).
+C
+C     DATA SMALL(1),SMALL(2) /   128,     0 /
+C     DATA LARGE(1),LARGE(2) / 32767,    -1 /
+C     DATA RIGHT(1),RIGHT(2) / 13440,     0 /
+C     DATA DIVER(1),DIVER(2) / 13568,     0 /
+C     DATA LOG10(1),LOG10(2) / 16282,  8347 /
+C
+C     DATA SMALL(1),SMALL(2) / O000200, O000000 /
+C     DATA LARGE(1),LARGE(2) / O077777, O177777 /
+C     DATA RIGHT(1),RIGHT(2) / O032200, O000000 /
+C     DATA DIVER(1),DIVER(2) / O032400, O000000 /
+C     DATA LOG10(1),LOG10(2) / O037632, O020233 /
+C
+C     MACHINE CONSTANTS FOR THE SUN MICROSYSTEMS UNIX F77 COMPILER.
+C
+C     (ACTIVE SET) IEEE single-precision values.
+C     NOTE(review): RMACH(3) and RMACH(4) below differ slightly from
+C     the exact IEEE values 2.0**(-24) and 2.0**(-23); they are the
+C     historical values shipped with this file -- confirm against the
+C     netlib mach.f before changing them.
+      DATA RMACH(1) / 1.17549435E-38 /
+      DATA RMACH(2) / 3.40282347E+38 /
+      DATA RMACH(3) / 5.96016605E-08 /
+      DATA RMACH(4) / 1.19203321E-07 /
+      DATA RMACH(5) / 3.01030010E-01 /
+C
+C     MACHINE CONSTANTS FOR THE Alliant FX/8 UNIX Fortran COMPILER
+C     WITH THE -r8 COMMAND LINE OPTION.  This option causes all variables
+c     declared with 'real' to be of type 'real*8' or double precision.
+c     This option does not override the 'real*4' declarations.  These
+c     R1MACH numbers below and the coresponding I1MACH are simply the double
+c     precision or 'real*8' numbers.  If you use the -r8 your whole code
+c     (and the user libraries you link with, the system libraries are taken
+c     care of automagicly) must be compiled with this option.
+C
+c$$$      DATA RMACH(1) / 2.22507385850721D-308 /
+c$$$      DATA RMACH(2) / 1.79769313486231D+308 /
+c$$$      DATA RMACH(3) / 1.1101827117665D-16 /
+c$$$      DATA RMACH(4) / 2.2203654423533D-16 /
+c$$$      DATA RMACH(5) / 3.01029995663981E-1 /
+C
+C     MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES.
+C
+C     DATA RMACH(1) / O000400000000 /
+C     DATA RMACH(2) / O377777777777 /
+C     DATA RMACH(3) / O146400000000 /
+C     DATA RMACH(4) / O147400000000 /
+C     DATA RMACH(5) / O177464202324 /
+C
+C     MACHINE CONSTANTS FOR THE VAX UNIX F77 COMPILER
+C
+C     DATA SMALL(1) /       128 /
+C     DATA LARGE(1) /    -32769 /
+C     DATA RIGHT(1) /     13440 /
+C     DATA DIVER(1) /     13568 /
+C     DATA LOG10(1) / 547045274 /
+C
+C     MACHINE CONSTANTS FOR THE VAX-11 WITH
+C     FORTRAN IV-PLUS COMPILER
+C
+C     DATA RMACH(1) / Z00000080 /
+C     DATA RMACH(2) / ZFFFF7FFF /
+C     DATA RMACH(3) / Z00003480 /
+C     DATA RMACH(4) / Z00003500 /
+C     DATA RMACH(5) / Z209B3F9A /
+C
+C     MACHINE CONSTANTS FOR VAX/VMS VERSION 2.2
+C
+C     DATA RMACH(1) /       '80'X /
+C     DATA RMACH(2) / 'FFFF7FFF'X /
+C     DATA RMACH(3) /     '3480'X /
+C     DATA RMACH(4) /     '3500'X /
+C     DATA RMACH(5) / '209B3F9A'X /
+C
+C     MACHINE CONSTANTS FOR THE SEQUENT BALANCE 8000 AND SVS FORTRAN ON
+C     THE AT&T 7300 (UNIX PC)
+C
+C     DATA SMALL(1) / $00800000 /
+C     DATA LARGE(1) / $7F7FFFFF /
+C     DATA RIGHT(1) / $33800000 /
+C     DATA DIVER(1) / $34000000 /
+C     DATA LOG10(1) / $3E9A209B /
+C
+C     MACHINE CONSTANTS FOR RM FORTRAN (ON THE AT&T 7300)
+C
+C     DATA RMACH(1) / Z'00800000' /
+C     DATA RMACH(2) / Z'7F7FFFFF' /
+C     DATA RMACH(3) / Z'33800000' /
+C     DATA RMACH(4) / Z'34000000' /
+C     DATA RMACH(5) / Z'3E9A209B' /
+C
+C
+C     Range-check the index; an out-of-range I is a fatal coding error.
+C     I1MACH(2) (defined later in this file) is the standard output
+C     unit.
+      IF (I .LT. 1  .OR.  I .GT. 5) GOTO 999
+      R1MACH = RMACH(I)
+      RETURN
+  999 WRITE(I1MACH(2),1999) I
+ 1999 FORMAT(' R1MACH - I OUT OF BOUNDS',I10)
+      STOP
+      END
+      DOUBLE PRECISION FUNCTION D1MACH(I)
+C
+C  DOUBLE-PRECISION MACHINE CONSTANTS
+C
+C  D1MACH( 1) = B**(EMIN-1), THE SMALLEST POSITIVE MAGNITUDE.
+C
+C  D1MACH( 2) = B**EMAX*(1 - B**(-T)), THE LARGEST MAGNITUDE.
+C
+C  D1MACH( 3) = B**(-T), THE SMALLEST RELATIVE SPACING.
+C
+C  D1MACH( 4) = B**(1-T), THE LARGEST RELATIVE SPACING.
+C
+C  D1MACH( 5) = LOG10(B)
+C
+C  TO ALTER THIS FUNCTION FOR A PARTICULAR ENVIRONMENT,
+C  THE DESIRED SET OF DATA STATEMENTS SHOULD BE ACTIVATED BY
+C  REMOVING THE C FROM COLUMN 1.
+C  ON RARE MACHINES A STATIC STATEMENT MAY NEED TO BE ADDED.
+C  (BUT PROBABLY MORE SYSTEMS PROHIBIT IT THAN REQUIRE IT.)
+C
+C  WHERE POSSIBLE, OCTAL OR HEXADECIMAL CONSTANTS HAVE BEEN USED
+C  TO SPECIFY THE CONSTANTS EXACTLY WHICH HAS IN SOME CASES
+C  REQUIRED THE USE OF EQUIVALENT INTEGER ARRAYS.
+C
+CWJS (1-2-13): Using machine-independent functions to set DMACH, rather than hard-coding it
+CWJS (Note that this uses f90 features)
+CWJS      INTEGER SMALL(4)
+CWJS      INTEGER LARGE(4)
+CWJS      INTEGER RIGHT(4)
+CWJS      INTEGER DIVER(4)
+CWJS      INTEGER LOG10(4)
+C
+CWJS      DOUBLE PRECISION DMACH(5)
+      DOUBLE PRECISION, PARAMETER :: DMACH(5) =
+     &     (/ tiny(1.d0),
+     &        huge(1.d0),
+     &        epsilon(1.d0)/2.d0,
+     &        epsilon(1.d0),
+     &        log10(2.d0) /)
+C
+C     (ACTIVE) DMACH is a named constant built from the f90 intrinsics
+C     TINY/HUGE/EPSILON/LOG10, so the values are correct on any
+C     machine: DMACH(3) = EPSILON/2 is the smallest relative spacing
+C     and DMACH(4) = EPSILON the largest.  The machine-specific DATA
+C     sets below (and the EQUIVALENCE scheme) are retained only as
+C     historical documentation.
+C
+CWJS      EQUIVALENCE (DMACH(1),SMALL(1))
+CWJS      EQUIVALENCE (DMACH(2),LARGE(1))
+CWJS      EQUIVALENCE (DMACH(3),RIGHT(1))
+CWJS      EQUIVALENCE (DMACH(4),DIVER(1))
+CWJS      EQUIVALENCE (DMACH(5),LOG10(1))
+C
+C     MACHINE CONSTANTS FOR THE BURROUGHS 1700 SYSTEM.
+C
+C     DATA SMALL(1) / ZC00800000 /
+C     DATA SMALL(2) / Z000000000 /
+C
+C     DATA LARGE(1) / ZDFFFFFFFF /
+C     DATA LARGE(2) / ZFFFFFFFFF /
+C
+C     DATA RIGHT(1) / ZCC5800000 /
+C     DATA RIGHT(2) / Z000000000 /
+C
+C     DATA DIVER(1) / ZCC6800000 /
+C     DATA DIVER(2) / Z000000000 /
+C
+C     DATA LOG10(1) / ZD00E730E7 /
+C     DATA LOG10(2) / ZC77800DC0 /
+C
+C     MACHINE CONSTANTS FOR THE BURROUGHS 5700 SYSTEM.
+C
+C     DATA SMALL(1) / O1771000000000000 /
+C     DATA SMALL(2) / O0000000000000000 /
+C
+C     DATA LARGE(1) / O0777777777777777 /
+C     DATA LARGE(2) / O0007777777777777 /
+C
+C     DATA RIGHT(1) / O1461000000000000 /
+C     DATA RIGHT(2) / O0000000000000000 /
+C
+C     DATA DIVER(1) / O1451000000000000 /
+C     DATA DIVER(2) / O0000000000000000 /
+C
+C     DATA LOG10(1) / O1157163034761674 /
+C     DATA LOG10(2) / O0006677466732724 /
+C
+C     MACHINE CONSTANTS FOR THE BURROUGHS 6700/7700 SYSTEMS.
+C
+C     DATA SMALL(1) / O1771000000000000 /
+C     DATA SMALL(2) / O7770000000000000 /
+C
+C     DATA LARGE(1) / O0777777777777777 /
+C     DATA LARGE(2) / O7777777777777777 /
+C
+C     DATA RIGHT(1) / O1461000000000000 /
+C     DATA RIGHT(2) / O0000000000000000 /
+C
+C     DATA DIVER(1) / O1451000000000000 /
+C     DATA DIVER(2) / O0000000000000000 /
+C
+C     DATA LOG10(1) / O1157163034761674 /
+C     DATA LOG10(2) / O0006677466732724 /
+C
+C     MACHINE CONSTANTS FOR THE CDC 6000/7000 SERIES.
+C
+C     DATA SMALL(1) / 00604000000000000000B /
+C     DATA SMALL(2) / 00000000000000000000B /
+C
+C     DATA LARGE(1) / 37767777777777777777B /
+C     DATA LARGE(2) / 37167777777777777777B /
+C
+C     DATA RIGHT(1) / 15604000000000000000B /
+C     DATA RIGHT(2) / 15000000000000000000B /
+C
+C     DATA DIVER(1) / 15614000000000000000B /
+C     DATA DIVER(2) / 15010000000000000000B /
+C
+C     DATA LOG10(1) / 17164642023241175717B /
+C     DATA LOG10(2) / 16367571421742254654B /
+C
+C     MACHINE CONSTANTS FOR CONVEX C-1
+C
+C     DATA SMALL(1),SMALL(2) / '00100000'X, '00000000'X /
+C     DATA LARGE(1),LARGE(2) / '7FFFFFFF'X, 'FFFFFFFF'X /
+C     DATA RIGHT(1),RIGHT(2) / '3CC00000'X, '00000000'X /
+C     DATA DIVER(1),DIVER(2) / '3CD00000'X, '00000000'X /
+C     DATA LOG10(1),LOG10(2) / '3FF34413'X, '509F79FF'X /
+C
+C     MACHINE CONSTANTS FOR THE CRAY 1
+C
+C     DATA SMALL(1) / 201354000000000000000B /
+C     DATA SMALL(2) / 000000000000000000000B /
+C
+C     DATA LARGE(1) / 577767777777777777777B /
+C     DATA LARGE(2) / 000007777777777777776B /
+C
+C     DATA RIGHT(1) / 376434000000000000000B /
+C     DATA RIGHT(2) / 000000000000000000000B /
+C
+C     DATA DIVER(1) / 376444000000000000000B /
+C     DATA DIVER(2) / 000000000000000000000B /
+C
+C     DATA LOG10(1) / 377774642023241175717B /
+C     DATA LOG10(2) / 000007571421742254654B /
+C
+C     MACHINE CONSTANTS FOR THE DATA GENERAL ECLIPSE S/200
+C
+C     NOTE - IT MAY BE APPROPRIATE TO INCLUDE THE FOLLOWING CARD -
+C     STATIC DMACH(5)
+C
+C     DATA SMALL/20K,3*0/,LARGE/77777K,3*177777K/
+C     DATA RIGHT/31420K,3*0/,DIVER/32020K,3*0/
+C     DATA LOG10/40423K,42023K,50237K,74776K/
+C
+C     MACHINE CONSTANTS FOR THE HARRIS SLASH 6 AND SLASH 7
+C
+C     DATA SMALL(1),SMALL(2) / '20000000, '00000201 /
+C     DATA LARGE(1),LARGE(2) / '37777777, '37777577 /
+C     DATA RIGHT(1),RIGHT(2) / '20000000, '00000333 /
+C     DATA DIVER(1),DIVER(2) / '20000000, '00000334 /
+C     DATA LOG10(1),LOG10(2) / '23210115, '10237777 /
+C
+C     MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES.
+C
+C     DATA SMALL(1),SMALL(2) / O402400000000, O000000000000 /
+C     DATA LARGE(1),LARGE(2) / O376777777777, O777777777777 /
+C     DATA RIGHT(1),RIGHT(2) / O604400000000, O000000000000 /
+C     DATA DIVER(1),DIVER(2) / O606400000000, O000000000000 /
+C     DATA LOG10(1),LOG10(2) / O776464202324, O117571775714 /
+C
+C     MACHINE CONSTANTS FOR AT&T 3B SERIES MACHINES.
+C
+C     DATA SMALL(1),SMALL(2) /    1048576,          0 /
+C     DATA LARGE(1),LARGE(2) / 2146435071,         -1 /
+C     DATA RIGHT(1),RIGHT(2) / 1017118720,          0 /
+C     DATA DIVER(1),DIVER(2) / 1018167296,          0 /
+C     DATA LOG10(1),LOG10(2) / 1070810131, 1352628735 /
+C
+C     MACHINE CONSTANTS FOR THE IBM PC AND OTHER 8087-ARITHMETIC MICROS
+C
+C     DATA SMALL(1),SMALL(2) /          0,    1048576 /
+C     DATA LARGE(1),LARGE(2) /         -1, 2146435071 /
+C     DATA RIGHT(1),RIGHT(2) /          0, 1017118720 /
+C     DATA DIVER(1),DIVER(2) /          0, 1018167296 /
+C     DATA LOG10(1),LOG10(2) / 1352628735, 1070810131 /
+C
+C     MACHINE CONSTANTS FOR THE IBM 360/370 SERIES,
+C     THE XEROX SIGMA 5/7/9 AND THE SEL SYSTEMS 85/86.
+C
+C     DATA SMALL(1),SMALL(2) / Z00100000, Z00000000 /
+C     DATA LARGE(1),LARGE(2) / Z7FFFFFFF, ZFFFFFFFF /
+C     DATA RIGHT(1),RIGHT(2) / Z33100000, Z00000000 /
+C     DATA DIVER(1),DIVER(2) / Z34100000, Z00000000 /
+C     DATA LOG10(1),LOG10(2) / Z41134413, Z509F79FF /
+C
+C     MACHINE CONSTANTS FOR THE INTERDATA 8/32
+C     WITH THE UNIX SYSTEM FORTRAN 77 COMPILER.
+C
+C     FOR THE INTERDATA FORTRAN VII COMPILER REPLACE
+C     THE Z'S SPECIFYING HEX CONSTANTS WITH Y'S.
+C
+C     DATA SMALL(1),SMALL(2) / Z'00100000', Z'00000000' /
+C     DATA LARGE(1),LARGE(2) / Z'7EFFFFFF', Z'FFFFFFFF' /
+C     DATA RIGHT(1),RIGHT(2) / Z'33100000', Z'00000000' /
+C     DATA DIVER(1),DIVER(2) / Z'34100000', Z'00000000' /
+C     DATA LOG10(1),LOG10(2) / Z'41134413', Z'509F79FF' /
+C
+C     MACHINE CONSTANTS FOR THE PDP-10 (KA PROCESSOR).
+C
+C     DATA SMALL(1),SMALL(2) / "033400000000, "000000000000 /
+C     DATA LARGE(1),LARGE(2) / "377777777777, "344777777777 /
+C     DATA RIGHT(1),RIGHT(2) / "113400000000, "000000000000 /
+C     DATA DIVER(1),DIVER(2) / "114400000000, "000000000000 /
+C     DATA LOG10(1),LOG10(2) / "177464202324, "144117571776 /
+C
+C     MACHINE CONSTANTS FOR THE PDP-10 (KI PROCESSOR).
+C
+C     DATA SMALL(1),SMALL(2) / "000400000000, "000000000000 /
+C     DATA LARGE(1),LARGE(2) / "377777777777, "377777777777 /
+C     DATA RIGHT(1),RIGHT(2) / "103400000000, "000000000000 /
+C     DATA DIVER(1),DIVER(2) / "104400000000, "000000000000 /
+C     DATA LOG10(1),LOG10(2) / "177464202324, "047674776746 /
+C
+C     MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING
+C     32-BIT INTEGERS (EXPRESSED IN INTEGER AND OCTAL).
+C
+C     DATA SMALL(1),SMALL(2) /    8388608,           0 /
+C     DATA LARGE(1),LARGE(2) / 2147483647,          -1 /
+C     DATA RIGHT(1),RIGHT(2) /  612368384,           0 /
+C     DATA DIVER(1),DIVER(2) /  620756992,           0 /
+C     DATA LOG10(1),LOG10(2) / 1067065498, -2063872008 /
+C
+C     DATA SMALL(1),SMALL(2) / O00040000000, O00000000000 /
+C     DATA LARGE(1),LARGE(2) / O17777777777, O37777777777 /
+C     DATA RIGHT(1),RIGHT(2) / O04440000000, O00000000000 /
+C     DATA DIVER(1),DIVER(2) / O04500000000, O00000000000 /
+C     DATA LOG10(1),LOG10(2) / O07746420232, O20476747770 /
+C
+C     MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING
+C     16-BIT INTEGERS (EXPRESSED IN INTEGER AND OCTAL).
+C
+C     DATA SMALL(1),SMALL(2) /    128,      0 /
+C     DATA SMALL(3),SMALL(4) /      0,      0 /
+C
+C     DATA LARGE(1),LARGE(2) /  32767,     -1 /
+C     DATA LARGE(3),LARGE(4) /     -1,     -1 /
+C
+C     DATA RIGHT(1),RIGHT(2) /   9344,      0 /
+C     DATA RIGHT(3),RIGHT(4) /      0,      0 /
+C
+C     DATA DIVER(1),DIVER(2) /   9472,      0 /
+C     DATA DIVER(3),DIVER(4) /      0,      0 /
+C
+C     DATA LOG10(1),LOG10(2) /  16282,   8346 /
+C     DATA LOG10(3),LOG10(4) / -31493, -12296 /
+C
+C     DATA SMALL(1),SMALL(2) / O000200, O000000 /
+C     DATA SMALL(3),SMALL(4) / O000000, O000000 /
+C
+C     DATA LARGE(1),LARGE(2) / O077777, O177777 /
+C     DATA LARGE(3),LARGE(4) / O177777, O177777 /
+C
+C     DATA RIGHT(1),RIGHT(2) / O022200, O000000 /
+C     DATA RIGHT(3),RIGHT(4) / O000000, O000000 /
+C
+C     DATA DIVER(1),DIVER(2) / O022400, O000000 /
+C     DATA DIVER(3),DIVER(4) / O000000, O000000 /
+C
+C     DATA LOG10(1),LOG10(2) / O037632, O020232 /
+C     DATA LOG10(3),LOG10(4) / O102373, O147770 /
+C
+C     MACHINE CONSTANTS FOR THE SUN MICROSYSTEMS UNIX F77 COMPILER.
+C
+CWJS      DATA DMACH(1) / 2.22507385850720D-308 /
+CWJS      DATA DMACH(2) / 1.79769313486231D+308 /
+CWJS      DATA DMACH(3) / 1.1101827117665D-16 /
+CWJS      DATA DMACH(4) / 2.2203654423533D-16 /
+CWJS      DATA DMACH(5) / 3.01029995663981E-1 /
+C
+C     MACHINE CONSTANTS FOR THE ALLIANT FX/8 UNIX FORTRAN COMPILER.
+C
+c$$$      DATA DMACH(1) / 2.22507385850721D-308 /
+c$$$      DATA DMACH(2) / 1.79769313486231D+308 /
+c$$$      DATA DMACH(3) / 1.1101827117665D-16 /
+c$$$      DATA DMACH(4) / 2.2203654423533D-16 /
+c$$$      DATA DMACH(5) / 3.01029995663981E-1 /
+C
+C     MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES.
+C
+C     DATA SMALL(1),SMALL(2) / O000040000000, O000000000000 /
+C     DATA LARGE(1),LARGE(2) / O377777777777, O777777777777 /
+C     DATA RIGHT(1),RIGHT(2) / O170540000000, O000000000000 /
+C     DATA DIVER(1),DIVER(2) / O170640000000, O000000000000 /
+C     DATA LOG10(1),LOG10(2) / O177746420232, O411757177572 /
+C
+C     MACHINE CONSTANTS FOR THE VAX UNIX F77 COMPILER
+C
+C     DATA SMALL(1),SMALL(2) /        128,           0 /
+C     DATA LARGE(1),LARGE(2) /     -32769,          -1 /
+C     DATA RIGHT(1),RIGHT(2) /       9344,           0 /
+C     DATA DIVER(1),DIVER(2) /       9472,           0 /
+C     DATA LOG10(1),LOG10(2) /  546979738,  -805796613 /
+C
+C     MACHINE CONSTANTS FOR THE VAX-11 WITH
+C     FORTRAN IV-PLUS COMPILER
+C
+C     DATA SMALL(1),SMALL(2) / Z00000080, Z00000000 /
+C     DATA LARGE(1),LARGE(2) / ZFFFF7FFF, ZFFFFFFFF /
+C     DATA RIGHT(1),RIGHT(2) / Z00002480, Z00000000 /
+C     DATA DIVER(1),DIVER(2) / Z00002500, Z00000000 /
+C     DATA LOG10(1),LOG10(2) / Z209A3F9A, ZCFF884FB /
+C
+C     MACHINE CONSTANTS FOR VAX/VMS VERSION 2.2
+C
+C     DATA SMALL(1),SMALL(2) /       '80'X,        '0'X /
+C     DATA LARGE(1),LARGE(2) / 'FFFF7FFF'X, 'FFFFFFFF'X /
+C     DATA RIGHT(1),RIGHT(2) /     '2480'X,        '0'X /
+C     DATA DIVER(1),DIVER(2) /     '2500'X,        '0'X /
+C     DATA LOG10(1),LOG10(2) / '209A3F9A'X, 'CFF884FB'X /
+C
+C     MACHINE CONSTANTS FOR THE SEQUENT BALANCE 8000
+C
+C     DATA SMALL(1),SMALL(2) / $00000000,  $00100000 /
+C     DATA LARGE(1),LARGE(2) / $FFFFFFFF,  $7FEFFFFF /
+C     DATA RIGHT(1),RIGHT(2) / $00000000,  $3CA00000 /
+C     DATA DIVER(1),DIVER(2) / $00000000,  $3CB00000 /
+C     DATA LOG10(1),LOG10(2) / $509F79FF,  $3FD34413 /
+C
+C     MACHINE CONSTANTS FOR SVS FORTRAN ON THE AT&T 7300 (UNIX PC)
+C
+C     DATA SMALL(1),SMALL(2) / $00100000,  $00000000 /
+C     DATA LARGE(1),LARGE(2) / $7FEFFFFF,  $FFFFFFFF /
+C     DATA RIGHT(1),RIGHT(2) / $3CA00000,  $00000000 /
+C     DATA DIVER(1),DIVER(2) / $3CB00000,  $00000000 /
+C     DATA LOG10(1),LOG10(2) / $3FD34413,  $509F79FF /
+C
+C     MACHINE CONSTANTS FOR THE RM FORTRAN ON THE AT&T 7300 (UNIX PC)
+C
+C     DATA SMALL(1),SMALL(2) / Z'00100000', Z'00000000' /
+C     DATA LARGE(1),LARGE(2) / Z'7FEFFFFF', Z'FFFFFFFF' /
+C     DATA RIGHT(1),RIGHT(2) / Z'3CA00000', Z'00000000' /
+C     DATA DIVER(1),DIVER(2) / Z'3CB00000', Z'00000000' /
+C     DATA LOG10(1),LOG10(2) / Z'3FD34413', Z'509F79FF' /
+C
+C     Range-check the index; an out-of-range I is a fatal coding error.
+C     I1MACH(2) (defined later in this file) is the standard output
+C     unit.
+      IF (I .LT. 1  .OR.  I .GT. 5) GOTO 999
+      D1MACH = DMACH(I)
+      RETURN
+  999 WRITE(I1MACH(2),1999) I
+ 1999 FORMAT(' D1MACH - I OUT OF BOUNDS',I10)
+      STOP
+      END
+ INTEGER FUNCTION I1MACH(I)
+C
+C I/O UNIT NUMBERS.
+C
+C I1MACH( 1) = THE STANDARD INPUT UNIT.
+C
+C I1MACH( 2) = THE STANDARD OUTPUT UNIT.
+C
+C I1MACH( 3) = THE STANDARD PUNCH UNIT.
+C
+C I1MACH( 4) = THE STANDARD ERROR MESSAGE UNIT.
+C
+C WORDS.
+C
+C I1MACH( 5) = THE NUMBER OF BITS PER INTEGER STORAGE UNIT.
+C
+C I1MACH( 6) = THE NUMBER OF CHARACTERS PER INTEGER STORAGE UNIT.
+C
+C INTEGERS.
+C
+C ASSUME INTEGERS ARE REPRESENTED IN THE S-DIGIT, BASE-A FORM
+C
+C SIGN ( X(S-1)*A**(S-1) + ... + X(1)*A + X(0) )
+C
+C WHERE 0 .LE. X(I) .LT. A FOR I=0,...,S-1.
+C
+C I1MACH( 7) = A, THE BASE.
+C
+C I1MACH( 8) = S, THE NUMBER OF BASE-A DIGITS.
+C
+C I1MACH( 9) = A**S - 1, THE LARGEST MAGNITUDE.
+C
+C FLOATING-POINT NUMBERS.
+C
+C ASSUME FLOATING-POINT NUMBERS ARE REPRESENTED IN THE T-DIGIT,
+C BASE-B FORM
+C
+C SIGN (B**E)*( (X(1)/B) + ... + (X(T)/B**T) )
+C
+C WHERE 0 .LE. X(I) .LT. B FOR I=1,...,T,
+C 0 .LT. X(1), AND EMIN .LE. E .LE. EMAX.
+C
+C I1MACH(10) = B, THE BASE.
+C
+C SINGLE-PRECISION
+C
+C I1MACH(11) = T, THE NUMBER OF BASE-B DIGITS.
+C
+C I1MACH(12) = EMIN, THE SMALLEST EXPONENT E.
+C
+C I1MACH(13) = EMAX, THE LARGEST EXPONENT E.
+C
+C DOUBLE-PRECISION
+C
+C I1MACH(14) = T, THE NUMBER OF BASE-B DIGITS.
+C
+C I1MACH(15) = EMIN, THE SMALLEST EXPONENT E.
+C
+C I1MACH(16) = EMAX, THE LARGEST EXPONENT E.
+C
+C TO ALTER THIS FUNCTION FOR A PARTICULAR ENVIRONMENT,
+C THE DESIRED SET OF DATA STATEMENTS SHOULD BE ACTIVATED BY
+C REMOVING THE C FROM COLUMN 1. ALSO, THE VALUES OF
+C I1MACH(1) - I1MACH(4) SHOULD BE CHECKED FOR CONSISTENCY
+C WITH THE LOCAL OPERATING SYSTEM.
+C ON RARE MACHINES A STATIC STATEMENT MAY NEED TO BE ADDED.
+C (BUT PROBABLY MORE SYSTEMS PROHIBIT IT THAN REQUIRE IT.)
+C
+ INTEGER IMACH(16),OUTPUT
+C
+ EQUIVALENCE (IMACH(4),OUTPUT)
+C
+C MACHINE CONSTANTS FOR THE BURROUGHS 1700 SYSTEM.
+C
+C DATA IMACH( 1) / 7 /
+C DATA IMACH( 2) / 2 /
+C DATA IMACH( 3) / 2 /
+C DATA IMACH( 4) / 2 /
+C DATA IMACH( 5) / 36 /
+C DATA IMACH( 6) / 4 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 33 /
+C DATA IMACH( 9) / Z1FFFFFFFF /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 24 /
+C DATA IMACH(12) / -256 /
+C DATA IMACH(13) / 255 /
+C DATA IMACH(14) / 60 /
+C DATA IMACH(15) / -256 /
+C DATA IMACH(16) / 255 /
+C
+C MACHINE CONSTANTS FOR THE BURROUGHS 5700 SYSTEM.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 48 /
+C DATA IMACH( 6) / 6 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 39 /
+C DATA IMACH( 9) / O0007777777777777 /
+C DATA IMACH(10) / 8 /
+C DATA IMACH(11) / 13 /
+C DATA IMACH(12) / -50 /
+C DATA IMACH(13) / 76 /
+C DATA IMACH(14) / 26 /
+C DATA IMACH(15) / -50 /
+C DATA IMACH(16) / 76 /
+C
+C MACHINE CONSTANTS FOR THE BURROUGHS 6700/7700 SYSTEMS.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 48 /
+C DATA IMACH( 6) / 6 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 39 /
+C DATA IMACH( 9) / O0007777777777777 /
+C DATA IMACH(10) / 8 /
+C DATA IMACH(11) / 13 /
+C DATA IMACH(12) / -50 /
+C DATA IMACH(13) / 76 /
+C DATA IMACH(14) / 26 /
+C DATA IMACH(15) / -32754 /
+C DATA IMACH(16) / 32780 /
+C
+C MACHINE CONSTANTS FOR THE CDC 6000/7000 SERIES.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 60 /
+C DATA IMACH( 6) / 10 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 48 /
+C DATA IMACH( 9) / 00007777777777777777B /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 48 /
+C DATA IMACH(12) / -974 /
+C DATA IMACH(13) / 1070 /
+C DATA IMACH(14) / 96 /
+C DATA IMACH(15) / -927 /
+C DATA IMACH(16) / 1070 /
+C
+C MACHINE CONSTANTS FOR CONVEX C-1
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 32 /
+C DATA IMACH( 6) / 4 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 31 /
+C DATA IMACH( 9) / 2147483647 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 24 /
+C DATA IMACH(12) / -128 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 53 /
+C DATA IMACH(15) /-1024 /
+C DATA IMACH(16) / 1023 /
+C
+C MACHINE CONSTANTS FOR THE CRAY 1
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 102 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 64 /
+C DATA IMACH( 6) / 8 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 46 /
+C DATA IMACH( 9) / 1777777777777777B /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 47 /
+C DATA IMACH(12) / -8189 /
+C DATA IMACH(13) / 8190 /
+C DATA IMACH(14) / 94 /
+C DATA IMACH(15) / -8099 /
+C DATA IMACH(16) / 8190 /
+C
+C MACHINE CONSTANTS FOR THE DATA GENERAL ECLIPSE S/200
+C
+C DATA IMACH( 1) / 11 /
+C DATA IMACH( 2) / 12 /
+C DATA IMACH( 3) / 8 /
+C DATA IMACH( 4) / 10 /
+C DATA IMACH( 5) / 16 /
+C DATA IMACH( 6) / 2 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 15 /
+C DATA IMACH( 9) /32767 /
+C DATA IMACH(10) / 16 /
+C DATA IMACH(11) / 6 /
+C DATA IMACH(12) / -64 /
+C DATA IMACH(13) / 63 /
+C DATA IMACH(14) / 14 /
+C DATA IMACH(15) / -64 /
+C DATA IMACH(16) / 63 /
+C
+C MACHINE CONSTANTS FOR THE HARRIS SLASH 6 AND SLASH 7
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 0 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 24 /
+C DATA IMACH( 6) / 3 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 23 /
+C DATA IMACH( 9) / 8388607 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 23 /
+C DATA IMACH(12) / -127 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 38 /
+C DATA IMACH(15) / -127 /
+C DATA IMACH(16) / 127 /
+C
+C MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 43 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 36 /
+C DATA IMACH( 6) / 4 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 35 /
+C DATA IMACH( 9) / O377777777777 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 27 /
+C DATA IMACH(12) / -127 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 63 /
+C DATA IMACH(15) / -127 /
+C DATA IMACH(16) / 127 /
+C
+C MACHINE CONSTANTS FOR IEEE ARITHMETIC MACHINES (E.G., AT&T 3B
+C SERIES COMPUTERS AND 8087-BASED MACHINES LIKE THE IBM PC).
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 32 /
+C DATA IMACH( 6) / 4 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 31 /
+C DATA IMACH( 9) / 2147483647 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 24 /
+C DATA IMACH(12) / -125 /
+C DATA IMACH(13) / 128 /
+C DATA IMACH(14) / 53 /
+C DATA IMACH(15) / -1021 /
+C DATA IMACH(16) / 1024 /
+C
+C MACHINE CONSTANTS FOR THE IBM 360/370 SERIES,
+C THE XEROX SIGMA 5/7/9 AND THE SEL SYSTEMS 85/86.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 32 /
+C DATA IMACH( 6) / 4 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 31 /
+C DATA IMACH( 9) / Z7FFFFFFF /
+C DATA IMACH(10) / 16 /
+C DATA IMACH(11) / 6 /
+C DATA IMACH(12) / -64 /
+C DATA IMACH(13) / 63 /
+C DATA IMACH(14) / 14 /
+C DATA IMACH(15) / -64 /
+C DATA IMACH(16) / 63 /
+C
+C MACHINE CONSTANTS FOR THE INTERDATA 8/32
+C WITH THE UNIX SYSTEM FORTRAN 77 COMPILER.
+C
+C FOR THE INTERDATA FORTRAN VII COMPILER REPLACE
+C THE Z'S SPECIFYING HEX CONSTANTS WITH Y'S.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 6 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 32 /
+C DATA IMACH( 6) / 4 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 31 /
+C DATA IMACH( 9) / Z'7FFFFFFF' /
+C DATA IMACH(10) / 16 /
+C DATA IMACH(11) / 6 /
+C DATA IMACH(12) / -64 /
+C DATA IMACH(13) / 62 /
+C DATA IMACH(14) / 14 /
+C DATA IMACH(15) / -64 /
+C DATA IMACH(16) / 62 /
+C
+C MACHINE CONSTANTS FOR THE PDP-10 (KA PROCESSOR).
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 36 /
+C DATA IMACH( 6) / 5 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 35 /
+C DATA IMACH( 9) / "377777777777 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 27 /
+C DATA IMACH(12) / -128 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 54 /
+C DATA IMACH(15) / -101 /
+C DATA IMACH(16) / 127 /
+C
+C MACHINE CONSTANTS FOR THE PDP-10 (KI PROCESSOR).
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 36 /
+C DATA IMACH( 6) / 5 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 35 /
+C DATA IMACH( 9) / "377777777777 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 27 /
+C DATA IMACH(12) / -128 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 62 /
+C DATA IMACH(15) / -128 /
+C DATA IMACH(16) / 127 /
+C
+C MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING
+C 32-BIT INTEGER ARITHMETIC.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 32 /
+C DATA IMACH( 6) / 4 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 31 /
+C DATA IMACH( 9) / 2147483647 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 24 /
+C DATA IMACH(12) / -127 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 56 /
+C DATA IMACH(15) / -127 /
+C DATA IMACH(16) / 127 /
+C
+C MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING
+C 16-BIT INTEGER ARITHMETIC.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 16 /
+C DATA IMACH( 6) / 2 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 15 /
+C DATA IMACH( 9) / 32767 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 24 /
+C DATA IMACH(12) / -127 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 56 /
+C DATA IMACH(15) / -127 /
+C DATA IMACH(16) / 127 /
+C
+C MACHINE CONSTANTS FOR THE SUN MICROSYSTEMS UNIX F77 COMPILER.
+C
+ DATA IMACH( 1) / 5 /
+ DATA IMACH( 2) / 6 /
+ DATA IMACH( 3) / 6 /
+ DATA IMACH( 4) / 0 /
+ DATA IMACH( 5) / 32 /
+ DATA IMACH( 6) / 4 /
+ DATA IMACH( 7) / 2 /
+ DATA IMACH( 8) / 32 /
+ DATA IMACH( 9) /2147483647/
+ DATA IMACH(10) / 2 /
+ DATA IMACH(11) / 24 /
+ DATA IMACH(12) / -126 /
+ DATA IMACH(13) / 128 /
+ DATA IMACH(14) / 53 /
+ DATA IMACH(15) / -1022 /
+ DATA IMACH(16) / 1024 /
+C
+C MACHINE CONSTANTS FOR THE ALLIANT FX/8 UNIX FORTRAN COMPILER.
+C
+c$$$ DATA IMACH( 1) / 5 /
+c$$$ DATA IMACH( 2) / 6 /
+c$$$ DATA IMACH( 3) / 6 /
+c$$$ DATA IMACH( 4) / 0 /
+c$$$ DATA IMACH( 5) / 32 /
+c$$$ DATA IMACH( 6) / 4 /
+c$$$ DATA IMACH( 7) / 2 /
+c$$$ DATA IMACH( 8) / 32 /
+c$$$ DATA IMACH( 9) /2147483647/
+c$$$ DATA IMACH(10) / 2 /
+c$$$ DATA IMACH(11) / 24 /
+c$$$ DATA IMACH(12) / -126 /
+c$$$ DATA IMACH(13) / 128 /
+c$$$ DATA IMACH(14) / 53 /
+c$$$ DATA IMACH(15) / -1022 /
+c$$$ DATA IMACH(16) / 1024 /
+C
+C MACHINE CONSTANTS FOR THE ALLIANT FX/8 UNIX FORTRAN COMPILER.
+C WITH THE -r8 COMMAND LINE OPTION.
+C
+c$$$ DATA IMACH( 1) / 5 /
+c$$$ DATA IMACH( 2) / 6 /
+c$$$ DATA IMACH( 3) / 6 /
+c$$$ DATA IMACH( 4) / 0 /
+c$$$ DATA IMACH( 5) / 32 /
+c$$$ DATA IMACH( 6) / 4 /
+c$$$ DATA IMACH( 7) / 2 /
+c$$$ DATA IMACH( 8) / 32 /
+c$$$ DATA IMACH( 9) /2147483647/
+c$$$ DATA IMACH(10) / 2 /
+c$$$ DATA IMACH(11) / 53 /
+c$$$ DATA IMACH(12) / -1022 /
+c$$$ DATA IMACH(13) / 1024 /
+c$$$ DATA IMACH(14) / 53 /
+c$$$ DATA IMACH(15) / -1022 /
+c$$$ DATA IMACH(16) / 1024 /
+C
+C MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES.
+C
+C NOTE THAT THE PUNCH UNIT, I1MACH(3), HAS BEEN SET TO 7
+C WHICH IS APPROPRIATE FOR THE UNIVAC-FOR SYSTEM.
+C IF YOU HAVE THE UNIVAC-FTN SYSTEM, SET IT TO 1.
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 36 /
+C DATA IMACH( 6) / 6 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 35 /
+C DATA IMACH( 9) / O377777777777 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 27 /
+C DATA IMACH(12) / -128 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 60 /
+C DATA IMACH(15) /-1024 /
+C DATA IMACH(16) / 1023 /
+C
+C MACHINE CONSTANTS FOR VAX
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 32 /
+C DATA IMACH( 6) / 4 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 31 /
+C DATA IMACH( 9) / 2147483647 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 24 /
+C DATA IMACH(12) / -127 /
+C DATA IMACH(13) / 127 /
+C DATA IMACH(14) / 56 /
+C DATA IMACH(15) / -127 /
+C DATA IMACH(16) / 127 /
+C
+C MACHINE CONSTANTS FOR THE SEQUENT BALANCE 8000 AND SVS FORTRAN ON
+C THE AT&T 7300 (UNIX PC)
+C
+C DATA IMACH( 1) / 0 /
+C DATA IMACH( 2) / 0 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 0 /
+C DATA IMACH( 5) / 32 /
+C DATA IMACH( 6) / 1 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 31 /
+C DATA IMACH( 9) / 2147483647 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 24 /
+C DATA IMACH(12) / -125 /
+C DATA IMACH(13) / 128 /
+C DATA IMACH(14) / 53 /
+C DATA IMACH(15) / -1021 /
+C DATA IMACH(16) / 1024 /
+C
+C MACHINE CONSTANTS FOR THE RM FORTRAN ON THE AT&T 7300 (UNIX PC)
+C
+C DATA IMACH( 1) / 5 /
+C DATA IMACH( 2) / 6 /
+C DATA IMACH( 3) / 7 /
+C DATA IMACH( 4) / 6 /
+C DATA IMACH( 5) / 32 /
+C DATA IMACH( 6) / 1 /
+C DATA IMACH( 7) / 2 /
+C DATA IMACH( 8) / 31 /
+C DATA IMACH( 9) / 2147483647 /
+C DATA IMACH(10) / 2 /
+C DATA IMACH(11) / 24 /
+C DATA IMACH(12) / -125 /
+C DATA IMACH(13) / 128 /
+C DATA IMACH(14) / 53 /
+C DATA IMACH(15) / -1021 /
+C DATA IMACH(16) / 1024 /
+C
+ IF (I .LT. 1 .OR. I .GT. 16) GO TO 999
+ I1MACH=IMACH(I)
+ RETURN
+ 999 WRITE(OUTPUT,1999) I
+ 1999 FORMAT(' I1MACH - I OUT OF BOUNDS',I10)
+ STOP
+ END
+
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/runTests.sh b/components/cism/glimmer-cism/libglimmer-solve/SLAP/runTests.sh
new file mode 100755
index 0000000000..3232d145b5
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/runTests.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+echo 2 | ./dlapqc
diff --git a/components/cism/glimmer-cism/libglimmer-solve/SLAP/xersla.f b/components/cism/glimmer-cism/libglimmer-solve/SLAP/xersla.f
new file mode 100644
index 0000000000..17eb531ca7
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/SLAP/xersla.f
@@ -0,0 +1,924 @@
+CVD$G NOVECTOR
+CVD$G NOCONCUR
+*deck xerabt
+      subroutine xerabt(messg,nmessg)
+c***begin prologue xerabt
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose abort program execution and print error message.
+c***description
+c
+c abstract
+c ***note*** machine dependent routine
+c xerabt aborts the execution of the program.
+c the error message causing the abort is given in the calling
+c sequence, in case one needs it for printing on a dayfile,
+c for example.
+c
+c description of parameters
+c messg and nmessg are as in xerror, except that nmessg may
+c be zero, in which case no message is being supplied.
+c
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 1 august 1982
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called (none)
+c***end prologue xerabt
+      dimension messg(nmessg)
+c***first executable statement xerabt
+c this implementation ignores messg/nmessg and simply terminates
+c with a nonzero status.  "call exit" is a widely supported (but
+c nonstandard) f77 extension.
+      call exit(1)
+      end
+*deck xerctl
+      subroutine xerctl(messg1,nmessg,nerr,level,kontrl)
+c***begin prologue xerctl
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose allow user control over handling of errors.
+c***description
+c
+c abstract
+c allows user control over handling of individual errors.
+c just after each message is recorded, but before it is
+c processed any further (i.e., before it is printed or
+c a decision to abort is made), a call is made to xerctl.
+c if the user has provided his own version of xerctl, he
+c can then override the value of kontrol used in processing
+c this message by redefining its value.
+c kontrl may be set to any value from -2 to 2.
+c the meanings for kontrl are the same as in xsetf, except
+c that the value of kontrl changes only for this message.
+c if kontrl is set to a value outside the range from -2 to 2,
+c it will be moved back into that range.
+c
+c description of parameters
+c
+c --input--
+c messg1 - the first word (only) of the error message.
+c nmessg - same as in the call to xerror or xerrwv.
+c nerr - same as in the call to xerror or xerrwv.
+c level - same as in the call to xerror or xerrwv.
+c kontrl - the current value of the control flag as set
+c by a call to xsetf.
+c
+c --output--
+c kontrl - the new value of kontrl. if kontrl is not
+c defined, it will remain at its original value.
+c this changed value of control affects only
+c the current occurrence of the current message.
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called (none)
+c***end prologue xerctl
+      character*20 messg1
+c***first executable statement xerctl
+c default implementation is a deliberate no-op: kontrl is returned
+c unchanged.  users customize error handling by linking their own
+c version of this routine in place of this one.
+      return
+      end
+*deck xerprt
+      subroutine xerprt(messg,nmessg)
+c***begin prologue xerprt
+c***date written 790801 (yymmdd)
+c***revision date 851213 (yymmdd)
+c***category no. r3
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose print error messages.
+c***description
+c
+c abstract
+c print the hollerith message in messg, of length nmessg,
+c on each file indicated by xgetua.
+c (note: nmessg is accepted for interface compatibility but the
+c length actually printed is len(messg).)
+c latest revision --- 1 august 1985
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called i1mach,xgetua
+c***end prologue xerprt
+      integer lun(5)
+      character*(*) messg
+c obtain unit numbers and write line to each unit
+c***first executable statement xerprt
+      call xgetua(lun,nunit)
+      lenmes = len(messg)
+      do 20 kunit=1,nunit
+         iunit = lun(kunit)
+c a stored unit number of zero means the default unit, i1mach(4)
+         if (iunit.eq.0) iunit = i1mach(4)
+c write the message in pieces of at most 72 characters per line
+         do 10 ichar=1,lenmes,72
+            last = min0(ichar+71 , lenmes)
+            write (iunit,'(1x,a)') messg(ichar:last)
+   10    continue
+   20 continue
+      return
+      end
+*deck xerror
+      subroutine xerror(messg,nmessg,nerr,level)
+c***begin prologue xerror
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose process an error (diagnostic) message.
+c***description
+c
+c abstract
+c xerror processes a diagnostic message, in a manner
+c determined by the value of level and the current value
+c of the library error control flag, kontrl.
+c (see subroutine xsetf for details.)
+c
+c description of parameters
+c --input--
+c messg - the hollerith message to be processed, containing
+c no more than 72 characters.
+c nmessg- the actual number of characters in messg.
+c nerr - the error number associated with this message.
+c nerr must not be zero.
+c level - error category.
+c =2 means this is an unconditionally fatal error.
+c =1 means this is a recoverable error. (i.e., it is
+c non-fatal if xsetf has been appropriately called.)
+c =0 means this is a warning message only.
+c =-1 means this is a warning message which is to be
+c printed at most once, regardless of how many
+c times this call is executed.
+c
+c examples
+c call xerror('smooth -- num was zero.',23,1,2)
+c call xerror('integ -- less than full accuracy achieved.',
+c 1 43,2,1)
+c call xerror('rooter -- actual zero of f found before interval f
+c 1ully collapsed.',65,3,0)
+c call xerror('exp -- underflows being set to zero.',39,1,-1)
+c
+c written by ron jones, with slatec common math library subcommittee
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called xerrwv
+c***end prologue xerror
+      character*(*) messg
+c***first executable statement xerror
+c thin wrapper: delegate to xerrwv with no auxiliary integer or
+c real values attached to the message.
+      call xerrwv(messg,nmessg,nerr,level,0,0,0,0,0.,0.)
+      return
+      end
+*deck xerrwv
+      subroutine xerrwv(messg,nmessg,nerr,level,ni,i1,i2,nr,r1,r2)
+c***begin prologue xerrwv
+c***date written 800319 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose process an error message allowing 2 integer and 2 real
+c values to be included in the message.
+c***description
+c
+c abstract
+c xerrwv processes a diagnostic message, in a manner
+c determined by the value of level and the current value
+c of the library error control flag, kontrl.
+c (see subroutine xsetf for details.)
+c in addition, up to two integer values and two real
+c values may be printed along with the message.
+c
+c description of parameters
+c --input--
+c messg - the hollerith message to be processed.
+c nmessg- the actual number of characters in messg.
+c nerr - the error number associated with this message.
+c nerr must not be zero.
+c level - error category.
+c =2 means this is an unconditionally fatal error.
+c =1 means this is a recoverable error. (i.e., it is
+c non-fatal if xsetf has been appropriately called.)
+c =0 means this is a warning message only.
+c =-1 means this is a warning message which is to be
+c printed at most once, regardless of how many
+c times this call is executed.
+c ni - number of integer values to be printed. (0 to 2)
+c i1 - first integer value.
+c i2 - second integer value.
+c nr - number of real values to be printed. (0 to 2)
+c r1 - first real value.
+c r2 - second real value.
+c
+c examples
+c call xerrwv('smooth -- num (=i1) was zero.',29,1,2,
+c 1 1,num,0,0,0.,0.)
+c call xerrwv('quadxy -- requested error (r1) less than minimum (
+c 1r2).,54,77,1,0,0,0,2,errreq,errmin)
+c
+c latest revision --- 1 august 1985
+c written by ron jones, with slatec common math library subcommittee
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called fdump,i1mach,j4save,xerabt,xerctl,xerprt,xersav,
+c xgetua
+c***end prologue xerrwv
+      character*(*) messg
+      character*20 lfirst
+      character*37 form
+      dimension lun(5)
+c get flags
+c***first executable statement xerrwv
+c j4save slot 2 holds the error control flag; slot 4 holds the
+c maximum number of times any one message may be printed
+      lkntrl = j4save(2,0,.false.)
+      maxmes = j4save(4,0,.false.)
+c check for valid input
+      if ((nmessg.gt.0).and.(nerr.ne.0).and.
+     1 (level.ge.(-1)).and.(level.le.2)) go to 10
+      if (lkntrl.gt.0) call xerprt('fatal error in...',17)
+      call xerprt('xerror -- invalid input',23)
+c if (lkntrl.gt.0) call fdump
+      if (lkntrl.gt.0) call xerprt('job abort due to fatal error.',
+     1 29)
+      if (lkntrl.gt.0) call xersav(' ',0,0,0,kdummy)
+      call xerabt('xerror -- invalid input',23)
+      return
+   10 continue
+c record message
+      junk = j4save(1,nerr,.true.)
+      call xersav(messg,nmessg,nerr,level,kount)
+c let user override
+      lfirst = messg
+      lmessg = nmessg
+      lerr = nerr
+      llevel = level
+      call xerctl(lfirst,lmessg,lerr,llevel,lkntrl)
+c reset to original values
+      lmessg = nmessg
+      lerr = nerr
+      llevel = level
+c clamp the (possibly user-modified) control flag into [-2,2]
+      lkntrl = max0(-2,min0(2,lkntrl))
+      mkntrl = iabs(lkntrl)
+c decide whether to print message
+c (suppress when the per-message print count kount has exceeded the
+c limit appropriate for this level, or when printing is turned off)
+      if ((llevel.lt.2).and.(lkntrl.eq.0)) go to 100
+      if (((llevel.eq.(-1)).and.(kount.gt.min0(1,maxmes)))
+     1.or.((llevel.eq.0) .and.(kount.gt.maxmes))
+     2.or.((llevel.eq.1) .and.(kount.gt.maxmes).and.(mkntrl.eq.1))
+     3.or.((llevel.eq.2) .and.(kount.gt.max0(1,maxmes)))) go to 100
+      if (lkntrl.le.0) go to 20
+      call xerprt(' ',1)
+c introduction
+      if (llevel.eq.(-1)) call xerprt
+     1('warning message...this message will only be printed once.',57)
+      if (llevel.eq.0) call xerprt('warning in...',13)
+      if (llevel.eq.1) call xerprt
+     1 ('recoverable error in...',23)
+      if (llevel.eq.2) call xerprt('fatal error in...',17)
+   20 continue
+c message
+      call xerprt(messg,lmessg)
+      call xgetua(lun,nunit)
+c field widths sized from the i1mach machine constants: widest
+c integer (i1mach(9)) and a real with full precision (base
+c i1mach(10), i1mach(11) digits)
+      isizei = log10(float(i1mach(9))) + 1.0
+      isizef = log10(float(i1mach(10))**i1mach(11)) + 1.0
+      do 50 kunit=1,nunit
+         iunit = lun(kunit)
+         if (iunit.eq.0) iunit = i1mach(4)
+c build a run-time format string in form, then print the optional
+c integer and real values with it
+         do 22 i=1,min(ni,2)
+            write (form,21) i,isizei
+   21 format ('(11x,21hin above message, i',i1,'=,i',i2,') ')
+            if (i.eq.1) write (iunit,form) i1
+            if (i.eq.2) write (iunit,form) i2
+   22    continue
+         do 24 i=1,min(nr,2)
+            write (form,23) i,isizef+10,isizef
+   23 format ('(11x,21hin above message, r',i1,'=,e',
+     1 i2,'.',i2,')')
+            if (i.eq.1) write (iunit,form) r1
+            if (i.eq.2) write (iunit,form) r2
+   24    continue
+         if (lkntrl.le.0) go to 40
+c error number
+         write (iunit,30) lerr
+   30    format (15h error number =,i10)
+   40    continue
+   50 continue
+c trace-back
+c if (lkntrl.gt.0) call fdump
+  100 continue
+c fatal when level 2, or level 1 in "abort on recoverable" mode
+      ifatal = 0
+      if ((llevel.eq.2).or.((llevel.eq.1).and.(mkntrl.eq.2)))
+     1ifatal = 1
+c quit here if message is not fatal
+      if (ifatal.le.0) return
+      if ((lkntrl.le.0).or.(kount.gt.max0(1,maxmes))) go to 120
+c print reason for abort
+      if (llevel.eq.1) call xerprt
+     1 ('job abort due to unrecovered error.',35)
+      if (llevel.eq.2) call xerprt
+     1 ('job abort due to fatal error.',29)
+c print error summary
+      call xersav(' ',-1,0,0,kdummy)
+  120 continue
+c abort
+      if ((llevel.eq.2).and.(kount.gt.max0(1,maxmes))) lmessg = 0
+      call xerabt(messg,lmessg)
+      return
+      end
+*deck xersav
+      subroutine xersav(messg,nmessg,nerr,level,icount)
+c***begin prologue xersav
+c***date written 800319 (yymmdd)
+c***revision date 851213 (yymmdd)
+c***category no. r3
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose record that an error has occurred.
+c***description
+c
+c abstract
+c record that this error occurred.
+c the table holds at most 10 distinct (message,nerr,level)
+c entries; further distinct errors are only counted in kountx.
+c
+c description of parameters
+c --input--
+c messg, nmessg, nerr, level are as in xerror,
+c except that when nmessg=0 the tables will be
+c dumped and cleared, and when nmessg is less than zero the
+c tables will be dumped and not cleared.
+c --output--
+c icount will be the number of times this message has
+c been seen, or zero if the table has overflowed and
+c does not contain this message specifically.
+c when nmessg=0, icount will not be altered.
+c
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 1 august 1985
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called i1mach,xgetua
+c***end prologue xersav
+      integer lun(5)
+      character*(*) messg
+      character*20 mestab(10),mes
+      dimension nertab(10),levtab(10),kount(10)
+      save mestab,nertab,levtab,kount,kountx
+c next two data statements are necessary to provide a blank
+c error table initially
+      data kount(1),kount(2),kount(3),kount(4),kount(5),
+     1 kount(6),kount(7),kount(8),kount(9),kount(10)
+     2 /0,0,0,0,0,0,0,0,0,0/
+      data kountx/0/
+c***first executable statement xersav
+c nmessg .le. 0 requests a dump of the table (cleared afterwards
+c only when nmessg .eq. 0); positive nmessg records an occurrence
+      if (nmessg.gt.0) go to 80
+c dump the table
+      if (kount(1).eq.0) return
+c print to each unit
+      call xgetua(lun,nunit)
+      do 60 kunit=1,nunit
+         iunit = lun(kunit)
+         if (iunit.eq.0) iunit = i1mach(4)
+c print table header
+         write (iunit,10)
+   10 format (32h0          error message summary/
+     1 51h message start             nerr     level     count)
+c print body of table
+         do 20 i=1,10
+            if (kount(i).eq.0) go to 30
+            write (iunit,15) mestab(i),nertab(i),levtab(i),kount(i)
+   15 format (1x,a20,3i10)
+   20    continue
+   30    continue
+c print number of other errors
+         if (kountx.ne.0) write (iunit,40) kountx
+   40 format (41h0other errors not individually tabulated=,i10)
+         write (iunit,50)
+   50 format (1x)
+   60 continue
+      if (nmessg.lt.0) return
+c clear the error tables
+      do 70 i=1,10
+   70 kount(i) = 0
+      kountx = 0
+      return
+   80 continue
+c process a message...
+c search for this messg, or else an empty slot for this messg,
+c or else determine that the error table is full.
+      mes = messg
+      do 90 i=1,10
+         ii = i
+         if (kount(i).eq.0) go to 110
+         if (mes.ne.mestab(i)) go to 90
+         if (nerr.ne.nertab(i)) go to 90
+         if (level.ne.levtab(i)) go to 90
+         go to 100
+   90 continue
+c three possible cases...
+c table is full
+c (entry is not recorded individually; kountx counts it instead.
+c note icount is returned as 1 here, not 0 as the prologue states)
+      kountx = kountx+1
+      icount = 1
+      return
+c message found in table
+  100 kount(ii) = kount(ii) + 1
+      icount = kount(ii)
+      return
+c empty slot found for new message
+  110 mestab(ii) = mes
+      nertab(ii) = nerr
+      levtab(ii) = level
+      kount(ii) = 1
+      icount = 1
+      return
+      end
+      subroutine xgetf(kontrl)
+c***begin prologue xgetf
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose return the current value of the error control flag.
+c***description
+c
+c abstract
+c xgetf returns the current value of the error control flag
+c in kontrl. see subroutine xsetf for flag value meanings.
+c (kontrl is an output parameter only.)
+c
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 7 june 1978
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called j4save
+c***end prologue xgetf
+c***first executable statement xgetf
+c read-only query of j4save slot 2 (the error control flag)
+      kontrl = j4save(2,0,.false.)
+      return
+      end
+*deck xgetua
+      subroutine xgetua(iunita,n)
+c***begin prologue xgetua
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose return unit number(s) to which error messages are being
+c sent.
+c***description
+c
+c abstract
+c xgetua may be called to determine the unit number or numbers
+c to which error messages are being sent.
+c these unit numbers may have been set by a call to xsetun,
+c or a call to xsetua, or may be a default value.
+c
+c description of parameters
+c --output--
+c iunit - an array of one to five unit numbers, depending
+c on the value of n. a value of zero refers to the
+c default unit, as defined by the i1mach machine
+c constant routine. only iunit(1),...,iunit(n) are
+c defined by xgetua. the values of iunit(n+1),...,
+c iunit(5) are not defined (for n .lt. 5) or altered
+c in any way by xgetua.
+c n - the number of units to which copies of the
+c error messages are being sent. n will be in the
+c range from 1 to 5.
+c
+c latest revision --- 19 mar 1980
+c written by ron jones, with slatec common math library subcommittee
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called j4save
+c***end prologue xgetua
+      dimension iunita(5)
+c***first executable statement xgetua
+c j4save slot 5 holds the unit count; the first unit lives in
+c slot 3 and units 2 through 5 live in slots 6 through 9
+      n = j4save(5,0,.false.)
+      do 30 i=1,n
+         index = i+4
+         if (i.eq.1) index = 3
+         iunita(i) = j4save(index,0,.false.)
+   30 continue
+      return
+      end
+*deck j4save
+      function j4save(iwhich,ivalue,iset)
+c***begin prologue j4save
+c***refer to xerror
+c***routines called (none)
+c***description
+c
+c abstract
+c j4save saves and recalls several global variables needed
+c by the library error handling routines.
+c
+c description of parameters
+c --input--
+c iwhich - index of item desired.
+c = 1 refers to current error number.
+c = 2 refers to current error control flag.
+c = 3 refers to current unit number to which error
+c messages are to be sent. (0 means use standard.)
+c = 4 refers to the maximum number of times any
+c message is to be printed (as set by xermax).
+c = 5 refers to the total number of units to which
+c each error message is to be written.
+c = 6 refers to the 2nd unit for error messages
+c = 7 refers to the 3rd unit for error messages
+c = 8 refers to the 4th unit for error messages
+c = 9 refers to the 5th unit for error messages
+c ivalue - the value to be set for the iwhich-th parameter,
+c if iset is .true. .
+c iset - if iset=.true., the iwhich-th parameter will be
+c given the value, ivalue. if iset=.false., the
+c iwhich-th parameter will be unchanged, and ivalue
+c is a dummy parameter.
+c --output--
+c the (old) value of the iwhich-th parameter will be returned
+c in the function value, j4save.
+c
+c written by ron jones, with slatec common math library subcommittee
+c adapted from bell laboratories port library error handler
+c latest revision --- 1 august 1985
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***end prologue j4save
+      logical iset
+      integer iparam(9)
+      save iparam
+c defaults: error number 0, control flag 2, unit 0 (standard),
+c at most 10 prints per message, 1 output unit, no extra units
+      data iparam(1),iparam(2),iparam(3),iparam(4)/0,2,0,10/
+      data iparam(5)/1/
+      data iparam(6),iparam(7),iparam(8),iparam(9)/0,0,0,0/
+c***first executable statement j4save
+c always return the old value; overwrite only when iset is .true.
+      j4save = iparam(iwhich)
+      if (iset) iparam(iwhich) = ivalue
+      return
+      end
+*deck xerclr
+      subroutine xerclr
+c***begin prologue xerclr
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose reset current error number to zero.
+c***description
+c
+c abstract
+c this routine simply resets the current error number to zero.
+c this may be necessary to do in order to determine that
+c a certain error has occurred again since the last time
+c numxer was referenced.
+c
+c written by ron jones, with slatec common math library subcommittee
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called j4save
+c***end prologue xerclr
+c***first executable statement xerclr
+c overwrite j4save slot 1 (current error number) with zero
+      junk = j4save(1,0,.true.)
+      return
+      end
+      subroutine xerdmp
+c***begin prologue xerdmp
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose print the error tables and then clear them.
+c***description
+c
+c abstract
+c xerdmp prints the error tables, then clears them.
+c
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 7 june 1978
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called xersav
+c***end prologue xerdmp
+c***first executable statement xerdmp
+c nmessg=0 tells xersav to dump and then clear its tables
+      call xersav(' ',0,0,0,kount)
+      return
+      end
+      subroutine xermax(max)
+c***begin prologue xermax
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose set maximum number of times any error message is to be
+c printed.
+c***description
+c
+c abstract
+c xermax sets the maximum number of times any message
+c is to be printed. that is, non-fatal messages are
+c not to be printed after they have occurred max times.
+c such non-fatal messages may be printed less than
+c max times even if they occur max times, if error
+c suppression mode (kontrl=0) is ever in effect.
+c
+c description of parameter
+c --input--
+c max - the maximum number of times any one message
+c is to be printed.
+c
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 7 june 1978
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called j4save
+c***end prologue xermax
+c***first executable statement xermax
+c store the limit in j4save slot 4 (read back by xerrwv)
+      junk = j4save(4,max,.true.)
+      return
+      end
+ subroutine xgetun(iunit)
+c***begin prologue xgetun
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3c
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose return the (first) output file to which error messages
+c are being sent.
+c***description
+c
+c abstract
+c xgetun gets the (first) output file to which error messages
+c are being sent. to find out if more than one file is being
+c used, one must use the xgetua routine.
+c
+c description of parameter
+c --output--
+c iunit - the logical unit number of the (first) unit to
+c which error messages are being sent.
+c a value of zero means that the default file, as
+c defined by the i1mach routine, is being used.
+c (the .false. flag appears to make j4save read item 3 without setting it -- cf. xsetun, which passes .true.)
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 23 may 1979
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called j4save
+c***end prologue xgetun
+c***first executable statement xgetun
+ iunit = j4save(3,0,.false.)
+ return
+ end
+ subroutine xsetf(kontrl)
+c***begin prologue xsetf
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3a
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose set the error control flag.
+c***description
+c
+c abstract
+c xsetf sets the error control flag value to kontrl.
+c (kontrl is an input parameter only.)
+c the following table shows how each message is treated,
+c depending on the values of kontrl and level. (see xerror
+c for description of level.)
+c
+c if kontrl is zero or negative, no information other than the
+c message itself (including numeric values, if any) will be
+c printed. if kontrl is positive, introductory messages,
+c trace-backs, etc., will be printed in addition to the message.
+c
+c iabs(kontrl)
+c level 0 1 2
+c value
+c 2 fatal fatal fatal
+c
+c 1 not printed printed fatal
+c
+c 0 not printed printed printed
+c
+c -1 not printed printed printed
+c only only
+c once once
+c
+c note(review): the message literal below is longer than the 33-char count passed to xerrwv -- confirm against upstream slatec.
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 19 mar 1980
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called j4save,xerrwv
+c***end prologue xsetf
+c***first executable statement xsetf
+ if ((kontrl.ge.(-2)).and.(kontrl.le.2)) go to 10
+ call xerrwv('xsetf -- invalid value of kontrl (i1).',33,1,2,
+ 1 1,kontrl,0,0,0.,0.)
+ return
+ 10 junk = j4save(2,kontrl,.true.)
+ return
+ end
+ subroutine xsetua(iunita,n)
+c***begin prologue xsetua
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3b
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose set logical unit numbers (up to 5) to which error
+c messages are to be sent.
+c***description
+c
+c abstract
+c xsetua may be called to declare a list of up to five
+c logical units, each of which is to receive a copy of
+c each error message processed by this package.
+c the purpose of xsetua is to allow simultaneous printing
+c of each error message on, say, a main output file,
+c an interactive terminal, and other files such as graphics
+c communication files.
+c
+c description of parameters
+c --input--
+c iunita - an array of up to five unit numbers.
+c normally these numbers should all be different
+c (but duplicates are not prohibited.)
+c n - the number of unit numbers provided in iunita
+c must have 1 .le. n .le. 5.
+c
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 19 mar 1980
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called j4save,xerrwv
+c***end prologue xsetua
+ dimension iunita(5)
+c***first executable statement xsetua
+ if ((n.ge.1).and.(n.le.5)) go to 10
+ call xerrwv('xsetua -- invalid value of n (i1).',34,1,2,
+ 1 1,n,0,0,0.,0.)
+ return
+ 10 continue
+ do 20 i=1,n
+ index = i+4
+ if (i.eq.1) index = 3
+ junk = j4save(index,iunita(i),.true.)
+ 20 continue
+ junk = j4save(5,n,.true.)
+ return
+ end
+ subroutine xsetun(iunit)
+c***begin prologue xsetun
+c***date written 790801 (yymmdd)
+c***revision date 851111 (yymmdd)
+c***category no. r3b
+c***keywords error,xerror package
+c***author jones, r. e., (snla)
+c***purpose set output file to which error messages are to be sent.
+c***description
+c
+c abstract
+c xsetun sets the output file to which error messages are to
+c be sent. only one file will be used. see xsetua for
+c how to declare more than one file.
+c
+c description of parameter
+c --input--
+c iunit - an input parameter giving the logical unit number
+c to which error messages are to be sent.
+c (the count of active units, j4save item 5, is reset to one.)
+c written by ron jones, with slatec common math library subcommittee
+c latest revision --- 7 june 1978
+c***references jones r.e., kahaner d.k., 'xerror, the slatec error-
+c handling package', sand82-0800, sandia laboratories,
+c 1982.
+c***routines called j4save
+c***end prologue xsetun
+c***first executable statement xsetun
+ junk = j4save(3,iunit,.true.)
+ junk = j4save(5,1,.true.)
+ return
+ end
+ FUNCTION RAND(R)
+C***BEGIN PROLOGUE RAND
+C***DATE WRITTEN 770401 (YYMMDD)
+C***REVISION DATE 861211 (YYMMDD)
+C***CATEGORY NO. L6A21
+C***KEYWORDS LIBRARY=SLATEC(FNLIB),TYPE=SINGLE PRECISION(RAND-S),
+C RANDOM NUMBER,SPECIAL FUNCTIONS,UNIFORM
+C***AUTHOR FULLERTON, W., (LANL)
+C***PURPOSE Generates a uniformly distributed random number.
+C***DESCRIPTION
+C
+C This pseudo-random number generator is portable among a wide
+C variety of computers. RAND(R) undoubtedly is not as good as many
+C readily available installation dependent versions, and so this
+C routine is not recommended for widespread usage. Its redeeming
+C feature is that the exact same random numbers (to within final round-
+C off error) can be generated from machine to machine. Thus, programs
+C that make use of random numbers can be easily transported to and
+C checked in a new environment.
+C The random numbers are generated by the linear congruential
+C method described, e.g., by Knuth in Seminumerical Methods (p.9),
+C Addison-Wesley, 1969. Given the I-th number of a pseudo-random
+C sequence, the I+1 -st number is generated from
+C X(I+1) = (A*X(I) + C) MOD M,
+C where here M = 2**22 = 4194304, C = 1731 and several suitable values
+C of the multiplier A are discussed below. Both the multiplier A and
+C random number X are represented in double precision as two 11-bit
+C words. The constants are chosen so that the period is the maximum
+C possible, 4194304.
+C In order that the same numbers be generated from machine to
+C machine, it is necessary that 23-bit integers be reducible modulo
+C 2**11 exactly, that 23-bit integers be added exactly, and that 11-bit
+C integers be multiplied exactly. Furthermore, if the restart option
+C is used (where R is between 0 and 1), then the product R*2**22 =
+C R*4194304 must be correct to the nearest integer.
+C The first four random numbers should be .0004127026,
+C .6750836372, .1614754200, and .9086198807. The tenth random number
+C is .5527787209, and the hundredth is .3600893021 . The thousandth
+C number should be .2176990509 .
+C In order to generate several effectively independent sequences
+C with the same generator, it is necessary to know the random number
+C for several widely spaced calls. The I-th random number times 2**22,
+C where I=K*P/8 and P is the period of the sequence (P = 2**22), is
+C still of the form L*P/8. In particular we find the I-th random
+C number multiplied by 2**22 is given by
+C I = 0 1*P/8 2*P/8 3*P/8 4*P/8 5*P/8 6*P/8 7*P/8 8*P/8
+C RAND= 0 5*P/8 2*P/8 7*P/8 4*P/8 1*P/8 6*P/8 3*P/8 0
+C Thus the 4*P/8 = 2097152 random number is 2097152/2**22.
+C Several multipliers have been subjected to the spectral test
+C (see Knuth, p. 82). Four suitable multipliers roughly in order of
+C goodness according to the spectral test are
+C 3146757 = 1536*2048 + 1029 = 2**21 + 2**20 + 2**10 + 5
+C 2098181 = 1024*2048 + 1029 = 2**21 + 2**10 + 5
+C 3146245 = 1536*2048 + 517 = 2**21 + 2**20 + 2**9 + 5
+C 2776669 = 1355*2048 + 1629 = 5**9 + 7**7 + 1
+C
+C In the table below LOG10(NU(I)) gives roughly the number of
+C random decimal digits in the random numbers considered I at a time.
+C C is the primary measure of goodness. In both cases bigger is better.
+C
+C LOG10 NU(I) C(I)
+C A I=2 I=3 I=4 I=5 I=2 I=3 I=4 I=5
+C
+C 3146757 3.3 2.0 1.6 1.3 3.1 1.3 4.6 2.6
+C 2098181 3.3 2.0 1.6 1.2 3.2 1.3 4.6 1.7
+C 3146245 3.3 2.2 1.5 1.1 3.2 4.2 1.1 0.4
+C 2776669 3.3 2.1 1.6 1.3 2.5 2.0 1.9 2.6
+C Best
+C Possible 3.3 2.3 1.7 1.4 3.6 5.9 9.7 14.9
+C
+C Input Argument --
+C R If R=0., the next random number of the sequence is generated.
+C If R .LT. 0., the last generated number will be returned for
+C possible use in a restart procedure.
+C If R .GT. 0., the sequence of random numbers will start with
+C the seed R mod 1. This seed is also returned as the value of
+C RAND provided the arithmetic is done exactly.
+C
+C Output Value --
+C RAND a pseudo-random number between 0. and 1.
+C***REFERENCES (NONE)
+C***ROUTINES CALLED (NONE)
+C***END PROLOGUE RAND
+ SAVE IA1, IA0, IA1MA0, IC, IX1, IX0
+ DATA IA1, IA0, IA1MA0 /1536, 1029, 507/
+ DATA IC /1731/
+ DATA IX1, IX0 /0, 0/
+C***FIRST EXECUTABLE STATEMENT RAND
+ IF (R.LT.0.) GO TO 10
+ IF (R.GT.0.) GO TO 20
+C
+C A*X = 2**22*IA1*IX1 + 2**11*(IA1*IX1 + (IA1-IA0)*(IX0-IX1)
+C + IA0*IX0) + IA0*IX0
+C
+ IY0 = IA0*IX0
+ IY1 = IA1*IX1 + IA1MA0*(IX0-IX1) + IY0
+ IY0 = IY0 + IC
+ IX0 = MOD (IY0, 2048)
+ IY1 = IY1 + (IY0-IX0)/2048
+ IX1 = MOD (IY1, 2048)
+C
+ 10 RAND = IX1*2048 + IX0
+ RAND = RAND / 4194304.
+ RETURN
+C restart entry: build the 22-bit seed from the fractional part of R.
+ 20 IX1 = AMOD(R,1.)*4194304. + 0.5
+ IX0 = MOD (IX1, 2048)
+ IX1 = (IX1-IX0)/2048
+ GO TO 10
+C
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/caxpy.f b/components/cism/glimmer-cism/libglimmer-solve/blas/caxpy.f
new file mode 100644
index 0000000000..ece603c6c2
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/caxpy.f
@@ -0,0 +1,52 @@
+ SUBROUTINE CAXPY(N,CA,CX,INCX,CY,INCY)
+* .. Scalar Arguments ..
+ COMPLEX CA
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*),CY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* CAXPY constant times a vector plus a vector.
+*
+* Further Details
+* ===============
+*
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+* CY := CY + CA*CX; quick return when N<=0 or SCABS1(CA)==0.
+* .. Local Scalars ..
+ INTEGER I,IX,IY
+* ..
+* .. External Functions ..
+ REAL SCABS1
+ EXTERNAL SCABS1
+* ..
+ IF (N.LE.0) RETURN
+ IF (SCABS1(CA).EQ.0.0E+0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ CY(IY) = CY(IY) + CA*CX(IX)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+ 20 DO 30 I = 1,N
+ CY(I) = CY(I) + CA*CX(I)
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/ccopy.f b/components/cism/glimmer-cism/libglimmer-solve/blas/ccopy.f
new file mode 100644
index 0000000000..97e6a235de
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/ccopy.f
@@ -0,0 +1,46 @@
+ SUBROUTINE CCOPY(N,CX,INCX,CY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*),CY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* CCOPY copies a vector x to a vector y.
+*
+* Further Details
+* ===============
+*
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+* a negative increment traverses the corresponding vector backward.
+* .. Local Scalars ..
+ INTEGER I,IX,IY
+* ..
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ CY(IY) = CX(IX)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+ 20 DO 30 I = 1,N
+ CY(I) = CX(I)
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/cdotc.f b/components/cism/glimmer-cism/libglimmer-solve/blas/cdotc.f
new file mode 100644
index 0000000000..40b7748cb9
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/cdotc.f
@@ -0,0 +1,55 @@
+ COMPLEX FUNCTION CDOTC(N,CX,INCX,CY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*),CY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* forms the dot product of two vectors, conjugating the first
+* vector.
+*
+* Further Details
+* ===============
+*
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+* CDOTC = sum( CONJG(CX(i))*CY(i) ); returns (0.,0.) when N<=0.
+* .. Local Scalars ..
+ COMPLEX CTEMP
+ INTEGER I,IX,IY
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC CONJG
+* ..
+ CTEMP = (0.0,0.0)
+ CDOTC = (0.0,0.0)
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ CTEMP = CTEMP + CONJG(CX(IX))*CY(IY)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ CDOTC = CTEMP
+ RETURN
+*
+* code for both increments equal to 1
+*
+ 20 DO 30 I = 1,N
+ CTEMP = CTEMP + CONJG(CX(I))*CY(I)
+ 30 CONTINUE
+ CDOTC = CTEMP
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/cdotu.f b/components/cism/glimmer-cism/libglimmer-solve/blas/cdotu.f
new file mode 100644
index 0000000000..529c0e264b
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/cdotu.f
@@ -0,0 +1,51 @@
+ COMPLEX FUNCTION CDOTU(N,CX,INCX,CY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*),CY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* CDOTU forms the dot product of two vectors.
+*
+* Further Details
+* ===============
+*
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+* CDOTU = sum( CX(i)*CY(i) ), no conjugation; (0.,0.) when N<=0.
+* .. Local Scalars ..
+ COMPLEX CTEMP
+ INTEGER I,IX,IY
+* ..
+ CTEMP = (0.0,0.0)
+ CDOTU = (0.0,0.0)
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ CTEMP = CTEMP + CX(IX)*CY(IY)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ CDOTU = CTEMP
+ RETURN
+*
+* code for both increments equal to 1
+*
+ 20 DO 30 I = 1,N
+ CTEMP = CTEMP + CX(I)*CY(I)
+ 30 CONTINUE
+ CDOTU = CTEMP
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/crotg.f b/components/cism/glimmer-cism/libglimmer-solve/blas/crotg.f
new file mode 100644
index 0000000000..2057a29807
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/crotg.f
@@ -0,0 +1,33 @@
+ SUBROUTINE CROTG(CA,CB,C,S)
+* .. Scalar Arguments ..
+ COMPLEX CA,CB,S
+ REAL C
+* ..
+*
+* Purpose
+* =======
+*
+* CROTG determines a complex Givens rotation.
+* on return CA is overwritten with the rotated value; CB is not modified.
+* .. Local Scalars ..
+ COMPLEX ALPHA
+ REAL NORM,SCALE
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC CABS,CONJG,SQRT
+* ..
+ IF (CABS(CA).NE.0.) GO TO 10
+ C = 0.
+ S = (1.,0.)
+ CA = CB
+ GO TO 20
+ 10 CONTINUE
+ SCALE = CABS(CA) + CABS(CB)
+ NORM = SCALE*SQRT((CABS(CA/SCALE))**2+ (CABS(CB/SCALE))**2)
+ ALPHA = CA/CABS(CA)
+ C = CABS(CA)/NORM
+ S = ALPHA*CONJG(CB)/NORM
+ CA = ALPHA*NORM
+ 20 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/cscal.f b/components/cism/glimmer-cism/libglimmer-solve/blas/cscal.f
new file mode 100644
index 0000000000..3bcdff67b6
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/cscal.f
@@ -0,0 +1,39 @@
+ SUBROUTINE CSCAL(N,CA,CX,INCX)
+* .. Scalar Arguments ..
+ COMPLEX CA
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* scales a vector by a constant.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+* note: returns without scaling when n<=0 or incx<=0.
+* .. Local Scalars ..
+ INTEGER I,NINCX
+* ..
+ IF (N.LE.0 .OR. INCX.LE.0) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ NINCX = N*INCX
+ DO 10 I = 1,NINCX,INCX
+ CX(I) = CA*CX(I)
+ 10 CONTINUE
+ RETURN
+*
+* code for increment equal to 1
+*
+ 20 DO 30 I = 1,N
+ CX(I) = CA*CX(I)
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/csscal.f b/components/cism/glimmer-cism/libglimmer-solve/blas/csscal.f
new file mode 100644
index 0000000000..1bc2b60904
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/csscal.f
@@ -0,0 +1,42 @@
+ SUBROUTINE CSSCAL(N,SA,CX,INCX)
+* .. Scalar Arguments ..
+ REAL SA
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* scales a complex vector by a real constant.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+* real and imaginary parts are each scaled by sa via cmplx(...).
+* .. Local Scalars ..
+ INTEGER I,NINCX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC AIMAG,CMPLX,REAL
+* ..
+ IF (N.LE.0 .OR. INCX.LE.0) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ NINCX = N*INCX
+ DO 10 I = 1,NINCX,INCX
+ CX(I) = CMPLX(SA*REAL(CX(I)),SA*AIMAG(CX(I)))
+ 10 CONTINUE
+ RETURN
+*
+* code for increment equal to 1
+*
+ 20 DO 30 I = 1,N
+ CX(I) = CMPLX(SA*REAL(CX(I)),SA*AIMAG(CX(I)))
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/cswap.f b/components/cism/glimmer-cism/libglimmer-solve/blas/cswap.f
new file mode 100644
index 0000000000..4a2b33bf0e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/cswap.f
@@ -0,0 +1,47 @@
+ SUBROUTINE CSWAP(N,CX,INCX,CY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*),CY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* interchanges two vectors.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+* exchanges cx(i) and cy(i) elementwise; no-op when n<=0.
+* .. Local Scalars ..
+ COMPLEX CTEMP
+ INTEGER I,IX,IY
+* ..
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments not equal
+* to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ CTEMP = CX(IX)
+ CX(IX) = CY(IY)
+ CY(IY) = CTEMP
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+ 20 DO 30 I = 1,N
+ CTEMP = CX(I)
+ CX(I) = CY(I)
+ CY(I) = CTEMP
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/dasum.f b/components/cism/glimmer-cism/libglimmer-solve/blas/dasum.f
new file mode 100644
index 0000000000..def066cc7f
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/dasum.f
@@ -0,0 +1,57 @@
+ DOUBLE PRECISION FUNCTION DASUM(N,DX,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* takes the sum of the absolute values.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+* returns zero when n<=0 or incx<=0; unit-stride loop unrolled by 6.
+* .. Local Scalars ..
+ DOUBLE PRECISION DTEMP
+ INTEGER I,M,MP1,NINCX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC DABS,MOD
+* ..
+ DASUM = 0.0d0
+ DTEMP = 0.0d0
+ IF (N.LE.0 .OR. INCX.LE.0) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ NINCX = N*INCX
+ DO 10 I = 1,NINCX,INCX
+ DTEMP = DTEMP + DABS(DX(I))
+ 10 CONTINUE
+ DASUM = DTEMP
+ RETURN
+*
+* code for increment equal to 1
+*
+*
+* clean-up loop
+*
+ 20 M = MOD(N,6)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ DTEMP = DTEMP + DABS(DX(I))
+ 30 CONTINUE
+ IF (N.LT.6) GO TO 60
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,6
+ DTEMP = DTEMP + DABS(DX(I)) + DABS(DX(I+1)) + DABS(DX(I+2)) +
+ + DABS(DX(I+3)) + DABS(DX(I+4)) + DABS(DX(I+5))
+ 50 CONTINUE
+ 60 DASUM = DTEMP
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/daxpy.f b/components/cism/glimmer-cism/libglimmer-solve/blas/daxpy.f
new file mode 100644
index 0000000000..ceac8cc515
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/daxpy.f
@@ -0,0 +1,62 @@
+ SUBROUTINE DAXPY(N,DA,DX,INCX,DY,INCY)
+* .. Scalar Arguments ..
+ DOUBLE PRECISION DA
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DX(*),DY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* constant times a vector plus a vector.
+* uses unrolled loops for increments equal to one.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+* dy := dy + da*dx; quick return when n<=0 or da==0; unrolled by 4.
+* .. Local Scalars ..
+ INTEGER I,IX,IY,M,MP1
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ IF (N.LE.0) RETURN
+ IF (DA.EQ.0.0d0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ DY(IY) = DY(IY) + DA*DX(IX)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+*
+* clean-up loop
+*
+ 20 M = MOD(N,4)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ DY(I) = DY(I) + DA*DX(I)
+ 30 CONTINUE
+ IF (N.LT.4) RETURN
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,4
+ DY(I) = DY(I) + DA*DX(I)
+ DY(I+1) = DY(I+1) + DA*DX(I+1)
+ DY(I+2) = DY(I+2) + DA*DX(I+2)
+ DY(I+3) = DY(I+3) + DA*DX(I+3)
+ 50 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/dcopy.f b/components/cism/glimmer-cism/libglimmer-solve/blas/dcopy.f
new file mode 100644
index 0000000000..f2305ebc03
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/dcopy.f
@@ -0,0 +1,63 @@
+ SUBROUTINE DCOPY(N,DX,INCX,DY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DX(*),DY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* copies a vector, x, to a vector, y.
+* uses unrolled loops for increments equal to one.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+* dy := dx; the unit-stride branch is unrolled by 7.
+* .. Local Scalars ..
+ INTEGER I,IX,IY,M,MP1
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ DY(IY) = DX(IX)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+*
+* clean-up loop
+*
+ 20 M = MOD(N,7)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ DY(I) = DX(I)
+ 30 CONTINUE
+ IF (N.LT.7) RETURN
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,7
+ DY(I) = DX(I)
+ DY(I+1) = DX(I+1)
+ DY(I+2) = DX(I+2)
+ DY(I+3) = DX(I+3)
+ DY(I+4) = DX(I+4)
+ DY(I+5) = DX(I+5)
+ DY(I+6) = DX(I+6)
+ 50 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/ddot.f b/components/cism/glimmer-cism/libglimmer-solve/blas/ddot.f
new file mode 100644
index 0000000000..582ae64931
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/ddot.f
@@ -0,0 +1,63 @@
+ DOUBLE PRECISION FUNCTION DDOT(N,DX,INCX,DY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DX(*),DY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* forms the dot product of two vectors.
+* uses unrolled loops for increments equal to one.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+* ddot = sum(dx(i)*dy(i)); the unit-stride branch is unrolled by 5.
+* .. Local Scalars ..
+ DOUBLE PRECISION DTEMP
+ INTEGER I,IX,IY,M,MP1
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ DDOT = 0.0d0
+ DTEMP = 0.0d0
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ DTEMP = DTEMP + DX(IX)*DY(IY)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ DDOT = DTEMP
+ RETURN
+*
+* code for both increments equal to 1
+*
+*
+* clean-up loop
+*
+ 20 M = MOD(N,5)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ DTEMP = DTEMP + DX(I)*DY(I)
+ 30 CONTINUE
+ IF (N.LT.5) GO TO 60
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,5
+ DTEMP = DTEMP + DX(I)*DY(I) + DX(I+1)*DY(I+1) +
+ + DX(I+2)*DY(I+2) + DX(I+3)*DY(I+3) + DX(I+4)*DY(I+4)
+ 50 CONTINUE
+ 60 DDOT = DTEMP
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/dnrm2.f b/components/cism/glimmer-cism/libglimmer-solve/blas/dnrm2.f
new file mode 100644
index 0000000000..6102c3e4d7
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/dnrm2.f
@@ -0,0 +1,64 @@
+ DOUBLE PRECISION FUNCTION DNRM2(N,X,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION X(*)
+* ..
+*
+* Purpose
+* =======
+*
+* DNRM2 returns the euclidean norm of a vector via the function
+* name, so that
+*
+* DNRM2 := sqrt( x'*x )
+*
+* a scaled sum of squares (scale, ssq) guards against overflow.
+* -- This version written on 25-October-1982.
+* Modified on 14-October-1993 to inline the call to DLASSQ.
+* Sven Hammarling, Nag Ltd.
+*
+*
+* .. Parameters ..
+ DOUBLE PRECISION ONE,ZERO
+ PARAMETER (ONE=1.0D+0,ZERO=0.0D+0)
+* ..
+* .. Local Scalars ..
+ DOUBLE PRECISION ABSXI,NORM,SCALE,SSQ
+ INTEGER IX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS,SQRT
+* ..
+ IF (N.LT.1 .OR. INCX.LT.1) THEN
+ NORM = ZERO
+ ELSE IF (N.EQ.1) THEN
+ NORM = ABS(X(1))
+ ELSE
+ SCALE = ZERO
+ SSQ = ONE
+* The following loop is equivalent to this call to the LAPACK
+* auxiliary routine:
+* CALL DLASSQ( N, X, INCX, SCALE, SSQ )
+*
+ DO 10 IX = 1,1 + (N-1)*INCX,INCX
+ IF (X(IX).NE.ZERO) THEN
+ ABSXI = ABS(X(IX))
+ IF (SCALE.LT.ABSXI) THEN
+ SSQ = ONE + SSQ* (SCALE/ABSXI)**2
+ SCALE = ABSXI
+ ELSE
+ SSQ = SSQ + (ABSXI/SCALE)**2
+ END IF
+ END IF
+ 10 CONTINUE
+ NORM = SCALE*SQRT(SSQ)
+ END IF
+*
+ DNRM2 = NORM
+ RETURN
+*
+* End of DNRM2.
+*
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/drot.f b/components/cism/glimmer-cism/libglimmer-solve/blas/drot.f
new file mode 100644
index 0000000000..adaa88f6c4
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/drot.f
@@ -0,0 +1,49 @@
+ SUBROUTINE DROT(N,DX,INCX,DY,INCY,C,S)
+* .. Scalar Arguments ..
+ DOUBLE PRECISION C,S
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DX(*),DY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* applies a plane rotation.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+* (dx(i),dy(i)) := (c*dx+s*dy, c*dy-s*dx) for each element pair.
+* .. Local Scalars ..
+ DOUBLE PRECISION DTEMP
+ INTEGER I,IX,IY
+* ..
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments not equal
+* to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ DTEMP = C*DX(IX) + S*DY(IY)
+ DY(IY) = C*DY(IY) - S*DX(IX)
+ DX(IX) = DTEMP
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+ 20 DO 30 I = 1,N
+ DTEMP = C*DX(I) + S*DY(I)
+ DY(I) = C*DY(I) - S*DX(I)
+ DX(I) = DTEMP
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/drotg.f b/components/cism/glimmer-cism/libglimmer-solve/blas/drotg.f
new file mode 100644
index 0000000000..e50dd648ab
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/drotg.f
@@ -0,0 +1,38 @@
+ SUBROUTINE DROTG(DA,DB,C,S)
+* .. Scalar Arguments ..
+ DOUBLE PRECISION C,DA,DB,S
+* ..
+*
+* Purpose
+* =======
+*
+* construct givens plane rotation.
+* jack dongarra, linpack, 3/11/78.
+*
+* on return da holds r and db holds the reconstruction value z.
+* .. Local Scalars ..
+ DOUBLE PRECISION R,ROE,SCALE,Z
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC DABS,DSIGN,DSQRT
+* ..
+ ROE = DB
+ IF (DABS(DA).GT.DABS(DB)) ROE = DA
+ SCALE = DABS(DA) + DABS(DB)
+ IF (SCALE.NE.0.0d0) GO TO 10
+ C = 1.0d0
+ S = 0.0d0
+ R = 0.0d0
+ Z = 0.0d0
+ GO TO 20
+ 10 R = SCALE*DSQRT((DA/SCALE)**2+ (DB/SCALE)**2)
+ R = DSIGN(1.0d0,ROE)*R
+ C = DA/R
+ S = DB/R
+ Z = 1.0d0
+ IF (DABS(DA).GT.DABS(DB)) Z = S
+ IF (DABS(DB).GE.DABS(DA) .AND. C.NE.0.0d0) Z = 1.0d0/C
+ 20 DA = R
+ DB = Z
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/drotm.f b/components/cism/glimmer-cism/libglimmer-solve/blas/drotm.f
new file mode 100644
index 0000000000..28cf21372a
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/drotm.f
@@ -0,0 +1,147 @@
+ SUBROUTINE DROTM(N,DX,INCX,DY,INCY,DPARAM)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DPARAM(5),DX(1),DY(1)
+* ..
+*
+* Purpose
+* =======
+*
+* APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX
+*
+* (DX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF DX ARE IN
+* (DY**T)
+*
+* DX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE
+* LX = (-INCX)*N, AND SIMILARLY FOR DY USING LY AND INCY.
+* WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS..
+*
+* DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0
+*
+* (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0)
+* H=( ) ( ) ( ) ( )
+* (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0).
+* SEE DROTMG FOR A DESCRIPTION OF DATA STORAGE IN DPARAM.
+*
+* Arguments
+* =========
+*
+* N (input) INTEGER
+* number of elements in input vector(s)
+*
+* DX (input/output) DOUBLE PRECISION array, dimension N
+* double precision vector with N elements
+*
+* INCX (input) INTEGER
+* storage spacing between elements of DX
+*
+* DY (input/output) DOUBLE PRECISION array, dimension N
+* double precision vector with N elements
+*
+* INCY (input) INTEGER
+* storage spacing between elements of DY
+*
+* DPARAM (input/output) DOUBLE PRECISION array, dimension 5
+* DPARAM(1)=DFLAG
+* DPARAM(2)=DH11
+* DPARAM(3)=DH21
+* DPARAM(4)=DH12
+* DPARAM(5)=DH22
+*
+* =====================================================================
+*
+* .. Local Scalars ..
+ DOUBLE PRECISION DFLAG,DH11,DH12,DH21,DH22,TWO,W,Z,ZERO
+ INTEGER I,KX,KY,NSTEPS
+* ..
+* .. Data statements ..
+ DATA ZERO,TWO/0.D0,2.D0/
+* ..
+*
+ DFLAG = DPARAM(1)
+ IF (N.LE.0 .OR. (DFLAG+TWO.EQ.ZERO)) GO TO 140
+ IF (.NOT. (INCX.EQ.INCY.AND.INCX.GT.0)) GO TO 70
+*
+ NSTEPS = N*INCX
+ IF (DFLAG) 50,10,30
+ 10 CONTINUE
+ DH12 = DPARAM(4)
+ DH21 = DPARAM(3)
+ DO 20 I = 1,NSTEPS,INCX
+ W = DX(I)
+ Z = DY(I)
+ DX(I) = W + Z*DH12
+ DY(I) = W*DH21 + Z
+ 20 CONTINUE
+ GO TO 140
+ 30 CONTINUE
+ DH11 = DPARAM(2)
+ DH22 = DPARAM(5)
+ DO 40 I = 1,NSTEPS,INCX
+ W = DX(I)
+ Z = DY(I)
+ DX(I) = W*DH11 + Z
+ DY(I) = -W + DH22*Z
+ 40 CONTINUE
+ GO TO 140
+ 50 CONTINUE
+ DH11 = DPARAM(2)
+ DH12 = DPARAM(4)
+ DH21 = DPARAM(3)
+ DH22 = DPARAM(5)
+ DO 60 I = 1,NSTEPS,INCX
+ W = DX(I)
+ Z = DY(I)
+ DX(I) = W*DH11 + Z*DH12
+ DY(I) = W*DH21 + Z*DH22
+ 60 CONTINUE
+ GO TO 140
+ 70 CONTINUE
+ KX = 1
+ KY = 1
+ IF (INCX.LT.0) KX = 1 + (1-N)*INCX
+ IF (INCY.LT.0) KY = 1 + (1-N)*INCY
+*
+ IF (DFLAG) 120,80,100
+ 80 CONTINUE
+ DH12 = DPARAM(4)
+ DH21 = DPARAM(3)
+ DO 90 I = 1,N
+ W = DX(KX)
+ Z = DY(KY)
+ DX(KX) = W + Z*DH12
+ DY(KY) = W*DH21 + Z
+ KX = KX + INCX
+ KY = KY + INCY
+ 90 CONTINUE
+ GO TO 140
+ 100 CONTINUE
+ DH11 = DPARAM(2)
+ DH22 = DPARAM(5)
+ DO 110 I = 1,N
+ W = DX(KX)
+ Z = DY(KY)
+ DX(KX) = W*DH11 + Z
+ DY(KY) = -W + DH22*Z
+ KX = KX + INCX
+ KY = KY + INCY
+ 110 CONTINUE
+ GO TO 140
+ 120 CONTINUE
+ DH11 = DPARAM(2)
+ DH12 = DPARAM(4)
+ DH21 = DPARAM(3)
+ DH22 = DPARAM(5)
+ DO 130 I = 1,N
+ W = DX(KX)
+ Z = DY(KY)
+ DX(KX) = W*DH11 + Z*DH12
+ DY(KY) = W*DH21 + Z*DH22
+ KX = KX + INCX
+ KY = KY + INCY
+ 130 CONTINUE
+ 140 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/drotmg.f b/components/cism/glimmer-cism/libglimmer-solve/blas/drotmg.f
new file mode 100644
index 0000000000..3ae647b087
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/drotmg.f
@@ -0,0 +1,206 @@
+ SUBROUTINE DROTMG(DD1,DD2,DX1,DY1,DPARAM)
+* .. Scalar Arguments ..
+ DOUBLE PRECISION DD1,DD2,DX1,DY1
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DPARAM(5)
+* ..
+*
+* Purpose
+* =======
+*
+* CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS
+* THE SECOND COMPONENT OF THE 2-VECTOR (DSQRT(DD1)*DX1,DSQRT(DD2)*
+* DY2)**T.
+* WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS..
+*
+* DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0
+*
+* (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0)
+* H=( ) ( ) ( ) ( )
+* (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0).
+* LOCATIONS 2-5 OF DPARAM CONTAIN DH11, DH21, DH12, AND DH22
+* RESPECTIVELY. (VALUES OF 1.D0, -1.D0, OR 0.D0 IMPLIED BY THE
+* VALUE OF DPARAM(1) ARE NOT STORED IN DPARAM.)
+*
+* THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE
+* INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE
+* OF DD1 AND DD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM.
+*
+*
+* Arguments
+* =========
+*
+* DD1 (input/output) DOUBLE PRECISION
+*
+* DD2 (input/output) DOUBLE PRECISION
+*
+* DX1 (input/output) DOUBLE PRECISION
+*
+* DY1 (input) DOUBLE PRECISION
+*
+* DPARAM (input/output) DOUBLE PRECISION array, dimension 5
+* DPARAM(1)=DFLAG
+* DPARAM(2)=DH11
+* DPARAM(3)=DH21
+* DPARAM(4)=DH12
+* DPARAM(5)=DH22
+*
+* =====================================================================
+*
+* .. Local Scalars ..
+ DOUBLE PRECISION DFLAG,DH11,DH12,DH21,DH22,DP1,DP2,DQ1,DQ2,DTEMP,
+ + DU,GAM,GAMSQ,ONE,RGAMSQ,TWO,ZERO
+ INTEGER IGO
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC DABS
+* ..
+* .. Data statements ..
+*
+ DATA ZERO,ONE,TWO/0.D0,1.D0,2.D0/
+ DATA GAM,GAMSQ,RGAMSQ/4096.D0,16777216.D0,5.9604645D-8/
+* ..
+
+* A NEGATIVE DD1 CANNOT BE SCALED BY A REAL GIVENS FACTOR; IN THAT
+* CASE H, THE D'S AND DX1 ARE ALL FLUSHED TO ZERO (PROCEDURE
+* ZERO-H-D-AND-DX1 AT LABEL 60).
+ IF (.NOT.DD1.LT.ZERO) GO TO 10
+* GO ZERO-H-D-AND-DX1..
+ GO TO 60
+ 10 CONTINUE
+* CASE-DD1-NONNEGATIVE
+ DP2 = DD2*DY1
+* DP2=0 MEANS THE SECOND COMPONENT IS ALREADY ZERO: RETURN THE
+* IDENTITY TRANSFORMATION, FLAGGED BY DFLAG=-2 (NO H ENTRIES STORED).
+ IF (.NOT.DP2.EQ.ZERO) GO TO 20
+ DFLAG = -TWO
+ GO TO 260
+* REGULAR-CASE..
+ 20 CONTINUE
+ DP1 = DD1*DX1
+ DQ2 = DP2*DY1
+ DQ1 = DP1*DX1
+*
+* |DQ1|>|DQ2| SELECTS THE DFLAG=0 FORM (UNIT DIAGONAL), OTHERWISE
+* THE DFLAG=1 FORM (UNIT OFF-DIAGONAL); THIS KEEPS THE STORED H
+* ENTRIES BOUNDED BY ONE IN MAGNITUDE.
+ IF (.NOT.DABS(DQ1).GT.DABS(DQ2)) GO TO 40
+ DH21 = -DY1/DX1
+ DH12 = DP2/DP1
+*
+ DU = ONE - DH12*DH21
+*
+ IF (.NOT.DU.LE.ZERO) GO TO 30
+* GO ZERO-H-D-AND-DX1..
+ GO TO 60
+ 30 CONTINUE
+ DFLAG = ZERO
+ DD1 = DD1/DU
+ DD2 = DD2/DU
+ DX1 = DX1*DU
+* GO SCALE-CHECK..
+ GO TO 100
+ 40 CONTINUE
+ IF (.NOT.DQ2.LT.ZERO) GO TO 50
+* GO ZERO-H-D-AND-DX1..
+ GO TO 60
+ 50 CONTINUE
+ DFLAG = ONE
+ DH11 = DP1/DP2
+ DH22 = DX1/DY1
+ DU = ONE + DH11*DH22
+ DTEMP = DD2/DU
+ DD2 = DD1/DU
+ DD1 = DTEMP
+ DX1 = DY1*DU
+* GO SCALE-CHECK
+ GO TO 100
+* PROCEDURE..ZERO-H-D-AND-DX1..
+ 60 CONTINUE
+ DFLAG = -ONE
+ DH11 = ZERO
+ DH12 = ZERO
+ DH21 = ZERO
+ DH22 = ZERO
+*
+ DD1 = ZERO
+ DD2 = ZERO
+ DX1 = ZERO
+* RETURN..
+ GO TO 220
+* PROCEDURE..FIX-H..
+* EXPANDS THE IMPLICIT UNIT/-UNIT ENTRIES OF H (FOR DFLAG=0 OR 1)
+* SO ALL FOUR ENTRIES CAN BE RESCALED; THE ASSIGNED GO TO AT LABEL
+* 90 JUMPS BACK TO THE CALL SITE RECORDED IN IGO.
+* NOTE(review): ASSIGN / assigned GO TO were deleted in Fortran 95;
+* kept here verbatim for fidelity with the reference BLAS.
+ 70 CONTINUE
+ IF (.NOT.DFLAG.GE.ZERO) GO TO 90
+*
+ IF (.NOT.DFLAG.EQ.ZERO) GO TO 80
+ DH11 = ONE
+ DH22 = ONE
+ DFLAG = -ONE
+ GO TO 90
+ 80 CONTINUE
+ DH21 = -ONE
+ DH12 = ONE
+ DFLAG = -ONE
+ 90 CONTINUE
+ GO TO IGO(120,150,180,210)
+* PROCEDURE..SCALE-CHECK
+* RESCALES DD1 (THEN DD2) INTO [RGAMSQ,GAMSQ] BY POWERS OF GAM**2,
+* ADJUSTING DX1 AND THE H ENTRIES TO COMPENSATE, SO THE D'S STAY
+* AWAY FROM OVERFLOW/UNDERFLOW.
+ 100 CONTINUE
+ 110 CONTINUE
+ IF (.NOT.DD1.LE.RGAMSQ) GO TO 130
+ IF (DD1.EQ.ZERO) GO TO 160
+ ASSIGN 120 TO IGO
+* FIX-H..
+ GO TO 70
+ 120 CONTINUE
+ DD1 = DD1*GAM**2
+ DX1 = DX1/GAM
+ DH11 = DH11/GAM
+ DH12 = DH12/GAM
+ GO TO 110
+ 130 CONTINUE
+ 140 CONTINUE
+ IF (.NOT.DD1.GE.GAMSQ) GO TO 160
+ ASSIGN 150 TO IGO
+* FIX-H..
+ GO TO 70
+ 150 CONTINUE
+ DD1 = DD1/GAM**2
+ DX1 = DX1*GAM
+ DH11 = DH11*GAM
+ DH12 = DH12*GAM
+ GO TO 140
+ 160 CONTINUE
+ 170 CONTINUE
+ IF (.NOT.DABS(DD2).LE.RGAMSQ) GO TO 190
+ IF (DD2.EQ.ZERO) GO TO 220
+ ASSIGN 180 TO IGO
+* FIX-H..
+ GO TO 70
+ 180 CONTINUE
+ DD2 = DD2*GAM**2
+ DH21 = DH21/GAM
+ DH22 = DH22/GAM
+ GO TO 170
+ 190 CONTINUE
+ 200 CONTINUE
+ IF (.NOT.DABS(DD2).GE.GAMSQ) GO TO 220
+ ASSIGN 210 TO IGO
+* FIX-H..
+ GO TO 70
+ 210 CONTINUE
+ DD2 = DD2/GAM**2
+ DH21 = DH21*GAM
+ DH22 = DH22*GAM
+ GO TO 200
+* STORE ONLY THE H ENTRIES NOT IMPLIED BY DFLAG (SEE PURPOSE):
+* ARITHMETIC IF: DFLAG<0 -> ALL FOUR, =0 -> OFF-DIAGONAL, >0 -> DIAGONAL.
+ 220 CONTINUE
+ IF (DFLAG) 250,230,240
+ 230 CONTINUE
+ DPARAM(3) = DH21
+ DPARAM(4) = DH12
+ GO TO 260
+ 240 CONTINUE
+ DPARAM(2) = DH11
+ DPARAM(5) = DH22
+ GO TO 260
+ 250 CONTINUE
+ DPARAM(2) = DH11
+ DPARAM(3) = DH21
+ DPARAM(4) = DH12
+ DPARAM(5) = DH22
+ 260 CONTINUE
+ DPARAM(1) = DFLAG
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/dscal.f b/components/cism/glimmer-cism/libglimmer-solve/blas/dscal.f
new file mode 100644
index 0000000000..0b423cf253
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/dscal.f
@@ -0,0 +1,57 @@
+ SUBROUTINE DSCAL(N,DA,DX,INCX)
+* .. Scalar Arguments ..
+ DOUBLE PRECISION DA
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* scales a vector by a constant: DX = DA*DX, over N elements
+* spaced INCX apart (INCX must be positive; no-op otherwise).
+* uses unrolled loops for increment equal to one.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ INTEGER I,M,MP1,NINCX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ IF (N.LE.0 .OR. INCX.LE.0) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ NINCX = N*INCX
+ DO 10 I = 1,NINCX,INCX
+ DX(I) = DA*DX(I)
+ 10 CONTINUE
+ RETURN
+*
+* code for increment equal to 1
+*
+*
+* clean-up loop: handle the leading N mod 5 elements so the main
+* loop below can be unrolled by 5
+*
+ 20 M = MOD(N,5)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ DX(I) = DA*DX(I)
+ 30 CONTINUE
+ IF (N.LT.5) RETURN
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,5
+ DX(I) = DA*DX(I)
+ DX(I+1) = DA*DX(I+1)
+ DX(I+2) = DA*DX(I+2)
+ DX(I+3) = DA*DX(I+3)
+ DX(I+4) = DA*DX(I+4)
+ 50 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/dsdot.f b/components/cism/glimmer-cism/libglimmer-solve/blas/dsdot.f
new file mode 100644
index 0000000000..4845123ba0
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/dsdot.f
@@ -0,0 +1,96 @@
+ DOUBLE PRECISION FUNCTION DSDOT(N,SX,INCX,SY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*),SY(*)
+* ..
+*
+* AUTHORS
+* =======
+* Lawson, C. L., (JPL), Hanson, R. J., (SNLA),
+* Kincaid, D. R., (U. of Texas), Krogh, F. T., (JPL)
+*
+* Purpose
+* =======
+* Compute the inner product of two vectors with extended
+* precision accumulation and result.
+*
+* Returns D.P. dot product accumulated in D.P., for S.P. SX and SY
+* DSDOT = sum for I = 0 to N-1 of SX(LX+I*INCX) * SY(LY+I*INCY),
+* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is
+* defined in a similar way using INCY.
+*
+* Arguments
+* =========
+*
+* N (input) INTEGER
+* number of elements in input vector(s)
+*
+* SX (input) REAL array, dimension(N)
+* single precision vector with N elements
+*
+* INCX (input) INTEGER
+* storage spacing between elements of SX
+*
+* SY (input) REAL array, dimension(N)
+* single precision vector with N elements
+*
+* INCY (input) INTEGER
+* storage spacing between elements of SY
+*
+* DSDOT (output) DOUBLE PRECISION
+* DSDOT double precision dot product (zero if N.LE.0)
+*
+* REFERENCES
+* ==========
+*
+* C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T.
+* Krogh, Basic linear algebra subprograms for Fortran
+* usage, Algorithm No. 539, Transactions on Mathematical
+* Software 5, 3 (September 1979), pp. 308-323.
+*
+* REVISION HISTORY (YYMMDD)
+* ==========================
+*
+* 791001 DATE WRITTEN
+* 890831 Modified array declarations. (WRB)
+* 890831 REVISION DATE from Version 3.2
+* 891214 Prologue converted to Version 4.0 format. (BAB)
+* 920310 Corrected definition of LX in DESCRIPTION. (WRB)
+* 920501 Reformatted the REFERENCES section. (WRB)
+* 070118 Reformat to LAPACK style (JL)
+*
+* =====================================================================
+*
+* .. Local Scalars ..
+ INTEGER I,KX,KY,NS
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC DBLE
+* ..
+ DSDOT = 0.0D0
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.INCY .AND. INCX.GT.0) GO TO 20
+*
+* Code for unequal or nonpositive increments.
+* KX/KY start at the last storage element when the corresponding
+* increment is negative, so traversal order matches the definition.
+*
+ KX = 1
+ KY = 1
+ IF (INCX.LT.0) KX = 1 + (1-N)*INCX
+ IF (INCY.LT.0) KY = 1 + (1-N)*INCY
+ DO 10 I = 1,N
+* DBLE promotes each factor BEFORE the multiply, so both the
+* product and the running sum are carried in double precision.
+ DSDOT = DSDOT + DBLE(SX(KX))*DBLE(SY(KY))
+ KX = KX + INCX
+ KY = KY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* Code for equal, positive, non-unit increments.
+*
+ 20 NS = N*INCX
+ DO 30 I = 1,NS,INCX
+ DSDOT = DSDOT + DBLE(SX(I))*DBLE(SY(I))
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/dswap.f b/components/cism/glimmer-cism/libglimmer-solve/blas/dswap.f
new file mode 100644
index 0000000000..79c123b6dd
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/dswap.f
@@ -0,0 +1,70 @@
+ SUBROUTINE DSWAP(N,DX,INCX,DY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DX(*),DY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* interchanges two vectors (element-wise swap of DX and DY).
+* uses unrolled loops for increments equal one.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ DOUBLE PRECISION DTEMP
+ INTEGER I,IX,IY,M,MP1
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments not equal
+* to 1
+*
+* for a negative increment, start at the far end of the storage
+* span so element I of one vector pairs with element I of the other
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ DTEMP = DX(IX)
+ DX(IX) = DY(IY)
+ DY(IY) = DTEMP
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+*
+* clean-up loop: swap the leading N mod 3 elements, then run the
+* 3-way unrolled main loop
+*
+ 20 M = MOD(N,3)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ DTEMP = DX(I)
+ DX(I) = DY(I)
+ DY(I) = DTEMP
+ 30 CONTINUE
+ IF (N.LT.3) RETURN
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,3
+ DTEMP = DX(I)
+ DX(I) = DY(I)
+ DY(I) = DTEMP
+ DTEMP = DX(I+1)
+ DX(I+1) = DY(I+1)
+ DY(I+1) = DTEMP
+ DTEMP = DX(I+2)
+ DX(I+2) = DY(I+2)
+ DY(I+2) = DTEMP
+ 50 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/icamax.f b/components/cism/glimmer-cism/libglimmer-solve/blas/icamax.f
new file mode 100644
index 0000000000..9a6afc1753
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/icamax.f
@@ -0,0 +1,54 @@
+ INTEGER FUNCTION ICAMAX(N,CX,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* finds the index of element having max. absolute value.
+* ties are broken by the earliest index (update only on strictly
+* greater); returns 0 for invalid N/INCX.
+* the "absolute value" is SCABS1 (external helper; assumed to be
+* ABS(REAL(Z))+ABS(AIMAG(Z)) per reference BLAS -- confirm scabs1.f).
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ REAL SMAX
+ INTEGER I,IX
+* ..
+* .. External Functions ..
+ REAL SCABS1
+ EXTERNAL SCABS1
+* ..
+ ICAMAX = 0
+ IF (N.LT.1 .OR. INCX.LE.0) RETURN
+ ICAMAX = 1
+ IF (N.EQ.1) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ IX = 1
+ SMAX = SCABS1(CX(1))
+ IX = IX + INCX
+ DO 10 I = 2,N
+ IF (SCABS1(CX(IX)).LE.SMAX) GO TO 5
+ ICAMAX = I
+ SMAX = SCABS1(CX(IX))
+ 5 IX = IX + INCX
+ 10 CONTINUE
+ RETURN
+*
+* code for increment equal to 1
+*
+ 20 SMAX = SCABS1(CX(1))
+ DO 30 I = 2,N
+ IF (SCABS1(CX(I)).LE.SMAX) GO TO 30
+ ICAMAX = I
+ SMAX = SCABS1(CX(I))
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/idamax.f b/components/cism/glimmer-cism/libglimmer-solve/blas/idamax.f
new file mode 100644
index 0000000000..44729fe48e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/idamax.f
@@ -0,0 +1,53 @@
+ INTEGER FUNCTION IDAMAX(N,DX,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION DX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* finds the index of element having max. absolute value.
+* ties are broken by the earliest index (update only on strictly
+* greater); returns 0 for invalid N/INCX.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ DOUBLE PRECISION DMAX
+ INTEGER I,IX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC DABS
+* ..
+ IDAMAX = 0
+ IF (N.LT.1 .OR. INCX.LE.0) RETURN
+ IDAMAX = 1
+ IF (N.EQ.1) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ IX = 1
+ DMAX = DABS(DX(1))
+ IX = IX + INCX
+ DO 10 I = 2,N
+ IF (DABS(DX(IX)).LE.DMAX) GO TO 5
+ IDAMAX = I
+ DMAX = DABS(DX(IX))
+ 5 IX = IX + INCX
+ 10 CONTINUE
+ RETURN
+*
+* code for increment equal to 1
+*
+ 20 DMAX = DABS(DX(1))
+ DO 30 I = 2,N
+ IF (DABS(DX(I)).LE.DMAX) GO TO 30
+ IDAMAX = I
+ DMAX = DABS(DX(I))
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/isamax.f b/components/cism/glimmer-cism/libglimmer-solve/blas/isamax.f
new file mode 100644
index 0000000000..f6fd31210d
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/isamax.f
@@ -0,0 +1,53 @@
+ INTEGER FUNCTION ISAMAX(N,SX,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* finds the index of element having max. absolute value.
+* ties are broken by the earliest index (update only on strictly
+* greater); returns 0 for invalid N/INCX.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ REAL SMAX
+ INTEGER I,IX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS
+* ..
+ ISAMAX = 0
+ IF (N.LT.1 .OR. INCX.LE.0) RETURN
+ ISAMAX = 1
+ IF (N.EQ.1) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ IX = 1
+ SMAX = ABS(SX(1))
+ IX = IX + INCX
+ DO 10 I = 2,N
+ IF (ABS(SX(IX)).LE.SMAX) GO TO 5
+ ISAMAX = I
+ SMAX = ABS(SX(IX))
+ 5 IX = IX + INCX
+ 10 CONTINUE
+ RETURN
+*
+* code for increment equal to 1
+*
+ 20 SMAX = ABS(SX(1))
+ DO 30 I = 2,N
+ IF (ABS(SX(I)).LE.SMAX) GO TO 30
+ ISAMAX = I
+ SMAX = ABS(SX(I))
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/sasum.f b/components/cism/glimmer-cism/libglimmer-solve/blas/sasum.f
new file mode 100644
index 0000000000..0677ba47aa
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/sasum.f
@@ -0,0 +1,59 @@
+ REAL FUNCTION SASUM(N,SX,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* takes the sum of the absolute values (1-norm); returns 0.0 when
+* N.LE.0 or INCX.LE.0.
+* uses unrolled loops for increment equal to one.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+
+* .. Local Scalars ..
+ REAL STEMP
+ INTEGER I,M,MP1,NINCX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS,MOD
+* ..
+ SASUM = 0.0e0
+ STEMP = 0.0e0
+ IF (N.LE.0 .OR. INCX.LE.0) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ NINCX = N*INCX
+ DO 10 I = 1,NINCX,INCX
+ STEMP = STEMP + ABS(SX(I))
+ 10 CONTINUE
+ SASUM = STEMP
+ RETURN
+*
+* code for increment equal to 1
+*
+*
+* clean-up loop: sum the leading N mod 6 elements, then run the
+* 6-way unrolled main loop
+*
+ 20 M = MOD(N,6)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ STEMP = STEMP + ABS(SX(I))
+ 30 CONTINUE
+ IF (N.LT.6) GO TO 60
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,6
+ STEMP = STEMP + ABS(SX(I)) + ABS(SX(I+1)) + ABS(SX(I+2)) +
+ + ABS(SX(I+3)) + ABS(SX(I+4)) + ABS(SX(I+5))
+ 50 CONTINUE
+ 60 SASUM = STEMP
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/saxpy.f b/components/cism/glimmer-cism/libglimmer-solve/blas/saxpy.f
new file mode 100644
index 0000000000..6241a71d1b
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/saxpy.f
@@ -0,0 +1,62 @@
+ SUBROUTINE SAXPY(N,SA,SX,INCX,SY,INCY)
+* .. Scalar Arguments ..
+ REAL SA
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*),SY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* SAXPY constant times a vector plus a vector: SY = SA*SX + SY.
+* returns immediately (SY unchanged) when N.LE.0 or SA = 0.
+* uses unrolled loop for increments equal to one.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ INTEGER I,IX,IY,M,MP1
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ IF (N.LE.0) RETURN
+ IF (SA.EQ.0.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+* negative increments start from the far end of the storage span
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ SY(IY) = SY(IY) + SA*SX(IX)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+*
+* clean-up loop: handle the leading N mod 4 elements, then run the
+* 4-way unrolled main loop
+*
+ 20 M = MOD(N,4)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ SY(I) = SY(I) + SA*SX(I)
+ 30 CONTINUE
+ IF (N.LT.4) RETURN
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,4
+ SY(I) = SY(I) + SA*SX(I)
+ SY(I+1) = SY(I+1) + SA*SX(I+1)
+ SY(I+2) = SY(I+2) + SA*SX(I+2)
+ SY(I+3) = SY(I+3) + SA*SX(I+3)
+ 50 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/scasum.f b/components/cism/glimmer-cism/libglimmer-solve/blas/scasum.f
new file mode 100644
index 0000000000..5a4abfa97d
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/scasum.f
@@ -0,0 +1,47 @@
+ REAL FUNCTION SCASUM(N,CX,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ COMPLEX CX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* takes the sum of the absolute values of a complex vector and
+* returns a single precision result.
+* "absolute value" here is |Re| + |Im| per element (the cheap
+* 1-norm-style measure), not the complex modulus.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ REAL STEMP
+ INTEGER I,NINCX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS,AIMAG,REAL
+* ..
+ SCASUM = 0.0e0
+ STEMP = 0.0e0
+ IF (N.LE.0 .OR. INCX.LE.0) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ NINCX = N*INCX
+ DO 10 I = 1,NINCX,INCX
+ STEMP = STEMP + ABS(REAL(CX(I))) + ABS(AIMAG(CX(I)))
+ 10 CONTINUE
+ SCASUM = STEMP
+ RETURN
+*
+* code for increment equal to 1
+*
+ 20 DO 30 I = 1,N
+ STEMP = STEMP + ABS(REAL(CX(I))) + ABS(AIMAG(CX(I)))
+ 30 CONTINUE
+ SCASUM = STEMP
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/scnrm2.f b/components/cism/glimmer-cism/libglimmer-solve/blas/scnrm2.f
new file mode 100644
index 0000000000..160e2c4151
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/scnrm2.f
@@ -0,0 +1,72 @@
+ REAL FUNCTION SCNRM2(N,X,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ COMPLEX X(*)
+* ..
+*
+* Purpose
+* =======
+*
+* SCNRM2 returns the euclidean norm of a vector via the function
+* name, so that
+*
+* SCNRM2 := sqrt( conjg( x' )*x )
+*
+* The norm is accumulated as SCALE*sqrt(SSQ) with SCALE the largest
+* component magnitude seen so far; this avoids overflow/underflow
+* when squaring large or tiny components. Real and imaginary parts
+* are folded in separately. Returns zero for N<1 or INCX<1.
+*
+* -- This version written on 25-October-1982.
+* Modified on 14-October-1993 to inline the call to CLASSQ.
+* Sven Hammarling, Nag Ltd.
+*
+*
+* .. Parameters ..
+ REAL ONE,ZERO
+ PARAMETER (ONE=1.0E+0,ZERO=0.0E+0)
+* ..
+* .. Local Scalars ..
+ REAL NORM,SCALE,SSQ,TEMP
+ INTEGER IX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS,AIMAG,REAL,SQRT
+* ..
+ IF (N.LT.1 .OR. INCX.LT.1) THEN
+ NORM = ZERO
+ ELSE
+ SCALE = ZERO
+ SSQ = ONE
+* The following loop is equivalent to this call to the LAPACK
+* auxiliary routine:
+* CALL CLASSQ( N, X, INCX, SCALE, SSQ )
+*
+ DO 10 IX = 1,1 + (N-1)*INCX,INCX
+ IF (REAL(X(IX)).NE.ZERO) THEN
+ TEMP = ABS(REAL(X(IX)))
+* new largest magnitude: rescale SSQ to the new SCALE
+ IF (SCALE.LT.TEMP) THEN
+ SSQ = ONE + SSQ* (SCALE/TEMP)**2
+ SCALE = TEMP
+ ELSE
+ SSQ = SSQ + (TEMP/SCALE)**2
+ END IF
+ END IF
+ IF (AIMAG(X(IX)).NE.ZERO) THEN
+ TEMP = ABS(AIMAG(X(IX)))
+ IF (SCALE.LT.TEMP) THEN
+ SSQ = ONE + SSQ* (SCALE/TEMP)**2
+ SCALE = TEMP
+ ELSE
+ SSQ = SSQ + (TEMP/SCALE)**2
+ END IF
+ END IF
+ 10 CONTINUE
+ NORM = SCALE*SQRT(SSQ)
+ END IF
+*
+ SCNRM2 = NORM
+ RETURN
+*
+* End of SCNRM2.
+*
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/scopy.f b/components/cism/glimmer-cism/libglimmer-solve/blas/scopy.f
new file mode 100644
index 0000000000..ad04ee697e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/scopy.f
@@ -0,0 +1,63 @@
+ SUBROUTINE SCOPY(N,SX,INCX,SY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*),SY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* copies a vector, x, to a vector, y.
+* uses unrolled loops for increments equal to 1.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ INTEGER I,IX,IY,M,MP1
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+* negative increments start from the far end of the storage span
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ SY(IY) = SX(IX)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+*
+* clean-up loop: copy the leading N mod 7 elements, then run the
+* 7-way unrolled main loop
+*
+ 20 M = MOD(N,7)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ SY(I) = SX(I)
+ 30 CONTINUE
+ IF (N.LT.7) RETURN
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,7
+ SY(I) = SX(I)
+ SY(I+1) = SX(I+1)
+ SY(I+2) = SX(I+2)
+ SY(I+3) = SX(I+3)
+ SY(I+4) = SX(I+4)
+ SY(I+5) = SX(I+5)
+ SY(I+6) = SX(I+6)
+ 50 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/sdot.f b/components/cism/glimmer-cism/libglimmer-solve/blas/sdot.f
new file mode 100644
index 0000000000..deebc348bc
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/sdot.f
@@ -0,0 +1,64 @@
+ REAL FUNCTION SDOT(N,SX,INCX,SY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*),SY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* forms the dot product of two vectors (zero if N.LE.0).
+* uses unrolled loops for increments equal to one.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+
+* .. Local Scalars ..
+ REAL STEMP
+ INTEGER I,IX,IY,M,MP1
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ STEMP = 0.0e0
+ SDOT = 0.0e0
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments
+* not equal to 1
+*
+* negative increments start from the far end of the storage span
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ STEMP = STEMP + SX(IX)*SY(IY)
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ SDOT = STEMP
+ RETURN
+*
+* code for both increments equal to 1
+*
+*
+* clean-up loop: handle the leading N mod 5 terms, then run the
+* 5-way unrolled main loop
+*
+ 20 M = MOD(N,5)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ STEMP = STEMP + SX(I)*SY(I)
+ 30 CONTINUE
+ IF (N.LT.5) GO TO 60
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,5
+ STEMP = STEMP + SX(I)*SY(I) + SX(I+1)*SY(I+1) +
+ + SX(I+2)*SY(I+2) + SX(I+3)*SY(I+3) + SX(I+4)*SY(I+4)
+ 50 CONTINUE
+ 60 SDOT = STEMP
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/sdsdot.f b/components/cism/glimmer-cism/libglimmer-solve/blas/sdsdot.f
new file mode 100644
index 0000000000..f6349a1426
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/sdsdot.f
@@ -0,0 +1,105 @@
+ REAL FUNCTION SDSDOT(N,SB,SX,INCX,SY,INCY)
+* .. Scalar Arguments ..
+ REAL SB
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*),SY(*)
+* ..
+*
+* PURPOSE
+* =======
+*
+* Compute the inner product of two vectors with extended
+* precision accumulation.
+*
+* Returns S.P. result with dot product accumulated in D.P.
+* SDSDOT = SB + sum for I = 0 to N-1 of SX(LX+I*INCX)*SY(LY+I*INCY),
+* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is
+* defined in a similar way using INCY.
+*
+* AUTHOR
+* ======
+* Lawson, C. L., (JPL), Hanson, R. J., (SNLA),
+* Kincaid, D. R., (U. of Texas), Krogh, F. T., (JPL)
+*
+* ARGUMENTS
+* =========
+*
+* N (input) INTEGER
+* number of elements in input vector(s)
+*
+* SB (input) REAL
+* single precision scalar to be added to inner product
+*
+* SX (input) REAL array, dimension (N)
+* single precision vector with N elements
+*
+* INCX (input) INTEGER
+* storage spacing between elements of SX
+*
+* SY (input) REAL array, dimension (N)
+* single precision vector with N elements
+*
+* INCY (input) INTEGER
+* storage spacing between elements of SY
+*
+* SDSDOT (output) REAL
+* single precision dot product (SB if N .LE. 0)
+*
+* REFERENCES
+* ==========
+*
+* C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T.
+* Krogh, Basic linear algebra subprograms for Fortran
+* usage, Algorithm No. 539, Transactions on Mathematical
+* Software 5, 3 (September 1979), pp. 308-323.
+*
+* REVISION HISTORY (YYMMDD)
+* ==========================
+*
+* 791001 DATE WRITTEN
+* 890531 Changed all specific intrinsics to generic. (WRB)
+* 890831 Modified array declarations. (WRB)
+* 890831 REVISION DATE from Version 3.2
+* 891214 Prologue converted to Version 4.0 format. (BAB)
+* 920310 Corrected definition of LX in DESCRIPTION. (WRB)
+* 920501 Reformatted the REFERENCES section. (WRB)
+* 070118 Reformat to LAPACK coding style
+*
+* =====================================================================
+*
+* .. Local Scalars ..
+* NOTE: the local DSDOT below is a plain double precision
+* accumulator; it shadows (and is unrelated to) the external
+* function of the same name.
+ DOUBLE PRECISION DSDOT
+ INTEGER I,KX,KY,NS
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC DBLE
+* ..
+ DSDOT = SB
+ IF (N.LE.0) GO TO 30
+ IF (INCX.EQ.INCY .AND. INCX.GT.0) GO TO 40
+*
+* Code for unequal or nonpositive increments.
+*
+ KX = 1
+ KY = 1
+ IF (INCX.LT.0) KX = 1 + (1-N)*INCX
+ IF (INCY.LT.0) KY = 1 + (1-N)*INCY
+ DO 10 I = 1,N
+* products and sum are carried in double precision via DBLE
+ DSDOT = DSDOT + DBLE(SX(KX))*DBLE(SY(KY))
+ KX = KX + INCX
+ KY = KY + INCY
+ 10 CONTINUE
+* the assignment rounds the double accumulator to single precision
+ 30 SDSDOT = DSDOT
+ RETURN
+*
+* Code for equal and positive increments.
+*
+ 40 NS = N*INCX
+ DO 50 I = 1,NS,INCX
+ DSDOT = DSDOT + DBLE(SX(I))*DBLE(SY(I))
+ 50 CONTINUE
+ SDSDOT = DSDOT
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/snrm2.f b/components/cism/glimmer-cism/libglimmer-solve/blas/snrm2.f
new file mode 100644
index 0000000000..fa54ba1022
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/snrm2.f
@@ -0,0 +1,66 @@
+ REAL FUNCTION SNRM2(N,X,INCX)
+* .. Scalar Arguments ..
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ REAL X(*)
+* ..
+*
+* Purpose
+* =======
+*
+* SNRM2 returns the euclidean norm of a vector via the function
+* name, so that
+*
+* SNRM2 := sqrt( x'*x ).
+*
+* Further Details
+* ===============
+*
+* The norm is accumulated as SCALE*sqrt(SSQ) with SCALE the largest
+* magnitude seen so far; this avoids overflow/underflow when
+* squaring large or tiny components. Returns zero for N<1 or
+* INCX<1.
+*
+* -- This version written on 25-October-1982.
+* Modified on 14-October-1993 to inline the call to SLASSQ.
+* Sven Hammarling, Nag Ltd.
+*
+*
+* .. Parameters ..
+ REAL ONE,ZERO
+ PARAMETER (ONE=1.0E+0,ZERO=0.0E+0)
+* ..
+* .. Local Scalars ..
+ REAL ABSXI,NORM,SCALE,SSQ
+ INTEGER IX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS,SQRT
+* ..
+ IF (N.LT.1 .OR. INCX.LT.1) THEN
+ NORM = ZERO
+ ELSE IF (N.EQ.1) THEN
+ NORM = ABS(X(1))
+ ELSE
+ SCALE = ZERO
+ SSQ = ONE
+* The following loop is equivalent to this call to the LAPACK
+* auxiliary routine:
+* CALL SLASSQ( N, X, INCX, SCALE, SSQ )
+*
+ DO 10 IX = 1,1 + (N-1)*INCX,INCX
+ IF (X(IX).NE.ZERO) THEN
+ ABSXI = ABS(X(IX))
+* new largest magnitude: rescale SSQ to the new SCALE
+ IF (SCALE.LT.ABSXI) THEN
+ SSQ = ONE + SSQ* (SCALE/ABSXI)**2
+ SCALE = ABSXI
+ ELSE
+ SSQ = SSQ + (ABSXI/SCALE)**2
+ END IF
+ END IF
+ 10 CONTINUE
+ NORM = SCALE*SQRT(SSQ)
+ END IF
+*
+ SNRM2 = NORM
+ RETURN
+*
+* End of SNRM2.
+*
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/srot.f b/components/cism/glimmer-cism/libglimmer-solve/blas/srot.f
new file mode 100644
index 0000000000..e9f1cf711e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/srot.f
@@ -0,0 +1,54 @@
+ SUBROUTINE SROT(N,SX,INCX,SY,INCY,C,S)
+* .. Scalar Arguments ..
+ REAL C,S
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*),SY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* applies a plane rotation to the vector pair (SX,SY):
+* SX(i) <- C*SX(i) + S*SY(i)
+* SY(i) <- C*SY(i) - S*SX(i)
+*
+* Further Details
+* ===============
+*
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+
+* .. Local Scalars ..
+ REAL STEMP
+ INTEGER I,IX,IY
+* ..
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments not equal
+* to 1
+*
+* negative increments start from the far end of the storage span
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+* STEMP holds the new SX value until the old SX is consumed
+ STEMP = C*SX(IX) + S*SY(IY)
+ SY(IY) = C*SY(IY) - S*SX(IX)
+ SX(IX) = STEMP
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+ 20 DO 30 I = 1,N
+ STEMP = C*SX(I) + S*SY(I)
+ SY(I) = C*SY(I) - S*SX(I)
+ SX(I) = STEMP
+ 30 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/srotg.f b/components/cism/glimmer-cism/libglimmer-solve/blas/srotg.f
new file mode 100644
index 0000000000..2625bd589c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/srotg.f
@@ -0,0 +1,38 @@
+ SUBROUTINE SROTG(SA,SB,C,S)
+* .. Scalar Arguments ..
+ REAL C,S,SA,SB
+* ..
+*
+* Purpose
+* =======
+*
+* construct givens plane rotation: computes C and S so that
+* ( C S) (SA) = (R)
+* (-S C) (SB) (0)
+* On return SA holds R and SB holds Z, an encoding from which
+* C and S can be reconstructed (see below).
+* jack dongarra, linpack, 3/11/78.
+*
+*
+* .. Local Scalars ..
+ REAL R,ROE,SCALE,Z
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS,SIGN,SQRT
+* ..
+* ROE carries the sign of the dominant input into R
+ ROE = SB
+ IF (ABS(SA).GT.ABS(SB)) ROE = SA
+* SCALE guards the sum of squares against overflow/underflow;
+* SCALE=0 means both inputs are zero -> identity rotation
+ SCALE = ABS(SA) + ABS(SB)
+ IF (SCALE.NE.0.0) GO TO 10
+ C = 1.0
+ S = 0.0
+ R = 0.0
+ Z = 0.0
+ GO TO 20
+ 10 R = SCALE*SQRT((SA/SCALE)**2+ (SB/SCALE)**2)
+ R = SIGN(1.0,ROE)*R
+ C = SA/R
+ S = SB/R
+* Z encodes the rotation in one value: Z=S when |SA|>|SB|,
+* otherwise Z=1/C (with Z=1 when C=0)
+ Z = 1.0
+ IF (ABS(SA).GT.ABS(SB)) Z = S
+ IF (ABS(SB).GE.ABS(SA) .AND. C.NE.0.0) Z = 1.0/C
+ 20 SA = R
+ SB = Z
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/srotm.f b/components/cism/glimmer-cism/libglimmer-solve/blas/srotm.f
new file mode 100644
index 0000000000..3523f99f76
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/srotm.f
@@ -0,0 +1,148 @@
+ SUBROUTINE SROTM(N,SX,INCX,SY,INCY,SPARAM)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ REAL SPARAM(5),SX(1),SY(1)
+* ..
+*
+* Purpose
+* =======
+*
+* APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX
+*
+* (SX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF SX ARE IN
+* (SY**T)
+*
+* SX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE
+* LX = (-INCX)*N, AND SIMILARLY FOR SY USING LY AND INCY.
+* WITH SPARAM(1)=SFLAG, H HAS ONE OF THE FOLLOWING FORMS..
+*
+* SFLAG=-1.E0 SFLAG=0.E0 SFLAG=1.E0 SFLAG=-2.E0
+*
+* (SH11 SH12) (1.E0 SH12) (SH11 1.E0) (1.E0 0.E0)
+* H=( ) ( ) ( ) ( )
+* (SH21 SH22), (SH21 1.E0), (-1.E0 SH22), (0.E0 1.E0).
+* SEE SROTMG FOR A DESCRIPTION OF DATA STORAGE IN SPARAM.
+*
+*
+* Arguments
+* =========
+*
+* N (input) INTEGER
+* number of elements in input vector(s)
+*
+* SX (input/output) REAL array, dimension N
+* single precision vector with N elements
+*
+* INCX (input) INTEGER
+* storage spacing between elements of SX
+*
+* SY (input/output) REAL array, dimension N
+* single precision vector with N elements
+*
+* INCY (input) INTEGER
+* storage spacing between elements of SY
+*
+* SPARAM (input/output) REAL array, dimension 5
+* SPARAM(1)=SFLAG
+* SPARAM(2)=SH11
+* SPARAM(3)=SH21
+* SPARAM(4)=SH12
+* SPARAM(5)=SH22
+*
+* =====================================================================
+*
+* .. Local Scalars ..
+ REAL SFLAG,SH11,SH12,SH21,SH22,TWO,W,Z,ZERO
+ INTEGER I,KX,KY,NSTEPS
+* ..
+* .. Data statements ..
+ DATA ZERO,TWO/0.E0,2.E0/
+* ..
+*
+ SFLAG = SPARAM(1)
+* SFLAG = -2 means H is the identity: nothing to do
+ IF (N.LE.0 .OR. (SFLAG+TWO.EQ.ZERO)) GO TO 140
+ IF (.NOT. (INCX.EQ.INCY.AND.INCX.GT.0)) GO TO 70
+*
+* fast path: equal positive increments, addressed directly
+ NSTEPS = N*INCX
+* arithmetic IF on SFLAG: <0 -> 50 (full H), =0 -> 10 (unit
+* diagonal), >0 -> 30 (unit off-diagonal); only the H entries not
+* implied by SFLAG are loaded from SPARAM
+ IF (SFLAG) 50,10,30
+ 10 CONTINUE
+ SH12 = SPARAM(4)
+ SH21 = SPARAM(3)
+ DO 20 I = 1,NSTEPS,INCX
+ W = SX(I)
+ Z = SY(I)
+ SX(I) = W + Z*SH12
+ SY(I) = W*SH21 + Z
+ 20 CONTINUE
+ GO TO 140
+ 30 CONTINUE
+ SH11 = SPARAM(2)
+ SH22 = SPARAM(5)
+ DO 40 I = 1,NSTEPS,INCX
+ W = SX(I)
+ Z = SY(I)
+ SX(I) = W*SH11 + Z
+ SY(I) = -W + SH22*Z
+ 40 CONTINUE
+ GO TO 140
+ 50 CONTINUE
+ SH11 = SPARAM(2)
+ SH12 = SPARAM(4)
+ SH21 = SPARAM(3)
+ SH22 = SPARAM(5)
+ DO 60 I = 1,NSTEPS,INCX
+ W = SX(I)
+ Z = SY(I)
+ SX(I) = W*SH11 + Z*SH12
+ SY(I) = W*SH21 + Z*SH22
+ 60 CONTINUE
+ GO TO 140
+ 70 CONTINUE
+* general path: separate cursors; negative increments start from
+* the far end of the storage span
+ KX = 1
+ KY = 1
+ IF (INCX.LT.0) KX = 1 + (1-N)*INCX
+ IF (INCY.LT.0) KY = 1 + (1-N)*INCY
+*
+ IF (SFLAG) 120,80,100
+ 80 CONTINUE
+ SH12 = SPARAM(4)
+ SH21 = SPARAM(3)
+ DO 90 I = 1,N
+ W = SX(KX)
+ Z = SY(KY)
+ SX(KX) = W + Z*SH12
+ SY(KY) = W*SH21 + Z
+ KX = KX + INCX
+ KY = KY + INCY
+ 90 CONTINUE
+ GO TO 140
+ 100 CONTINUE
+ SH11 = SPARAM(2)
+ SH22 = SPARAM(5)
+ DO 110 I = 1,N
+ W = SX(KX)
+ Z = SY(KY)
+ SX(KX) = W*SH11 + Z
+ SY(KY) = -W + SH22*Z
+ KX = KX + INCX
+ KY = KY + INCY
+ 110 CONTINUE
+ GO TO 140
+ 120 CONTINUE
+ SH11 = SPARAM(2)
+ SH12 = SPARAM(4)
+ SH21 = SPARAM(3)
+ SH22 = SPARAM(5)
+ DO 130 I = 1,N
+ W = SX(KX)
+ Z = SY(KY)
+ SX(KX) = W*SH11 + Z*SH12
+ SY(KY) = W*SH21 + Z*SH22
+ KX = KX + INCX
+ KY = KY + INCY
+ 130 CONTINUE
+ 140 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/srotmg.f b/components/cism/glimmer-cism/libglimmer-solve/blas/srotmg.f
new file mode 100644
index 0000000000..7b3bd42728
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/srotmg.f
@@ -0,0 +1,208 @@
+ SUBROUTINE SROTMG(SD1,SD2,SX1,SY1,SPARAM)
+* .. Scalar Arguments ..
+ REAL SD1,SD2,SX1,SY1
+* ..
+* .. Array Arguments ..
+ REAL SPARAM(5)
+* ..
+*
+* Purpose
+* =======
+*
+* CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS
+* THE SECOND COMPONENT OF THE 2-VECTOR (SQRT(SD1)*SX1,SQRT(SD2)*
+* SY2)**T.
+* WITH SPARAM(1)=SFLAG, H HAS ONE OF THE FOLLOWING FORMS..
+*
+* SFLAG=-1.E0 SFLAG=0.E0 SFLAG=1.E0 SFLAG=-2.E0
+*
+* (SH11 SH12) (1.E0 SH12) (SH11 1.E0) (1.E0 0.E0)
+* H=( ) ( ) ( ) ( )
+* (SH21 SH22), (SH21 1.E0), (-1.E0 SH22), (0.E0 1.E0).
+* LOCATIONS 2-4 OF SPARAM CONTAIN SH11,SH21,SH12, AND SH22
+* RESPECTIVELY. (VALUES OF 1.E0, -1.E0, OR 0.E0 IMPLIED BY THE
+* VALUE OF SPARAM(1) ARE NOT STORED IN SPARAM.)
+*
+* THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE
+* INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE
+* OF SD1 AND SD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM.
+*
+*
+* Arguments
+* =========
+*
+*
+* SD1 (input/output) REAL
+*
+* SD2 (input/output) REAL
+*
+* SX1 (input/output) REAL
+*
+* SY1 (input) REAL
+*
+*
+* SPARAM (input/output) REAL array, dimension 5
+* SPARAM(1)=SFLAG
+* SPARAM(2)=SH11
+* SPARAM(3)=SH21
+* SPARAM(4)=SH12
+* SPARAM(5)=SH22
+*
+* =====================================================================
+*
+* .. Local Scalars ..
+ REAL GAM,GAMSQ,ONE,RGAMSQ,SFLAG,SH11,SH12,SH21,SH22,SP1,SP2,SQ1,
+ + SQ2,STEMP,SU,TWO,ZERO
+ INTEGER IGO
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS
+* ..
+* .. Data statements ..
+*
+ DATA ZERO,ONE,TWO/0.E0,1.E0,2.E0/
+ DATA GAM,GAMSQ,RGAMSQ/4096.E0,1.67772E7,5.96046E-8/
+* ..
+
+ IF (.NOT.SD1.LT.ZERO) GO TO 10
+* GO ZERO-H-D-AND-SX1..
+ GO TO 60
+ 10 CONTINUE
+* CASE-SD1-NONNEGATIVE
+ SP2 = SD2*SY1
+ IF (.NOT.SP2.EQ.ZERO) GO TO 20
+ SFLAG = -TWO
+ GO TO 260
+* REGULAR-CASE..
+ 20 CONTINUE
+ SP1 = SD1*SX1
+ SQ2 = SP2*SY1
+ SQ1 = SP1*SX1
+*
+ IF (.NOT.ABS(SQ1).GT.ABS(SQ2)) GO TO 40
+ SH21 = -SY1/SX1
+ SH12 = SP2/SP1
+*
+ SU = ONE - SH12*SH21
+*
+ IF (.NOT.SU.LE.ZERO) GO TO 30
+* GO ZERO-H-D-AND-SX1..
+ GO TO 60
+ 30 CONTINUE
+ SFLAG = ZERO
+ SD1 = SD1/SU
+ SD2 = SD2/SU
+ SX1 = SX1*SU
+* GO SCALE-CHECK..
+ GO TO 100
+ 40 CONTINUE
+ IF (.NOT.SQ2.LT.ZERO) GO TO 50
+* GO ZERO-H-D-AND-SX1..
+ GO TO 60
+ 50 CONTINUE
+ SFLAG = ONE
+ SH11 = SP1/SP2
+ SH22 = SX1/SY1
+ SU = ONE + SH11*SH22
+ STEMP = SD2/SU
+ SD2 = SD1/SU
+ SD1 = STEMP
+ SX1 = SY1*SU
+* GO SCALE-CHECK
+ GO TO 100
+* PROCEDURE..ZERO-H-D-AND-SX1..
+ 60 CONTINUE
+ SFLAG = -ONE
+ SH11 = ZERO
+ SH12 = ZERO
+ SH21 = ZERO
+ SH22 = ZERO
+*
+ SD1 = ZERO
+ SD2 = ZERO
+ SX1 = ZERO
+* RETURN..
+ GO TO 220
+* PROCEDURE..FIX-H..
+ 70 CONTINUE
+ IF (.NOT.SFLAG.GE.ZERO) GO TO 90
+*
+ IF (.NOT.SFLAG.EQ.ZERO) GO TO 80
+ SH11 = ONE
+ SH22 = ONE
+ SFLAG = -ONE
+ GO TO 90
+ 80 CONTINUE
+ SH21 = -ONE
+ SH12 = ONE
+ SFLAG = -ONE
+ 90 CONTINUE
+ GO TO IGO(120,150,180,210)
+* PROCEDURE..SCALE-CHECK
+ 100 CONTINUE
+ 110 CONTINUE
+ IF (.NOT.SD1.LE.RGAMSQ) GO TO 130
+ IF (SD1.EQ.ZERO) GO TO 160
+ ASSIGN 120 TO IGO
+* FIX-H..
+ GO TO 70
+ 120 CONTINUE
+ SD1 = SD1*GAM**2
+ SX1 = SX1/GAM
+ SH11 = SH11/GAM
+ SH12 = SH12/GAM
+ GO TO 110
+ 130 CONTINUE
+ 140 CONTINUE
+ IF (.NOT.SD1.GE.GAMSQ) GO TO 160
+ ASSIGN 150 TO IGO
+* FIX-H..
+ GO TO 70
+ 150 CONTINUE
+ SD1 = SD1/GAM**2
+ SX1 = SX1*GAM
+ SH11 = SH11*GAM
+ SH12 = SH12*GAM
+ GO TO 140
+ 160 CONTINUE
+ 170 CONTINUE
+ IF (.NOT.ABS(SD2).LE.RGAMSQ) GO TO 190
+ IF (SD2.EQ.ZERO) GO TO 220
+ ASSIGN 180 TO IGO
+* FIX-H..
+ GO TO 70
+ 180 CONTINUE
+ SD2 = SD2*GAM**2
+ SH21 = SH21/GAM
+ SH22 = SH22/GAM
+ GO TO 170
+ 190 CONTINUE
+ 200 CONTINUE
+ IF (.NOT.ABS(SD2).GE.GAMSQ) GO TO 220
+ ASSIGN 210 TO IGO
+* FIX-H..
+ GO TO 70
+ 210 CONTINUE
+ SD2 = SD2/GAM**2
+ SH21 = SH21*GAM
+ SH22 = SH22*GAM
+ GO TO 200
+ 220 CONTINUE
+ IF (SFLAG) 250,230,240
+ 230 CONTINUE
+ SPARAM(3) = SH21
+ SPARAM(4) = SH12
+ GO TO 260
+ 240 CONTINUE
+ SPARAM(2) = SH11
+ SPARAM(5) = SH22
+ GO TO 260
+ 250 CONTINUE
+ SPARAM(2) = SH11
+ SPARAM(3) = SH21
+ SPARAM(4) = SH12
+ SPARAM(5) = SH22
+ 260 CONTINUE
+ SPARAM(1) = SFLAG
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/sscal.f b/components/cism/glimmer-cism/libglimmer-solve/blas/sscal.f
new file mode 100644
index 0000000000..b900be9a36
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/sscal.f
@@ -0,0 +1,57 @@
+ SUBROUTINE SSCAL(N,SA,SX,INCX)
+* .. Scalar Arguments ..
+ REAL SA
+ INTEGER INCX,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*)
+* ..
+*
+* Purpose
+* =======
+*
+* scales a vector by a constant.
+* uses unrolled loops for increment equal to 1.
+* jack dongarra, linpack, 3/11/78.
+* modified 3/93 to return if incx .le. 0.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ INTEGER I,M,MP1,NINCX
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ IF (N.LE.0 .OR. INCX.LE.0) RETURN
+ IF (INCX.EQ.1) GO TO 20
+*
+* code for increment not equal to 1
+*
+ NINCX = N*INCX
+ DO 10 I = 1,NINCX,INCX
+ SX(I) = SA*SX(I)
+ 10 CONTINUE
+ RETURN
+*
+* code for increment equal to 1
+*
+*
+* clean-up loop
+*
+ 20 M = MOD(N,5)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ SX(I) = SA*SX(I)
+ 30 CONTINUE
+ IF (N.LT.5) RETURN
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,5
+ SX(I) = SA*SX(I)
+ SX(I+1) = SA*SX(I+1)
+ SX(I+2) = SA*SX(I+2)
+ SX(I+3) = SA*SX(I+3)
+ SX(I+4) = SA*SX(I+4)
+ 50 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/blas/sswap.f b/components/cism/glimmer-cism/libglimmer-solve/blas/sswap.f
new file mode 100644
index 0000000000..e23f380357
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/blas/sswap.f
@@ -0,0 +1,70 @@
+ SUBROUTINE SSWAP(N,SX,INCX,SY,INCY)
+* .. Scalar Arguments ..
+ INTEGER INCX,INCY,N
+* ..
+* .. Array Arguments ..
+ REAL SX(*),SY(*)
+* ..
+*
+* Purpose
+* =======
+*
+* interchanges two vectors.
+* uses unrolled loops for increments equal to 1.
+* jack dongarra, linpack, 3/11/78.
+* modified 12/3/93, array(1) declarations changed to array(*)
+*
+*
+* .. Local Scalars ..
+ REAL STEMP
+ INTEGER I,IX,IY,M,MP1
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MOD
+* ..
+ IF (N.LE.0) RETURN
+ IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20
+*
+* code for unequal increments or equal increments not equal
+* to 1
+*
+ IX = 1
+ IY = 1
+ IF (INCX.LT.0) IX = (-N+1)*INCX + 1
+ IF (INCY.LT.0) IY = (-N+1)*INCY + 1
+ DO 10 I = 1,N
+ STEMP = SX(IX)
+ SX(IX) = SY(IY)
+ SY(IY) = STEMP
+ IX = IX + INCX
+ IY = IY + INCY
+ 10 CONTINUE
+ RETURN
+*
+* code for both increments equal to 1
+*
+*
+* clean-up loop
+*
+ 20 M = MOD(N,3)
+ IF (M.EQ.0) GO TO 40
+ DO 30 I = 1,M
+ STEMP = SX(I)
+ SX(I) = SY(I)
+ SY(I) = STEMP
+ 30 CONTINUE
+ IF (N.LT.3) RETURN
+ 40 MP1 = M + 1
+ DO 50 I = MP1,N,3
+ STEMP = SX(I)
+ SX(I) = SY(I)
+ SY(I) = STEMP
+ STEMP = SX(I+1)
+ SX(I+1) = SY(I+1)
+ SY(I+1) = STEMP
+ STEMP = SX(I+2)
+ SX(I+2) = SY(I+2)
+ SY(I+2) = STEMP
+ 50 CONTINUE
+ RETURN
+ END
diff --git a/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse.F90 b/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse.F90
new file mode 100644
index 0000000000..0cd58b000f
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse.F90
@@ -0,0 +1,470 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_sparse.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glimmer_sparse
+
+ ! This module used to be a wrapper for the umfpack and pardiso solvers.
+ ! These have been removed, and now it is just a wrapper for the slap solver.
+
+ use glimmer_global, only: dp
+ use glimmer_sparse_type
+ use glimmer_sparse_slap
+ use glide_types
+
+ implicit none
+
+ type sparse_solver_options
+ type(sparse_solver_options_base) :: base
+ type(slap_solver_options) :: slap
+ end type
+
+ type sparse_solver_workspace
+ type(slap_solver_workspace), pointer :: slap => null()
+ end type
+
+
+ ! These module level parameters are assigned from similar parameters defined in glide_types.F90
+ integer, parameter :: SPARSE_HO_NONLIN_PICARD = HO_NONLIN_PICARD
+ integer, parameter :: SPARSE_HO_NONLIN_JFNK = HO_NONLIN_JFNK
+
+ ! The first three options use the SLAP solver and work only on one processor.
+ integer, parameter :: SPARSE_SOLVER_PCG_INCH = HO_SPARSE_PCG_INCH ! SLAP PCG with incomplete Cholesky preconditioner
+ integer, parameter :: SPARSE_SOLVER_BICG = HO_SPARSE_BICG ! SLAP biconjugate gradient
+ integer, parameter :: SPARSE_SOLVER_GMRES = HO_SPARSE_GMRES ! SLAP GMRES
+ integer, parameter :: STANDALONE_PCG_STANDARD = HO_SPARSE_PCG_STANDARD ! Native PCG, parallel-enabled, standard solver
+ integer, parameter :: STANDALONE_PCG_CHRONGEAR = HO_SPARSE_PCG_CHRONGEAR ! Native PCG, parallel-enabled, Chronopoulos-Gear solver
+ integer, parameter :: STANDALONE_TRILINOS_SOLVER = HO_SPARSE_TRILINOS ! Trilinos solver
+
+
+
+contains
+
+
+
+ subroutine sparse_solver_default_options(method, opt, nonlinear)
+
+ use parallel
+ integer, intent(in) :: method ! sparse solver: BiCG, GMRES, PCG, etc.
+ integer, optional, intent(in) :: nonlinear ! Picard vs. JFNK flag
+ type(sparse_solver_options) :: opt !TODO - intent inout or out?
+
+ opt%base%method = method
+ opt%base%tolerance = 1.0d-08 !WHL - used to be 1e-11
+ opt%base%maxiters = 200
+
+ if ( present(nonlinear) )then
+ if (nonlinear .eq. SPARSE_HO_NONLIN_PICARD) opt%base%tolerance = 1.0d-08 ! Picard
+ if (nonlinear .eq. SPARSE_HO_NONLIN_JFNK) opt%base%tolerance = 1.0d-03 ! JFNK
+ else ! Picard
+ opt%base%tolerance = 1.0d-08
+ end if
+
+ !TODO - Remove calls to not_parallel?
+ ! These seem unnecessary when running SLAP solver. Commented out for now.
+
+ !TODO - Remove calls to slap_default_options; set appropriate options here instead.
+
+ !Solver specific options
+
+ if (method == SPARSE_SOLVER_BICG) then
+! call not_parallel(__FILE__,__LINE__)
+ call slap_default_options(opt%slap, opt%base)
+ opt%base%method = SPARSE_SOLVER_BICG
+! opt%slap%itol = 2 ! current default = 2 in slap_default_options
+
+ else if (method == SPARSE_SOLVER_GMRES) then
+! call not_parallel(__FILE__,__LINE__)
+ call slap_default_options(opt%slap, opt%base)
+ opt%base%method = SPARSE_SOLVER_GMRES
+! opt%slap%itol = 2 ! current default = 2 in slap_default_options
+
+ else if (method == SPARSE_SOLVER_PCG_INCH) then
+! call not_parallel(__FILE__, __LINE__)
+ call slap_default_options(opt%slap, opt%base)
+ opt%base%method = SPARSE_SOLVER_PCG_INCH
+ opt%slap%itol = 1
+ !WHL - itol = 2 does not work for simple test problems
+
+ else
+ !call glide_finalise_all(.true.)
+ call write_log("Invalid sparse matrix option", GM_FATAL)
+
+ end if
+
+ end subroutine sparse_solver_default_options
+
+ subroutine sparse_allocate_workspace(matrix, options, workspace, max_nonzeros_arg)
+
+ use parallel
+ !> Allocate solver workspace. This needs to be done once
+ !> (when the maximum number of nonzero entries is first known)
+ !> This function need not be safe to call on already allocated memory
+ !>
+ !> Note that the max_nonzeros argument must be optional, and if
+ !> it is not supplied the current number of nonzeroes must be used.
+ type(sparse_matrix_type), intent(in) :: matrix
+ type(sparse_solver_options) :: options
+ type(sparse_solver_workspace) :: workspace
+ integer, optional :: max_nonzeros_arg
+ integer :: max_nonzeros
+
+ if (present(max_nonzeros_arg)) then
+ max_nonzeros = max_nonzeros_arg
+ else
+ max_nonzeros = matrix%nonzeros
+ end if
+
+ !TODO - Anything needed for standalone_pcg_solver?
+
+ if (options%base%method == SPARSE_SOLVER_BICG .or. &
+ options%base%method == SPARSE_SOLVER_GMRES .or. &
+ options%base%method == SPARSE_SOLVER_PCG_INCH) then
+! call not_parallel(__FILE__,__LINE__)
+ allocate(workspace%slap)
+ call slap_allocate_workspace(matrix, options%slap, workspace%slap, max_nonzeros)
+ end if
+
+ end subroutine sparse_allocate_workspace
+
+ subroutine sparse_solver_preprocess(matrix, options, workspace)
+ !> Performs any preprocessing needed to be performed on the slap
+ !> matrix. Workspace must have already been allocated.
+ !> This function should be safe to call more than once.
+ !>
+ !> It is an error to call this function on a workspace without
+ !> allocated memory
+ !>
+ !> In general slap_allocate_workspace should perform any actions
+ !> that depend on the *size* of the slap matrix, and
+    !> sparse_solver_preprocess should perform any actions that depend
+ !> upon the *contents* of the slap matrix.
+ type(sparse_matrix_type), intent(in) :: matrix
+ type(sparse_solver_options) :: options
+ type(sparse_solver_workspace) :: workspace
+
+ if (options%base%method == SPARSE_SOLVER_BICG .or. &
+ options%base%method == SPARSE_SOLVER_GMRES .or. &
+ options%base%method == SPARSE_SOLVER_PCG_INCH) then
+
+ call slap_solver_preprocess(matrix, options%slap, workspace%slap)
+
+ end if
+
+ end subroutine sparse_solver_preprocess
+
+ function sparse_solve(matrix, rhs, solution, &
+ options, workspace, &
+ err, niters, verbose)
+
+ !> Solves the linear system, and reports status information.
+ !> This function returns an error code that should be zero if the
+ !> call succeeded and nonzero if it failed. No additional error codes
+ !> are defined. Although this function reports back the final error
+ !> and the number of iterations needed to converge, these should *not*
+ !> be relied upon as not every slap linear solver may report them.
+
+ ! Note: The matrix needs to be intent(in), not (inout).
+ ! If the matrix is modified, then the residual will be computed incorrectly
+ ! in the higher-level subroutine that calls sparse_solve.
+
+ type(sparse_matrix_type), intent(in) :: matrix
+ !> Sparse matrix to solve
+
+ real(kind=dp), dimension(:), intent(in) :: rhs
+ !> Right hand side of the solution vector
+
+ real(kind=dp), dimension(:), intent(inout) :: solution
+ !> Solution vector, containing an initial guess
+
+ type(sparse_solver_options), intent(in) :: options
+ !> Options such as convergence criteria
+
+ type(sparse_solver_workspace), intent(inout) :: workspace
+ !> Internal solver workspace
+
+ real(kind=dp), intent(out) :: err
+ !> Final solution error
+
+ integer, intent(out) :: niters
+ !> Number of iterations required to reach the solution
+
+ logical, optional, intent(in) :: verbose
+ !> If present and true, this argument may cause diagnostic information
+ !> to be printed by the solver (not every solver may implement this).
+
+ integer :: sparse_solve
+
+ logical :: verbose_var
+
+ verbose_var = .false.
+ if (present(verbose)) then
+ verbose_var = verbose
+ end if
+
+ if (options%base%method == SPARSE_SOLVER_BICG .or. &
+ options%base%method == SPARSE_SOLVER_GMRES .or. &
+ options%base%method == SPARSE_SOLVER_PCG_INCH) then
+
+ sparse_solve = slap_solve(matrix, rhs, solution, &
+ options%slap, workspace%slap, &
+ err, niters, verbose_var)
+
+ end if
+
+ end function sparse_solve
+
+
+ subroutine sparse_solver_postprocess(matrix, options, workspace)
+ type(sparse_matrix_type), intent(in) :: matrix
+ type(sparse_solver_options) :: options
+ type(sparse_solver_workspace) :: workspace
+
+ if (options%base%method == SPARSE_SOLVER_BICG .or. &
+ options%base%method == SPARSE_SOLVER_GMRES .or. &
+ options%base%method == SPARSE_SOLVER_PCG_INCH) then
+
+ call slap_solver_postprocess(matrix, options%slap, workspace%slap)
+
+ end if
+
+ end subroutine sparse_solver_postprocess
+
+ subroutine sparse_destroy_workspace(matrix, options, workspace)
+
+ !> Deallocates all working memory for the slap linear solver.
+    !> This need *not* be safe to call on an unallocated workspace.
+ !> No slap solver should call this automatically.
+
+ type(sparse_matrix_type), intent(in) :: matrix
+ type(sparse_solver_options) :: options
+ type(sparse_solver_workspace) :: workspace
+
+ if (options%base%method == SPARSE_SOLVER_BICG .or. &
+ options%base%method == SPARSE_SOLVER_GMRES .or. &
+ options%base%method == SPARSE_SOLVER_PCG_INCH) then
+
+ call slap_destroy_workspace(matrix, options%slap, workspace%slap)
+ deallocate(workspace%slap)
+
+
+ end if
+
+ end subroutine sparse_destroy_workspace
+
+ subroutine sparse_interpret_error(options, error_code, error_string)
+
+ !> takes an error code output from slap_solve and interprets it.
+ !> error_string must be an optional argument.
+ !> If it is not provided, the error is printed to standard out
+ !> instead of being put in the string
+
+ type(sparse_solver_options) :: options
+ integer :: error_code
+ character(*), optional, intent(out) :: error_string
+ character(256) :: tmp_error_string
+
+ if (options%base%method == SPARSE_SOLVER_BICG .or. &
+ options%base%method == SPARSE_SOLVER_GMRES .or. &
+ options%base%method == SPARSE_SOLVER_PCG_INCH) then
+
+ call slap_interpret_error(error_code, tmp_error_string)
+
+ endif
+
+ if (present(error_string)) then
+ error_string = tmp_error_string
+ else
+ write(*,*) tmp_error_string
+ endif
+
+ end subroutine sparse_interpret_error
+
+ subroutine sparse_easy_solve(matrix, rhs, answer, err, iter, method_arg, &
+ calling_file, calling_line, nonlinear_solver )
+
+ !This subroutine wraps the basic (though probably the most inefficient)
+ !workflow to solve a sparse matrix using the sparse matrix solver
+ !framework. It handles errors gracefully, and reports back the
+ !iterations required and the error estimate in the case of an iterative
+ !solver. At the very least it is an encapsulated example of how to
+ !use the sparse solver routines, and is easy enough to drop in your
+ !code if you don't care about allocating and deallocating workspace
+ !every single timestep.
+
+ type(sparse_matrix_type), intent(in) :: matrix
+ real(dp), dimension(:), intent(in) :: rhs
+ real(dp), dimension(:), intent(inout) :: answer
+
+ real(dp), intent(out) :: err
+ integer, intent(out) :: iter
+
+ integer, optional, intent(in) :: method_arg ! solver method: BiCG, GMRES, PCG, etc.
+ integer, optional, intent(in) :: nonlinear_solver ! Picard or JFNK
+
+ character(*), optional :: calling_file
+ integer, optional :: calling_line
+
+ type(sparse_solver_options) :: opt
+ type(sparse_solver_workspace) :: wk
+
+ integer :: ierr
+ integer :: method
+ integer :: nonlinear
+
+ if (present(method_arg)) then
+ method = method_arg
+ else
+ method = SPARSE_SOLVER_BICG
+ endif
+
+ if (present(nonlinear_solver)) then
+ nonlinear = nonlinear_solver
+ else
+ nonlinear = SPARSE_HO_NONLIN_PICARD
+ endif
+
+ if (verbose_slap) then
+ print*, ' '
+ print*, 'In sparse_easy_solve'
+ print*, 'method (0=BiCG, 1=GMRES, 2=PCG_INCH) =', method
+ print*, 'nonlinear (0=Picard, 1=JFNK) =', nonlinear
+ print*, 'matrix%order =', matrix%order
+ print*, 'matrix%nonzeros =', matrix%nonzeros
+ print*, 'size(rhs) =', size(rhs)
+ print*, 'size(answer) =', size(answer)
+ print*, 'size(row) =', size(matrix%row)
+ print*, 'size(col) =', size(matrix%col)
+ print*, 'size(val) =', size(matrix%val)
+ endif
+
+ call sparse_solver_default_options(method, opt, nonlinear)
+
+ call sparse_allocate_workspace(matrix, opt, wk)
+
+ call sparse_solver_preprocess(matrix, opt, wk)
+
+ ierr = sparse_solve(matrix, rhs, answer, opt, wk, err, iter, .false.)
+
+ if (verbose_slap) then
+ print*, ' '
+ print*, 'Called sparse_solve: iter, err =', iter, err
+ endif
+
+ call sparse_solver_postprocess(matrix, opt, wk)
+
+ if (ierr /= 0) then
+ if (present(calling_file) .and. present(calling_line)) then
+ call handle_sparse_error(matrix, opt, ierr, calling_file, calling_line)
+ else
+ call handle_sparse_error(matrix, opt, ierr, __FILE__, __LINE__)
+ end if
+ end if
+
+ call sparse_destroy_workspace(matrix, opt, wk)
+
+ end subroutine sparse_easy_solve
+
+ subroutine handle_sparse_error(matrix, solver_options, error, error_file, error_line, time)
+
+ !Checks a sparse error flag and, if an error occurred, log it to
+ !the GLIMMER log file. This does not stop Glimmer, it just writes
+ !to the log
+ !use glide_stop
+ use glimmer_log
+ use glimmer_filenames
+
+ integer :: error
+ integer, optional :: error_line
+ character(*), optional :: error_file
+ real(dp), optional :: time
+
+ type(sparse_matrix_type), intent(in) :: matrix
+ type(sparse_solver_options) :: solver_options
+ integer :: isym
+ integer :: lunit
+ integer :: i
+
+ character(512) :: message
+ character(128) :: errfname
+ character(256) :: errdesc
+
+ !If no error happened, this routine should be a nop
+ if (error == 0 .OR. error == 2 .OR. error == 6) return
+
+    !Acquire a file unit, and open the file
+ lunit = get_free_unit()
+ errfname = trim(process_path('sparse_dump.txt'))
+ open(lunit,file=errfname)
+
+ if (matrix%symmetric) then
+ isym = 1
+ else
+ isym = 0
+ end if
+
+ !Output sparse matrix data to the file
+
+ call dcpplt(matrix%order, matrix%nonzeros, matrix%row, matrix%col, matrix%val,&
+ isym, lunit)
+
+ write(lunit,*) '***Sparse matrix structure ends. Value listing begins'
+ do i=1,matrix%nonzeros
+ write(lunit,*) matrix%val(i)
+ end do
+
+ !Close unit and finish off
+ close(lunit)
+
+ !Grab the error message from the sparse solver
+ call sparse_interpret_error(solver_options, error, errdesc)
+
+ !construct the error message and write it to the log file
+ if (present(time)) then
+ write(message, *)'Sparse matrix error at time: ', time, &
+ 'Error description: ', errdesc, &
+ 'Data dumped to ', trim(errfname)
+ else
+ write(message, *)'Sparse matrix error. Error description: ', errdesc, &
+ 'Data dumped to ', trim(errfname)
+ end if
+
+ write(*,*)message
+
+ !call glide_finalise_all(.true.)
+
+ if (present(error_file) .and. present(error_line)) then
+ call write_log(trim(errdesc), GM_FATAL, error_file, error_line)
+ else
+ call write_log(trim(errdesc), GM_FATAL, __FILE__, __LINE__)
+ end if
+
+ end subroutine handle_sparse_error
+
+end module glimmer_sparse
diff --git a/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse_slap.F90 b/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse_slap.F90
new file mode 100644
index 0000000000..230ee7148b
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse_slap.F90
@@ -0,0 +1,462 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_sparse_slap.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+module glimmer_sparse_slap
+ !> This module builds on the glimmer_slap module to provide an easy
+ !> interface to SLAP. The SLAP interface is intended to be both
+ !> usable and a guide to implementing other interfaces
+
+ use glimmer_sparse_type
+ use glimmer_global, only: dp, size_t
+ use glimmer_log
+ implicit none
+
+ type slap_solver_workspace
+ !> This type contains any working memory needed for the slap solver.
+ !> It is used to store states between calls to the solver
+ !> In the SLAP implementation, it is used to store the SLAP workspace
+ !> This module must have this type, but its contents should be opaque
+ !> to the user (e.g. client code should only manipulate the
+ !> slap_solver_workspace as a whole and should never touch its members)
+ real(kind=dp), dimension(:), pointer :: rwork => NULL()
+ integer, dimension(:), pointer :: iwork => NULL()
+ integer :: max_nelt !> Maximum number of nonzeroes allowed given the allocated workspace
+ end type slap_solver_workspace
+
+ type slap_solver_options
+ !> This type holds options that are passed to the slap solver, such
+ !> as preconditioner type, error tolerances, etc. At a minimum, it
+ !> must define the tolerance and maxiters field, as these will be
+ !> common to any iterative slap linear solver. Other options
+ !> can be defined as necessary.
+ !>
+ !> Design note: the options are separated from the workspace because
+ !> one set of options could apply to multiple matrices, and the
+ !> lifecycles for each could be different (a workspace need only
+ !> exist as long as the matrix does, the options could persist
+ !> throughout the entire program)
+
+ integer :: itol !> Tolerance code, see SLAP documentation
+ integer :: gmres_saved_vectors !> How many vectors to save while performing GMRES iteration
+ type(sparse_solver_options_base), pointer :: base => null() !> Pointer to basic options
+
+ end type slap_solver_options
+
+ logical, parameter :: verbose_slap = .false.
+
+contains
+
+!TODO - It may be better to set the desired defaults for each method individually (GMRES, BiCG, PCG, etc.)
+
+ subroutine slap_default_options(opt, base)
+
+ !> Populates a slap_solver_options (defined above) with default
+ !> options. This is necessary because different solvers may define
+ !> different options beyond the required fields defined above.
+ !> Filling them in this function allows client code to pick "good"
+ !> values in a generic way.
+
+ type(slap_solver_options), intent(out) :: opt
+ type(sparse_solver_options_base), intent(in), target :: base
+
+ !TODO - This value of itol may not be optimal for all solver options.
+ ! The PCG solver fails for simple test matrices with itol=2, but does fine with itol=1.
+ opt%itol = 2
+ opt%gmres_saved_vectors = 20
+ opt%base => base
+
+ end subroutine slap_default_options
+
+  subroutine slap_allocate_workspace(matrix, options, workspace, max_nonzeros_arg)
+    !> Allocate solver workspace. This needs to be done once
+    !> (when the maximum number of nonzero entries is first known)
+    !> This function need not be safe to call on already allocated memory
+    !>
+    !> Note that the max_nonzeros argument must be optional, and if
+    !> it is not supplied the current number of nonzeroes must be used.
+
+    type(sparse_matrix_type), intent(in) :: matrix
+    type(slap_solver_options) :: options
+    type(slap_solver_workspace) :: workspace
+    integer, optional :: max_nonzeros_arg
+    integer :: max_nonzeros
+    integer(kind=size_t) :: lenrw
+    integer(kind=size_t) :: leniw
+
+    if (present(max_nonzeros_arg)) then
+       max_nonzeros = max_nonzeros_arg
+    else
+       max_nonzeros = matrix%nonzeros
+    end if
+
+    !Only allocate the memory if it hasn't been allocated or it needs to grow
+
+    if (.not. associated(workspace%rwork) .or. workspace%max_nelt < max_nonzeros) then
+        !If memory is already allocated get rid of it
+        if (associated(workspace%rwork)) then
+            deallocate(workspace%rwork)
+            deallocate(workspace%iwork)
+        end if
+
+        !Figure out how much memory to allocate.  These figures were derived
+        !from the SLAP documentation.
+        lenrw = 20*max_nonzeros
+        leniw = 20*max_nonzeros
+
+        if (lenrw < 0 .or. leniw < 0) then
+            call write_log("The amount of workspace memory that SLAP needs caused a numerical overflow. " // &
+                           "If you are not running on a 64-bit architecture, you will need to decrease " // &
+                           "the size of your data set. If you are running a 64-bit architecture, try " // &
+                           "modifying size_t in glimmer_global to a larger size and recompiling Glimmer.", GM_FATAL)
+        end if
+
+        !write(*,*) "MAX NONZEROS",max_nonzeros
+        !write(*,*) "ALLOCATING WORKSPACE",lenrw,leniw
+
+        allocate(workspace%rwork(lenrw))
+        allocate(workspace%iwork(leniw))
+        !Record the number of nonzeros so we know whether to allocate more
+        !memory in the future
+        workspace%max_nelt = max_nonzeros
+    end if
+  end subroutine slap_allocate_workspace
+
+
+ subroutine slap_solver_preprocess(matrix, options, workspace)
+
+ !> Performs any preprocessing needed for the slap solver.
+ !> Workspace must have already been allocated.
+ !> This function should be safe to call more than once.
+ !>
+ !> It is an error to call this function on a workspace without
+ !> allocated memory
+ !>
+ !> In general slap_allocate_workspace should perform any actions
+ !> that depend on the *size* of the slap matrix, and
+ !> sparse_solver_preprocess should perform any actions that depend
+ !> upon the *contents* of the slap matrix.
+
+ type(sparse_matrix_type), intent(in) :: matrix
+ type(slap_solver_options) :: options
+ type(slap_solver_workspace) :: workspace
+
+ ! Nothing to do here. Move along.
+
+ end subroutine slap_solver_preprocess
+
+ function slap_solve (matrix, rhs, solution, options, workspace,err,niters, verbose)
+
+ use glide_types ! only for HO_SPARSE parameter values
+
+ !> Solves the slap linear system, and reports status information.
+ !> This function returns an error code that should be zero if the
+ !> call succeeded and nonzero if it failed. No additional error codes
+ !> are defined. Although this function reports back the final error
+ !> and the number of iterations needed to converge, these should *not*
+ !> be relied upon as not every slap linear solver may report them.
+
+ !Note: The matrix should be intent(in) rather than (inout).
+ ! This requires making a local copy of some data.
+
+ type(sparse_matrix_type), intent(in) :: matrix
+ !> Sparse matrix to solve.
+
+ real(kind=dp), dimension(:), intent(in) :: rhs
+ !> Right hand side of the solution vector
+
+ real(kind=dp), dimension(:), intent(inout) :: solution
+ !> Solution vector, containing an initial guess.
+
+ type(slap_solver_options), intent(in) :: options
+ !> Options such as convergence criteria
+
+ type(slap_solver_workspace), intent(inout) :: workspace
+ !> Internal solver workspace
+
+ real(kind=dp), intent(out) :: err
+ !> Final solution error
+
+ integer, intent(out) :: niters
+ !> Number of iterations required to reach the solution
+
+ logical, optional, intent(in) :: verbose
+ !> If present and true, this argument may cause diagnostic information
+ !> to be printed by the solver (not every solver may implement this).
+
+ integer, dimension(matrix%nonzeros) :: &
+ matrix_row, &! local copy of matrix%row
+ matrix_col ! local copy of matrix%col
+
+ real(kind=dp), dimension(matrix%nonzeros) :: &
+ matrix_val ! local copy of matrix%val
+
+ integer :: slap_solve
+
+ integer :: ierr !SLAP-provided error code
+ integer :: iunit !Unit number to print verbose output to (6=stdout, 0=no output)
+ integer :: isym !Whether matrix is symmetric
+
+ logical :: allzeros
+ integer :: i
+
+ !WHL - debug (for checking matrix symmetry)
+ integer :: n, m, j
+ logical, parameter :: &
+ check_symmetry = .false. ! if true, check matrix symmetry (takes a long time for big matrices)
+ logical :: sym_partner
+ real(dp) :: avg_val
+
+ iunit = 0
+ if (present(verbose)) then
+ if(verbose) then
+ iunit=6
+ write(*,*) 'Tolerance=',options%base%tolerance
+ end if
+ end if
+
+ if (matrix%symmetric) then
+ isym = 1
+ else
+ isym = 0
+ end if
+
+ allzeros = .true.
+
+ !Check if the RHS is zero; if it is, don't iterate! The biconjugate
+ !gradient method doesn't work in this case
+ zero_check: do i = 1, size(rhs)
+ if (rhs(i) /= 0) then
+ allzeros = .false.
+ exit zero_check
+ end if
+ end do zero_check
+
+ !----------------------------------------------
+ ! RN_20091102: An example of calls to Trilinos solvers
+ !#ifdef HAVE_TRILINOS
+ !call helloworld()
+ !#endif
+ !----------------------------------------------
+
+ if (allzeros) then
+ err = 0
+ ierr = 0
+ niters = 0
+ solution = 0
+ call write_log("RHS of all zeros passed to BCG method; iteration not performed.", &
+ GM_WARNING, __FILE__, __LINE__)
+ else
+
+ !Set up SLAP if it hasn't been already
+ call slap_solver_preprocess(matrix, options, workspace)
+
+ if (verbose_slap) then
+ print*, ' '
+ print*, 'In slap_solve'
+ print*, 'method =', options%base%method
+ print*, 'order =', matrix%order
+ print*, 'nonzeros =', matrix%nonzeros
+ print*, 'isym =', isym
+ print*, 'itol =', options%itol
+ print*, 'tolerance =', options%base%tolerance
+ print*, 'maxiters =', options%base%maxiters
+ print*, 'size(row) = ', size(matrix%row)
+ print*, 'size(col) = ', size(matrix%col)
+ print*, 'size(val) = ', size(matrix%val)
+ print*, 'size(rwork) =', size(workspace%rwork)
+ print*, 'size(iwork) =', size(workspace%iwork)
+ endif
+
+ ! Make a local copy of the nonzero matrix entries.
+ ! These local arrays can be passed to the various SLAP solvers with intent(inout)
+ ! and modified by SLAP without changing matrix%row, matrix%col, and matrix%val.
+
+ do n = 1, matrix%nonzeros
+ matrix_row(n) = matrix%row(n)
+ matrix_col(n) = matrix%col(n)
+ matrix_val(n) = matrix%val(n)
+ enddo
+
+ !TODO - Remove this code when no longer needed for debugging
+ ! This can take a long time. It's more efficient to check symmetry at a higher level,
+ ! in the glissade velo solver.
+
+ if (check_symmetry) then
+ print*, 'Check symmetry...could take a while'
+ do n = 1, matrix%nonzeros
+ i = matrix_row(n)
+ j = matrix_col(n)
+ sym_partner = .false.
+ do m = 1, matrix%nonzeros
+ if (matrix_col(m)==i .and. matrix_row(m)==j) then
+ if (matrix_val(m) == matrix_val(n)) then
+ sym_partner = .true.
+ else ! fix if difference is small, else abort
+ if ( abs ((matrix_val(m)-matrix_val(n))/matrix_val(m)) < 1.e-10 ) then
+ avg_val = 0.5d0 * (matrix_val(m) + matrix_val(n))
+ matrix_val(m) = avg_val
+ matrix_val(n) = avg_val
+ sym_partner = .true.
+ else
+ print*, ' '
+ print*, 'Entry (i,j) not equal to (j,i)'
+ print*, 'i, j, val(i,j), val(j,i):', i, j, matrix%val(n), matrix%val(m)
+!! stop
+ endif
+ endif
+ go to 100
+ endif
+ enddo
+ if (.not. sym_partner) then
+ print*, ' '
+ print*, 'Entry (i,j) has no corresponding (j,i): n, i, j, val =', n, i, j, matrix%val(n)
+ endif
+100 continue
+ enddo
+
+ endif ! check_symmetry
+
+
+ select case(options%base%method)
+
+ ! Case values come from parameters defined in glide_types.F90.
+ ! (These parameter values are also used in glimmer_sparse.F90.)
+
+ case(HO_SPARSE_GMRES) ! GMRES
+
+ if (verbose_slap) then
+ print*, 'Call dslugm (GMRES)'
+ print*, 'maxiters, tolerance =', options%base%maxiters, options%base%tolerance
+ endif
+
+ call dslugm(matrix%order, rhs, solution, matrix%nonzeros, &
+ matrix_row, matrix_col, matrix_val, &
+ isym, options%gmres_saved_vectors, options%itol, &
+ options%base%tolerance, options%base%maxiters, &
+ niters, err, ierr, iunit, &
+ workspace%rwork, size(workspace%rwork), workspace%iwork, size(workspace%iwork))
+
+ if (verbose_slap) print*, 'GMRES: iters, err =', niters, err
+
+ case(HO_SPARSE_PCG_STANDARD) ! PCG with incomplete Cholesky preconditioner
+
+ if (verbose_slap) then
+ print*, 'Call dsiccg (PCG, incomplete Cholesky)'
+ endif
+
+ !TODO - Pass in just half the matrix?
+ ! If we pass in the entire matrix, then the preconditioner is fragile in the sense
+ ! that it can fail with very small departures from symmetry (due to roundoff errors)
+
+ call dsiccg(matrix%order, rhs, solution, matrix%nonzeros, &
+ matrix_row, matrix_col, matrix_val, &
+ isym, options%itol, options%base%tolerance, options%base%maxiters,&
+ niters, err, ierr, iunit, &
+ workspace%rwork, size(workspace%rwork), workspace%iwork, size(workspace%iwork))
+
+ if (verbose_slap) print*, 'PCG_inch: iters, err =', niters, err
+
+ case (HO_SPARSE_BICG) ! Biconjugate gradient
+
+ if (verbose_slap) then
+ print*, 'Call dslucs (biconjugate gradient)'
+ print*, 'maxiters, tolerance =', options%base%maxiters, options%base%tolerance
+ endif
+
+ call dslucs(matrix%order, rhs, solution, matrix%nonzeros, &
+ matrix_row, matrix_col, matrix_val, &
+ isym, options%itol, options%base%tolerance, options%base%maxiters,&
+ niters, err, ierr, iunit, &
+ workspace%rwork, size(workspace%rwork), workspace%iwork, size(workspace%iwork))
+
+ if (verbose_slap) print*, 'BiCG: iters, err =', niters, err
+
+ case default
+ call write_log('Unknown method passed to SLAP solver', GM_FATAL)
+
+ end select ! slap solver
+
+ endif ! allzeros
+
+ slap_solve = ierr
+
+ end function slap_solve
+
+ subroutine slap_solver_postprocess(matrix, options, workspace)
+ type(sparse_matrix_type), intent(in) :: matrix
+ type(slap_solver_options) :: options
+ type(slap_solver_workspace) :: workspace
+ end subroutine
+
+ subroutine slap_destroy_workspace(matrix, options, workspace)
+ !> Deallocates all working memory for the slap linear solver.
+ !> This need *not* be safe to call on an unallocated workspace
+ !> No slap solver should call this automatically.
+ type(sparse_matrix_type), intent(in) :: matrix
+ type(slap_solver_options) :: options
+ type(slap_solver_workspace) :: workspace
+ !Deallocate all of the working memory
+ deallocate(workspace%rwork)
+ deallocate(workspace%iwork)
+ end subroutine slap_destroy_workspace
+
+ subroutine slap_interpret_error(error_code, error_string)
+ !> takes an error code output from slap_solve and interprets it.
+ !> error_string must be an optional argument.
+ !> If it is not provided, the error is printed to standard out
+ !> instead of being put in the string
+ integer :: error_code
+ character(*), optional, intent(out) :: error_string
+ character(256) :: tmp_error_string
+
+ select case (error_code)
+ case (0)
+ tmp_error_string="All went well"
+ case (1)
+ tmp_error_string="Insufficient space allocated for WORK or IWORK"
+ case (2)
+ tmp_error_string="Method failed to converge in ITMAX steps"
+ case (3)
+ tmp_error_string="Error in user input. Check input values of N, ITOL."
+ case (4)
+ tmp_error_string="User error tolerance set too tight."
+ case (5)
+ tmp_error_string="Breakdown of the method detected. (r0,r) approximately 0."
+ case (6)
+ tmp_error_string="Stagnation of the method detected. (r0, v) approximately 0."
+ case (7)
+ tmp_error_string="Incomplete factorization broke down and was fudged."
+ end select
+
+
+ if (present(error_string)) then
+ error_string = tmp_error_string
+ else
+ write(*,*) tmp_error_string
+ endif
+ end subroutine slap_interpret_error
+
+end module glimmer_sparse_slap
diff --git a/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse_type.F90 b/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse_type.F90
new file mode 100644
index 0000000000..0a7e790320
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-solve/glimmer_sparse_type.F90
@@ -0,0 +1,483 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_sparse_type.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glimmer_sparse_type
+
+ use glimmer_global, only:dp
+ implicit none
+
+ ! sparse matrix type
+ type sparse_matrix_type
+ integer :: nonzeros ! number of nonzero elements currently stored
+ integer :: order ! order of the matrix (e.g. number of rows)
+ logical :: symmetric ! True only if triangle of the symmetric matrix stored
+ integer, dimension(:), pointer :: col => NULL() ! column index
+ integer, dimension(:), pointer :: row => NULL() ! row index
+ real(kind=dp), dimension(:), pointer :: val => NULL() ! values
+ end type sparse_matrix_type
+
+ type sparse_solver_options_base
+ real(kind=dp) :: tolerance ! Error tolerance
+ integer :: maxiters ! Max iterations before giving up
+ integer :: method
+ end type
+
+ ! size of sparse matrix
+ integer, parameter, private :: chunksize=1000
+
+ !MAKE_RESTART
+!EIB!#ifdef RESTARTS
+!EIB!#define RST_GLIMMER_SPARSE
+!EIB!#include "glimmer_rst_head.inc"
+!EIB!#undef RST_GLIMMER_SPARSE
+!EIB!#endif
+
+contains
+
+!EIB!#ifdef RESTARTS
+!EIB!#define RST_GLIMMER_SPARSE
+!EIB!#include "glimmer_rst_body.inc"
+!EIB!#undef RST_GLIMMER_SPARSE
+!EIB!#endif
+
+ subroutine new_sparse_matrix(order,n,mat)
+ ! create a new sparse matrix
+ implicit none
+ integer, intent(in) :: n ! initial size of matrix (non-zeros)
+ type(sparse_matrix_type) :: mat ! matrix
+ integer, intent(in) :: order ! Order (number of rows and columns) of the matrix
+
+ if (.not.associated(mat%col)) then
+ allocate(mat%row(n))
+ !SLAP's sparse column scheme looks past the assumed bounds of col to see
+ !what sparse storage format we're in. To avoid array bounds problems, we
+ !add 2 to the column size. See mailing list discussion at:
+ !http://forge.nesc.ac.uk/pipermail/glimmer-discuss/2005-February/000078.html
+ allocate(mat%col(n+2))
+ allocate(mat%val(n))
+ else
+ if (size(mat%row).lt.n) then
+ call del_sparse_matrix(mat)
+ allocate(mat%row(n))
+ allocate(mat%col(n+2))
+ allocate(mat%val(n))
+ end if
+ end if
+ mat%nonzeros = 0
+ mat%order = order
+ mat%symmetric = .false.
+ end subroutine new_sparse_matrix
+
+ subroutine copy_sparse_matrix(inmat,outmat)
+ ! copy a sparse matrix.
+ ! Slap workspace allocation on the new
+ ! matrix is *not* done.
+ implicit none
+ type(sparse_matrix_type) :: inmat ! matrix to be copied
+ type(sparse_matrix_type) :: outmat ! result matrix
+
+ call new_sparse_matrix(inmat%order,inmat%nonzeros,outmat)
+ outmat%row(:) = inmat%row(:)
+ outmat%col(:) = inmat%col(:)
+ outmat%val(:) = inmat%val(:)
+ outmat%nonzeros = inmat%nonzeros
+ outmat%symmetric = inmat%symmetric
+ end subroutine copy_sparse_matrix
+
+ subroutine grow_sparse_matrix(matrix)
+ ! grow sparse matrix
+ implicit none
+ type(sparse_matrix_type) :: matrix ! matrix
+
+ integer, dimension(:), pointer :: newrow,newcol
+ real(kind=dp), dimension(:), pointer :: newval
+ integer oldsize
+
+ oldsize = size(matrix%val)
+
+ allocate(newrow(chunksize+oldsize))
+ allocate(newcol(chunksize+oldsize))
+ allocate(newval(chunksize+oldsize))
+ write(*,*)size(matrix%col), size(matrix%row), size(matrix%val), size(newcol), size(newrow), size(newval)
+ newcol(1:oldsize) = matrix%col(:)
+ newrow(1:oldsize) = matrix%row(:)
+ newval(1:oldsize) = matrix%val(:)
+
+ deallocate(matrix%col)
+ deallocate(matrix%row)
+ deallocate(matrix%val)
+
+ matrix%col => newcol
+ matrix%row => newrow
+ matrix%val => newval
+
+ end subroutine grow_sparse_matrix
+
+ subroutine del_sparse_matrix(matrix)
+ ! delete sparse matrix
+ implicit none
+ type(sparse_matrix_type) :: matrix ! matrix
+
+ if (associated(matrix%col)) then
+ deallocate(matrix%col)
+ deallocate(matrix%row)
+ deallocate(matrix%val)
+ end if
+
+ end subroutine del_sparse_matrix
+
+ subroutine print_sparse(matrix, unit)
+ ! print sparse matrix
+ implicit none
+ type(sparse_matrix_type) :: matrix ! matrix
+ integer, intent(in) :: unit ! unit to be printed to
+
+ integer i
+ do i = 1, matrix%nonzeros
+ write(unit,*) matrix%col(i), matrix%row(i), matrix%val(i)
+ end do
+ end subroutine print_sparse
+
+ subroutine sparse_matrix_vec_prod(matrix, vec, res)
+ ! sparse matrix vector product
+ implicit none
+ type(sparse_matrix_type) :: matrix ! matrix
+ real(kind=dp), intent(in), dimension(:) :: vec ! input vector
+ real(kind=dp), intent(out), dimension(:) :: res ! result vector
+
+ integer i
+
+ res = 0.
+ do i=1,matrix%nonzeros
+ res(matrix%col(i)) = res(matrix%col(i)) + vec(matrix%row(i))*matrix%val(i)
+ end do
+ end subroutine sparse_matrix_vec_prod
+
+ subroutine sparse_insert_val(matrix, i, j, val)
+ ! insert value into sparse matrix. This is safe to call even if val=0
+ implicit none
+ type(sparse_matrix_type) :: matrix ! matrix
+ integer, intent(in) :: i,j ! column and row
+ real(kind=dp), intent(in) :: val ! value
+ if (val /= 0.d0 .and. i > 0 .and. j > 0 .and. i <= matrix%order .and. j <= matrix%order) then
+ matrix%nonzeros = matrix%nonzeros + 1
+ matrix%row(matrix%nonzeros) = i
+ matrix%col(matrix%nonzeros) = j
+ matrix%val(matrix%nonzeros) = val
+
+ if (matrix%nonzeros .eq. size(matrix%val)) then
+ call grow_sparse_matrix(matrix)
+ end if
+ end if
+ end subroutine sparse_insert_val
+
+ subroutine sparse_clear(matrix)
+ ! Clears the sparse matrix, without deallocating any of the
+ ! previously used memory
+ type(sparse_matrix_type) :: matrix
+
+ matrix%nonzeros = 0
+ !Clearing these shouldn't be strictly necessary, but SLAP barfs if we don't
+ matrix%row = 0
+ matrix%col = 0
+ matrix%val = 0
+ end subroutine
+
+ function is_triad_format(matrix)
+ type(sparse_matrix_type) :: matrix
+ logical :: is_triad_format
+
+ is_triad_format = .not. is_column_format(matrix) .and. .not. is_row_format(matrix)
+ end function
+
+ function is_row_format(matrix)
+ type(sparse_matrix_type) :: matrix
+ logical :: is_row_format
+
+ is_row_format = matrix%row(matrix%order + 1) == matrix%nonzeros + 1
+ end function
+!-----------------------------------------------------------------------
+ subroutine coicsr (n,nnz,job,a,ja,ia,iwk)
+ use glimmer_global, only : dp
+ implicit none
+ integer,intent(in) :: n,nnz,job
+ real(dp),dimension(:),intent(inout) :: a
+ integer, dimension(:),intent(inout) :: ja,ia
+ integer, dimension(:),intent(inout) :: iwk
+
+ !Local
+ real(kind=dp) :: t,tnext
+ logical :: values
+ integer :: i,j,k,init,ipos,inext,jnext
+
+!------------------------------------------------------------------------
+! IN-PLACE coo-csr conversion routine.
+!------------------------------------------------------------------------
+! this subroutine converts a matrix stored in coordinate format into
+! the csr format. The conversion is done in place in that the arrays
+! a,ja,ia of the result are overwritten onto the original arrays.
+!------------------------------------------------------------------------
+! on entry:
+!---------
+! n = integer. row dimension of A.
+! nnz = integer. number of nonzero elements in A.
+! job = integer. Job indicator. when job=1, the real values in a are
+! filled. Otherwise a is not touched and the structure of the
+! array only (i.e. ja, ia) is obtained.
+! a = real array of size nnz (number of nonzero elements in A)
+! containing the nonzero elements
+! ja = integer array of length nnz containing the column positions
+! of the corresponding elements in a.
+! ia = integer array of length nnz containing the row positions
+! of the corresponding elements in a.
+! iwk = integer work array of length n+1
+! on return:
+!----------
+! a
+! ja
+! ia = contains the compressed sparse row data structure for the
+! resulting matrix.
+! Note:
+!-------
+! the entries of the output matrix are not sorted (the column
+! indices in each row are not in increasing order); use coocsr
+! if you want them sorted.
+!----------------------------------------------------------------------c
+! Coded by Y. Saad, Sep. 26 1989 c
+! Released under the LGPL
+!
+! Converted to F90 by JVJ -- 11/3/09
+!----------------------------------------------------------------------c
+!-----------------------------------------------------------------------
+ values = (job .eq. 1)
+! find pointer array for resulting matrix.
+ do i=1,n+1
+ iwk(i) = 0
+ end do
+ do k=1,nnz
+ i = ia(k)
+ iwk(i+1) = iwk(i+1)+1
+ end do
+!------------------------------------------------------------------------
+ iwk(1) = 1
+ do i=2,n
+ iwk(i) = iwk(i-1) + iwk(i)
+ end do
+!
+! loop for a cycle in chasing process.
+!
+ init = 1
+ k = 0
+ 5 if (values) t = a(init)
+ i = ia(init)
+ j = ja(init)
+ ia(init) = -1
+!------------------------------------------------------------------------
+ 6 k = k+1
+! current row number is i. determine where to go.
+ ipos = iwk(i)
+! save the chased element.
+ if (values) tnext = a(ipos)
+ inext = ia(ipos)
+ jnext = ja(ipos)
+! then occupy its location.
+ if (values) a(ipos) = t
+ ja(ipos) = j
+! update pointer information for next element to come in row i.
+ iwk(i) = ipos+1
+! determine next element to be chased,
+ if (ia(ipos) .lt. 0) goto 65
+ t = tnext
+ i = inext
+ j = jnext
+ ia(ipos) = -1
+ if (k .lt. nnz) goto 6
+ goto 70
+ 65 init = init+1
+ if (init .gt. nnz) goto 70
+ if (ia(init) .lt. 0) goto 65
+! restart chasing --
+ goto 5
+ 70 do i=1,n
+ ia(i+1) = iwk(i)
+ end do
+ ia(1) = 1
+ return
+ end subroutine
+!----------------- end of coicsr ----------------------------------------
+
+
+!-----------------------------------------------------------------------
+ subroutine coocsr(nrow,nnz,a,ir,jc,ao,jao,iao)
+ use glimmer_global, only : dp
+ implicit none
+ integer, intent(in) :: nrow,nnz
+ real(dp),dimension(:),intent(in) :: a
+ integer,dimension(:),intent(in) :: ir
+ integer,dimension(:),intent(in) :: jc
+ real(dp),dimension(:),intent(out) :: ao
+ integer, dimension(:),intent(out) :: jao
+ integer, dimension(:),intent(out) :: iao
+
+ ! Local
+ real(dp) :: x
+ integer :: i,k,j,k0,iad
+!------------------------------------------------------------------------
+! Coordinate to Compressed Sparse Row
+! Written by Yousef Saad as part of SparseKit2
+! Released under the LGPL
+!
+! Converted to F90 by JVJ -- 10/21/09
+!-----------------------------------------------------------------------
+! converts a matrix that is stored in coordinate format
+! a, ir, jc into a row general sparse ao, jao, iao format.
+!
+! on entry:
+!---------
+! nrow = dimension of the matrix
+! nnz = number of nonzero elements in matrix
+! a,
+! ir,
+! jc = matrix in coordinate format. a(k), ir(k), jc(k) store the nnz
+! nonzero elements of the matrix with a(k) = actual real value of
+! the elements, ir(k) = its row number and jc(k) = its column
+! number. The order of the elements is arbitrary.
+!
+! on return:
+!-----------
+! ir is destroyed
+!
+! ao, jao, iao = matrix in general sparse matrix format with ao
+! containing the real values, jao containing the column indices,
+! and iao being the pointer to the beginning of the row,
+! in arrays ao, jao.
+!------------------------------------------------------------------------
+ iao = 0
+! determine row-lengths.
+ do k=1, nnz
+ iao(ir(k)) = iao(ir(k))+1
+ end do
+! starting position of each row..
+ k = 1
+ do j=1,nrow+1
+ k0 = iao(j)
+ iao(j) = k
+ k = k+k0
+ end do
+! go through the structure once more. Fill in output matrix.
+ do k=1, nnz
+ i = ir(k)
+ j = jc(k)
+ x = a(k)
+ iad = iao(i)
+ ao(iad) = x
+ jao(iad) = j
+ iao(i) = iad+1
+ end do
+! shift back iao
+ do j=nrow,1,-1
+ iao(j+1) = iao(j)
+ end do
+ iao(1) = 1
+ return
+ end subroutine
+!------------- end of coocsr -------------------------------------------
+ function is_column_format(matrix)
+ type(sparse_matrix_type) :: matrix
+ logical :: is_column_format
+
+ is_column_format = matrix%col(matrix%order + 1) == matrix%nonzeros + 1
+ end function
+
+ subroutine to_column_format(matrix)
+ type(sparse_matrix_type) :: matrix
+
+ if(is_triad_format(matrix)) then
+ call ds2y(matrix%order, matrix%nonzeros, matrix%row, matrix%col, matrix%val, 0)
+ end if
+ end subroutine
+
+ subroutine sort_column_format(matrix)
+ ! Takes a column format matrix and sorts the row indices within each column
+ ! This is not strictly needed in some compressed-column matrices
+ ! (e.g. those used in SLAP), but it *is* necessary in some other libraries
+ ! (e.g. UMFPACK). For this reason, it is not done automatically in
+ ! to_column_format.
+ implicit none
+ type(sparse_matrix_type) :: matrix
+ integer :: i
+
+ do i=1,matrix%order !Loop through each column index
+ call sort(matrix%val, matrix%row, matrix%col(i), matrix%col(i+1)-1)
+ end do
+ end subroutine
+
+ subroutine sort_row_format(matrix)
+ ! Takes a row format matrix and sorts the column indices within each row
+ ! This is not strictly needed in some compressed-row matrices
+ ! (e.g. those used in SLAP), but it *is* necessary in some other libraries
+ ! (e.g. PARDISO).
+ implicit none
+ type(sparse_matrix_type),intent(inout) :: matrix
+ integer :: i
+
+ do i=1,matrix%order !Loop through each row index
+ call sort(matrix%val, matrix%col, matrix%row(i), matrix%row(i+1)-1)
+ end do
+ end subroutine
+
+
+ subroutine sort(values, indices, startindex, endindex)
+ implicit none
+ real(dp),dimension(:),intent(inout) :: values
+ integer,dimension(:),intent(inout) :: indices
+ integer, intent(in) :: startindex
+ integer, intent(in) :: endindex
+ integer :: currentindex
+ real(dp) :: currentvalue
+ integer :: i,j
+
+ !Insertion Sort
+ do i=startindex+1,endindex
+ currentindex = indices(i)
+ currentvalue = values(i)
+
+ j = i-1
+ do while (j >= startindex .and. indices(j) > currentindex)
+ indices(j+1) = indices(j)
+ values(j+1) = values(j)
+ j = j - 1
+ end do
+ indices(j+1) = currentindex
+ values(j+1) = currentvalue
+ end do
+ end subroutine
+
+end module glimmer_sparse_type
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/CMakeLists.txt b/components/cism/glimmer-cism/libglimmer-trilinos/CMakeLists.txt
new file mode 100644
index 0000000000..22d9af4993
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/CMakeLists.txt
@@ -0,0 +1,8 @@
+#
+FILE(GLOB CPPSOURCES *.cpp)
+FILE(GLOB CPPHEADERS *.hpp)
+
+add_library(glimmercismcpp ${CPPSOURCES} ${CPPHEADERS})
+include_directories ( ${CISM_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}
+ ${Trilinos_INCLUDE_DIRS} ${Trilinos_TPL_INCLUDE_DIRS})
+
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/Makefile.Trilinos.export.in b/components/cism/glimmer-cism/libglimmer-trilinos/Makefile.Trilinos.export.in
new file mode 100644
index 0000000000..59d92d7e10
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/Makefile.Trilinos.export.in
@@ -0,0 +1,16 @@
+## Configure processes this file and replaces the strings in @..@
+## If configured --with-trilinos, this pulls in a Makefile where
+## Trilinos variables are defined (e.g. Trilinos_LIBS), which
+## are used in libglide/Makefile.am within an if Trilinos block.
+## Else, this just becomes an unused comment
+
+## For Trilinos 10.4 or later, use these lines
+@TRILINOS_TRUE@include @TRILINOS_PREFIX@/include/Makefile.export.Trilinos
+@TRILINOS_TRUE@TRILINOS_LIBS_BASE = $(top_builddir)/libglimmer-trilinos/libglimmertrilinos.la $(Trilinos_LIBRARIES) $(Trilinos_TPL_LIBRARIES)
+
+## For versions of Trilinos before 10.4, uncomment these two lines instead to grab
+## libraries from NOX since a unified Makefile.export.Trilinos did not yet exist
+#@TRILINOS_TRUE@include @TRILINOS_PREFIX@/include/Makefile.export.NOX
+#@TRILINOS_TRUE@TRILINOS_LIBS_BASE = $(top_builddir)/libglimmer-trilinos/libglimmertrilinos.la $(NOX_LIBRARIES) $(NOX_TPL_LIBRARIES)
+
+TRILINOS_LIBS_ALL = $(TRILINOS_LIBS_BASE) @EXTRA_LDLIBS_SUBST@
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/README b/components/cism/glimmer-cism/libglimmer-trilinos/README
new file mode 100644
index 0000000000..cd81fe3700
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/README
@@ -0,0 +1,57 @@
+AGS:08/04/2010
+AGS:05/11/2011
+
+GLIMMER with TRILINOS:
+The libglimmer-trilinos directory contains code to interface
+to the Trilinos linear solvers. These files are only compiled
+when glimmer is configured --with-trilinos. A library
+called libglimmertrilinos.a is the result, and is linked
+into simple_glide.
+
+LINKING AGAINST TRILINOS:
+The list of the few dozen Trilinos libraries needed on the
+link line is automatically picked up through the logic
+in Makefile.Trilinos.export.in. This grabs makefile
+variables defined during the trilinos build, and
+installed with trilinos in include/Makefile.export.Trilinos.
+Blas and Lapack should be picked up this way as well.
+
+Glimmer is now linked with the C++ compiler. The autoconf
+system picks up a variable called FCLIBS that should
+contain all the fortran libraries needed to link fortran
+code with C++ linker. This is not always complete.
+Configure scripts for glimmer are being archived in
+../configure-scripts.
+
+BUILDING TRILINOS:
+Sample cmake configuration scripts for Trilinos on various
+platforms are commited in cmake-scrips directory. More
+examples are in Trilinos/sample-scripts.
+
+USING TRILINOS IN GLIMMER:
+The trilinos solvers are accessed by selecting
+* which_ho_sparse = 4
+This uses trilinosLinearSolver.cpp (and matrixInterface.cpp)
+The entries in the matrix, if owned by this processor,
+are loaded directly into the Trilinos (Epetra_CrsMatrix)
+format. The hook to this is in putpcgc in glam_strs2.F90.
+
+SELECTING TRILINOS SOLVER OPTIONS:
+The options that control the linear solver methods are
+read from an input file called trilinosOptions.xml in
+a sublist called "Stratimikos". Stratimikos is the Linear
+Solver Strategies package in trilinos that provides a
+single interface to all the preconditioners and linear
+solvers in Trilinos (Belos, Aztec, Ifpack, ML, Amesos, ...).
+The full list of options is documented on the Trilinos
+web pages. Click on the Stratimikos package, documentation
+of the Trilinos version that you are using, and then the
+link to Stratimikos::DefaultLinearSolverBuilder.
+
+http://trilinos.sandia.gov/packages/docs/r10.6/packages/stratimikos/doc/html/classStratimikos_1_1DefaultLinearSolverBuilder.html
+
+
+NONLINEAR SOLVERS:
+An interface to Trilinos Nonlinear solvers (NOX) is progressing
+in a different svn branch. The C++ interface code will appear
+in this directory.
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/matrixInterface.cpp b/components/cism/glimmer-cism/libglimmer-trilinos/matrixInterface.cpp
new file mode 100644
index 0000000000..6a170bcf7b
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/matrixInterface.cpp
@@ -0,0 +1,68 @@
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// matrixInterface.cpp - part of the Community Ice Sheet Model (CISM)
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// Copyright (C) 2005-2014
+// CISM contributors - see AUTHORS file for list of contributors
+//
+// This file is part of CISM.
+//
+// CISM is free software: you can redistribute it and/or modify it
+// under the terms of the Lesser GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// CISM is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// Lesser GNU General Public License for more details.
+//
+// You should have received a copy of the Lesser GNU General Public License
+// along with CISM. If not, see <http://www.gnu.org/licenses/>.
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#include
+#include "Teuchos_TestForException.hpp"
+#include "matrixInterface.hpp"
+
+// Constructor
+TrilinosMatrix_Interface::TrilinosMatrix_Interface
+ (const Teuchos::RCP<const Epetra_Map>& rowMap,
+ int bandwidth, const Epetra_Comm& comm)
+ : rowMap_(rowMap), bandwidth_(bandwidth), matrixOrder_(-1), comm_(comm) {
+
+ matrixOrder_ = rowMap->NumGlobalElements();
+
+ operator_ = Teuchos::rcp(new Epetra_CrsMatrix(Copy, *rowMap, bandwidth) );
+ isFillCompleted_ = false;
+}
+
+// Destructor
+TrilinosMatrix_Interface::~TrilinosMatrix_Interface() {
+}
+
+// Accessor methods
+bool TrilinosMatrix_Interface::isSparsitySet() const {return isFillCompleted_;}
+int TrilinosMatrix_Interface::bandwidth() const {return bandwidth_;}
+int TrilinosMatrix_Interface::matrixOrder() const {return matrixOrder_;}
+const Epetra_Map& TrilinosMatrix_Interface::getRowMap() const {return *rowMap_;}
+Teuchos::RCP<Epetra_CrsMatrix>& TrilinosMatrix_Interface::getOperator() {return operator_;}
+
+
+// Fix the sparsity pattern by calling FillComplete
+void TrilinosMatrix_Interface::finalizeSparsity() {
+ isFillCompleted_ = true;
+ int ierr = operator_->FillComplete();
+ TEUCHOS_TEST_FOR_EXCEPTION(ierr != 0, std::logic_error,
+ "Error: Trilinos Fill Complete returned nozero error code ( " << ierr << " )\n");
+
+}
+
+// Update the operator and also the corresponding row map.
+void TrilinosMatrix_Interface::updateOperator(Teuchos::RCP<Epetra_CrsMatrix> newOperator) {
+ operator_ = newOperator;
+ isFillCompleted_ = operator_->Filled();
+}
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/matrixInterface.hpp b/components/cism/glimmer-cism/libglimmer-trilinos/matrixInterface.hpp
new file mode 100644
index 0000000000..37812365b7
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/matrixInterface.hpp
@@ -0,0 +1,49 @@
+#ifndef TRILINOSMATIX_INTERFACE_H
+#define TRILINOSMATIX_INTERFACE_H
+
+#include
+#include "Epetra_Comm.h"
+#include "Epetra_Map.h"
+#include "Epetra_LocalMap.h"
+#ifdef GLIMMER_MPI
+#include "mpi.h"
+#include "Epetra_MpiComm.h"
+#else
+#include "Epetra_SerialComm.h"
+#endif
+#include "Epetra_CrsMatrix.h"
+#include "Epetra_Vector.h"
+#include "Epetra_Import.h"
+
+#include "Teuchos_ConfigDefs.hpp"
+#include "Teuchos_FancyOStream.hpp"
+
+class TrilinosMatrix_Interface {
+public:
+ // Constructor
+ TrilinosMatrix_Interface(const Teuchos::RCP<const Epetra_Map>& rowMap,
+ int bandwidth, const Epetra_Comm& comm);
+
+ // Destructor
+ ~TrilinosMatrix_Interface();
+
+ // Accessors
+ bool isSparsitySet() const;
+ int bandwidth() const;
+ int matrixOrder() const;
+ const Epetra_Map& getRowMap() const;
+ Teuchos::RCP<Epetra_CrsMatrix>& getOperator();
+
+ // Mutators
+ void finalizeSparsity(); // Call FillComplete to lock in sparsity pattern
+ void updateOperator(Teuchos::RCP<Epetra_CrsMatrix> newOperator);
+
+private:
+ bool isFillCompleted_; // to indicate if operator_ is "FillComplete()"ed
+ int bandwidth_;
+ int matrixOrder_;
+ const Epetra_Comm& comm_;
+ Teuchos::RCP<Epetra_CrsMatrix> operator_;
+ Teuchos::RCP<const Epetra_Map> rowMap_;
+};
+#endif
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/trilinosGlissadeSolver.cpp b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosGlissadeSolver.cpp
new file mode 100644
index 0000000000..1fd5d8767d
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosGlissadeSolver.cpp
@@ -0,0 +1,412 @@
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// trilinosGLissadeSolver.cpp - part of the Community Ice Sheet Model (CISM)
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// Copyright (C) 2005-2014
+// CISM contributors - see AUTHORS file for list of contributors
+//
+// This file is part of CISM.
+//
+// CISM is free software: you can redistribute it and/or modify it
+// under the terms of the Lesser GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// CISM is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// Lesser GNU General Public License for more details.
+//
+// You should have received a copy of the Lesser GNU General Public License
+// along with CISM. If not, see .
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#include
+#include "Epetra_Import.h"
+#include "Epetra_Vector.h"
+#include "Epetra_CrsMatrix.h"
+
+#include "Teuchos_ParameterList.hpp"
+#include "Teuchos_XMLParameterListHelpers.hpp"
+#include "Teuchos_Time.hpp"
+//#include "Teuchos_TimeMonitor.hpp"
+#include "Teuchos_StandardCatchMacros.hpp"
+
+#include "Stratimikos_DefaultLinearSolverBuilder.hpp"
+#include "Thyra_LinearOpWithSolveFactoryHelpers.hpp"
+#include "Thyra_EpetraThyraWrappers.hpp"
+#include "Thyra_EpetraLinearOp.hpp"
+
+#ifdef GLIMMER_MPI
+#include "Teuchos_DefaultMpiComm.hpp"
+#include "Epetra_MpiComm.h"
+#else
+#include "Teuchos_DefaultSerialComm.hpp"
+#include "Epetra_SerialComm.h"
+#endif
+
+#include "config.inc"
+
+// Uncomment this #define to write out linear system
+//#define WRITE_OUT_LINEAR_SYSTEM
+#ifdef WRITE_OUT_LINEAR_SYSTEM
+#include "EpetraExt_RowMatrixOut.h"
+#include "EpetraExt_MultiVectorOut.h"
+int solvecount=0;
+#endif
+
+// Turn this on to check validity of sparse matrix entries
+//#define CHECK_FOR_ROGUE_COLUMNS
+
+// Define variables that are global to this file.
+// If this were a C++ class, these would be member data.
+Teuchos::RCP rhs;
+Teuchos::RCP paramList;
+Teuchos::RCP tout;
+Teuchos::RCP matrix;
+Teuchos::RCP > linOp;
+Teuchos::RCP > linOpFactory;
+Teuchos::RCP > thyraOp;
+bool successFlag = true;
+
+// Flag for operations done once per time step (e.g. define active unknowns)
+bool firstMatrixAssemblyForTimeStep = true;
+
+// Flag for operations done once per run (e.g. read in trilinosOPtions.xml)
+bool firstCallToInitializeTGS = true;
+
+int linSolveCount=0, linSolveSuccessCount=0, linSolveIters_last=0, linSolveIters_total=0;
+double linSolveAchievedTol;
+bool printLinSolDetails=true; // Need to set in input file.
+
+extern "C" {
+
+ // Prototypes for locally called functions
+ void linSolveDetails_tgs(Thyra::SolveStatus& status);
+ void check_for_rogue_columns_tgs( Epetra_CrsMatrix& mat);
+
+ //================================================================
+ // This needs to be called only once per time step in the beginning
+ // to set up the owned unknow map for the problem.
+ //================================================================
+
+ void FC_FUNC(initializetgs,initializetgs)
+ (int& mySize, int* myIndicies, int* mpi_comm_f) {
+ // mySize: the number of active_owned_unknowns on this processor
+ // myIndicies[]: global_active_owned_unknowns integer array in glissade-speak
+ // mpi_comm_f: CISM's fortran mpi communicator
+
+ // Define output stream that only prints on Proc 0
+ tout = Teuchos::VerboseObjectBase::getDefaultOStream();
+
+#ifdef GLIMMER_MPI
+ // Make sure the MPI_Init in Fortran is recognized by C++.
+ // We used to call an extra MPI_Init if (!flag), but the behavior of doing so is uncertain,
+ // especially if CISM's MPI communicator is a subset of MPI_COMM_WORLD (as can be the case in CESM).
+ // Thus, for now, we die with an error message if C++ perceives MPI to be uninitialized.
+ // If this causes problems (e.g., if certain MPI implementations seem not to recognize
+ // that MPI has already been initialized), then we will revisit how to handle this.
+ int flag;
+ MPI_Initialized(&flag);
+ if (!flag) {
+ *tout << "ERROR in initializetgs: MPI not initialized according to C++ code" << std::endl;
+ exit(1);
+ }
+ MPI_Comm mpi_comm_c = MPI_Comm_f2c(*mpi_comm_f);
+ Epetra_MpiComm comm(mpi_comm_c);
+ Teuchos::MpiComm tcomm(Teuchos::opaqueWrapper(mpi_comm_c));
+#else
+ Epetra_SerialComm comm;
+ Teuchos::SerialComm tcomm;
+#endif
+
+
+ // Read parameter list from XML file once per run
+ if (firstCallToInitializeTGS) {
+ // Set flag so following code is executed only once per code run
+ firstCallToInitializeTGS = false;
+ try {
+ paramList = Teuchos::rcp(new Teuchos::ParameterList("Trilinos Options"));
+ Teuchos::updateParametersFromXmlFileAndBroadcast("trilinosOptions.xml", paramList.ptr(), tcomm);
+
+ Teuchos::ParameterList validPL("Valid List");;
+ validPL.sublist("Stratimikos"); validPL.sublist("Piro");
+ paramList->validateParameters(validPL, 0);
+
+ // Set the coordinate position of the nodes for ML for repartitioning (important for #procs > 100s)
+ if (paramList->sublist("Stratimikos").isParameter("Preconditioner Type")) {
+ if ("ML" == paramList->sublist("Stratimikos").get("Preconditioner Type")) {
+ *tout << "\nNOTE: ML preconditioner can work much better when interface is extended\n"
+ << "\tto include Nodal XYZ coordinates.\n" << std::endl;
+ Teuchos::ParameterList& mlList =
+ paramList->sublist("Stratimikos").sublist("Preconditioner Types").sublist("ML").sublist("ML Settings");
+ //mlList.set("x-coordinates",myX);
+ //mlList.set("y-coordinates",myY);
+ //mlList.set("z-coordinates",myZ);
+ mlList.set("PDE equations", 2);
+ }
+ }
+
+ // Set up solver (preconditioner, iterative method) based on XML file
+ Stratimikos::DefaultLinearSolverBuilder linearSolverBuilder;
+ linearSolverBuilder.setParameterList(Teuchos::sublist(paramList, "Stratimikos"));
+ linOpFactory = linearSolverBuilder.createLinearSolveStrategy("");
+ linOpFactory->setOStream(tout);
+ linOpFactory->setVerbLevel(Teuchos::VERB_LOW);
+
+ linOp=Teuchos::null;
+ thyraOp=Teuchos::null;
+ }
+ catch (std::exception& e) {
+ std::cout << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
+ << e.what() << "\nExiting: Invalid trilinosOptions.xml file."
+ << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << std::endl;
+ exit(1);
+ }
+ catch (...) {
+ std::cout << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
+ << "\nExiting: Invalid trilinosOptions.xml file."
+ << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << std::endl;
+ exit(1);
+ }
+ }
+
+ // Continue setup that needs to be redone every time step
+ try {
+ // Flag to let subsequent functions know that a new matrix has been created
+ firstMatrixAssemblyForTimeStep = true;
+
+ Teuchos::RCP rowMap =
+ Teuchos::rcp(new Epetra_Map(-1, mySize, myIndicies, 1, comm) );
+
+ TEUCHOS_TEST_FOR_EXCEPTION(!rowMap->UniqueGIDs(), std::logic_error,
+ "Error: initializetgs, myIndicies array needs to have unique entries"
+ << " across all processors.");
+
+ // Diagnostic output for partitioning
+ int minSize, maxSize;
+ comm.MinAll(&mySize, &minSize, 1);
+ comm.MaxAll(&mySize, &maxSize, 1);
+ if (comm.MyPID()==0)
+ *tout << "\nPartition Info in init_trilinos: Total nodes = " << rowMap->NumGlobalElements()
+ << " Max = " << maxSize << " Min = " << minSize
+ << " Ave = " << rowMap->NumGlobalElements() / comm.NumProc() << std::endl;
+
+ // rhs is the b vector, rhs of linear system (owned, active)
+ rhs = Teuchos::rcp(new Epetra_Vector(*rowMap));
+
+ // Reset counters every time step: can remove these lines to have averages over entire run
+ linSolveIters_total = 0;
+ linSolveCount=0;
+ linSolveSuccessCount = 0;
+
+ // Construct the CrsMatrix based on the row map and bandwidth estimate
+ const int bandwidth = 54;
+ matrix = Teuchos::rcp(new Epetra_CrsMatrix(Copy, *rowMap, bandwidth));
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, successFlag);
+ if (!successFlag) exit(1);
+
+ //Teuchos::TimeMonitor::summarize(*tout,false,true,false/*zero timers*/);
+ }
+
+ //============================================================
+ // Insert one row of entries into matrix and RHS
+ //============================================================
+
+ void FC_FUNC(insertrowtgs,INSERTROWTGS)
+ (int& rowInd, int& numColumns, int* columns,
+ double* matrixValues, double& rhsValue ) {
+ // rowInd: global row number
+ // numColumns: number of columns in this row (typically 54, but less on boundaries)
+ // columns[]: array with numColumns valid entries of global column numbers
+ // matrixValues[]: array with corresponding matrix entries
+ // rhsValue: entry into "b" vector for that same row.
+ //
+ //TEUCHOS_FUNC_TIME_MONITOR("> insertRowTGS");
+
+ try {
+ int ierr;
+ const Epetra_Map& rowMap = matrix->RowMap();
+
+ // If this row is not owned on this processor, then throw error
+ TEUCHOS_TEST_FOR_EXCEPTION(!rowMap.MyGID(rowInd), std::logic_error,
+ "Error: Trilinos matrix has detected an invalid row entry (row="
+ << rowInd << ").\n");
+
+ // Insert contribution to rhs a.k.a. b vector (as in Au=b)
+ rhs->ReplaceGlobalValues(1, &rhsValue, &rowInd);
+
+ if (firstMatrixAssemblyForTimeStep) {
+
+//#define ONE_PROC_DEBUG
+#ifdef ONE_PROC_DEBUG
+ if (rowMap.Comm().NumProc()==1)
+ for (int col=0; colInsertGlobalValues(rowInd, numColumns, matrixValues, columns);
+
+ if (ierr<0) {std::cout << "Error Code for " << rowInd << " = ("<< ierr <<")"<0) std::cout << "Warning Code for " << rowInd << " = ("<< ierr <<")"<ReplaceGlobalValues(rowInd, 1, &matrixValues[col], &columns[col]);
+
+ TEUCHOS_TEST_FOR_EXCEPTION(ierr != 0, std::logic_error,
+ "Error: Trilinos matrix has detected a new column entry A("
+ << rowInd << ", " << columns[col] << ") = " << matrixValues[col]
+ << "\n\t that did not exist before.");
+ }
+#else
+ // Subsequent matrix fills of each time step.
+ ierr = matrix->ReplaceGlobalValues(rowInd, numColumns, matrixValues, columns);
+
+ TEUCHOS_TEST_FOR_EXCEPTION(ierr != 0, std::logic_error,
+ "Error: Trilinos matrix has detected a new column entry in row ("
+ << rowInd << ")\n\t that did not exist before.");
+#endif
+ }
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, successFlag);
+ if (!successFlag) exit(1);
+ }
+
+ //============================================================
+ // Call to perform solve of previously assembled linear system
+ //============================================================
+
+ void FC_FUNC(solvevelocitytgs,SOLVEVELOCITYTGS)
+ (double* velocityResult) {
+ // velocityResult[]: array of length mySize from initializetgs call, that
+ // upon return will have the velocities from Au=b solve.
+ //TEUCHOS_FUNC_TIME_MONITOR("> solveVelocityTGS");
+
+ try {
+ //Teuchos::Time linearTime("LinearTime"); linearTime.start();
+
+ // Lock in sparsity pattern of CrsMatrix -- first solve only
+ if (firstMatrixAssemblyForTimeStep) {
+ firstMatrixAssemblyForTimeStep = false;
+
+ matrix->FillComplete();
+#ifdef CHECK_FOR_ROGUE_COLUMNS
+ check_for_rogue_columns_tgs(*matrix);
+#endif
+
+ // Associate matrix with solver strategy layers
+ thyraOp = Thyra::epetraLinearOp(matrix);
+ }
+ // Need to do this call to invoke fresh preconditioner
+ linOp = Thyra::linearOpWithSolve(*linOpFactory, thyraOp);
+
+ // Wrap velocity vector inside Epetra Vector data structure
+ Teuchos::RCP solution
+ = Teuchos::rcp(new Epetra_Vector(View, matrix->RowMap(), velocityResult));
+
+#ifdef WRITE_OUT_LINEAR_SYSTEM
+ solvecount++;
+ if (solvecount==1) {
+ EpetraExt::RowMatrixToMatrixMarketFile("matrix1", *matrix);
+ EpetraExt::MultiVectorToMatrixMarketFile("vector1", *rhs);
+ }
+#endif
+
+ // Wrap Epetra Vetors as Thyra vectors, as the solver requires
+ Teuchos::RCP >
+ thyraRhs = Thyra::create_Vector(rhs, thyraOp->range() );
+ Teuchos::RCP >
+ thyraSol = Thyra::create_Vector(solution, thyraOp->domain() );
+ Thyra::SolveStatus
+ status = Thyra::solve(*linOp, Thyra::NOTRANS, *thyraRhs, thyraSol.ptr());
+
+ if (printLinSolDetails) linSolveDetails_tgs(status);
+
+ //elapsedTime = linearTime.stop();
+ //*tout << "Total time elapsed for calling Solve(): " << elapsedTime << std::endl;
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, successFlag);
+ if (!successFlag) exit(1);
+ }
+
+ //============================================================
+
+ void linSolveDetails_tgs(Thyra::SolveStatus& status) {
+ ++linSolveCount;
+ bool haveData=false;
+ if (status.extraParameters != Teuchos::null) {
+ if (status.extraParameters->isParameter("Belos/Iteration Count")) {
+ linSolveIters_last = status.extraParameters->get("Belos/Iteration Count");
+ linSolveIters_total += linSolveIters_last;
+ haveData=true;
+ }
+ if (status.extraParameters->isParameter("Belos/Achieved Tolerance"))
+ linSolveAchievedTol = status.extraParameters->get("Belos/Achieved Tolerance");
+ if (status.extraParameters->isParameter("AztecOO/Iteration Count")) {
+ linSolveIters_last = status.extraParameters->get("AztecOO/Iteration Count");
+ linSolveIters_total += linSolveIters_last;
+ haveData=true;
+ }
+ if (status.extraParameters->isParameter("AztecOO/Achieved Tolerance"))
+ linSolveAchievedTol = status.extraParameters->get("AztecOO/Achieved Tolerance");
+
+ if (haveData) {
+ *tout << "Precon Linear Solve ";
+ if (status.solveStatus == Thyra::SOLVE_STATUS_CONVERGED)
+ {*tout << "Succeeded: "; ++linSolveSuccessCount;}
+ else *tout << "Failed: ";
+ *tout << std::setprecision(3)
+ << linSolveAchievedTol << " drop in "
+ << linSolveIters_last << " its (avg: "
+ << linSolveIters_total / (double) linSolveCount << " its/slv, "
+ << 100.0* linSolveSuccessCount / (double) linSolveCount << "% success)"
+ << std::endl;
+ }
+ }
+ }
+
+ /* Debugging utility to check if columns have been Inserted into the
+ * matrix that do not correspond to a row on any processor
+ */
+ void check_for_rogue_columns_tgs( Epetra_CrsMatrix& mat) {
+ // Set up rowVector of 0s and column vector of 1s
+ const Epetra_Map& rowMap = mat.RowMap();
+ const Epetra_Map& colMap = mat.ColMap();
+ Epetra_Vector rowVec(rowMap); rowVec.PutScalar(0.0);
+ Epetra_Vector colVec(colMap); colVec.PutScalar(1.0);
+ Epetra_Import importer(colMap, rowMap);
+
+ // Overwrite colVec 1s with rowVec 0s
+ colVec.Import(rowVec, importer, Insert);
+
+ // Check that all 1s have been overwritten
+ double nrm=0.0;
+ colVec.Norm1(&nrm); // nrm = number of columns not overwritten by rows
+
+ // If any rogue columns, exit now (or just get nans later)
+ if (nrm>=1.0) {
+ *tout << "ERROR: Column map has " << nrm
+ << " rogue entries that are not associated with any row." << std::endl;
+ rowMap.Comm().Barrier();
+ exit(-3);
+ }
+ else {
+ *tout << "Debugging check for rogue column indices passed."
+ << " Turn off for production runs.\n" << std::endl;
+ }
+ }
+
+ //============================================================
+
+} // extern"C"
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/trilinosLinearSolver.cpp b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosLinearSolver.cpp
new file mode 100644
index 0000000000..cd83aae9f1
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosLinearSolver.cpp
@@ -0,0 +1,457 @@
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// trilinosLinearSolver.cpp - part of the Community Ice Sheet Model (CISM)
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// Copyright (C) 2005-2014
+// CISM contributors - see AUTHORS file for list of contributors
+//
+// This file is part of CISM.
+//
+// CISM is free software: you can redistribute it and/or modify it
+// under the terms of the Lesser GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// CISM is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// Lesser GNU General Public License for more details.
+//
+// You should have received a copy of the Lesser GNU General Public License
+// along with CISM. If not, see .
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#include
+#include "Epetra_LocalMap.h"
+#include "Epetra_Import.h"
+#include "Epetra_CombineMode.h"
+#include "matrixInterface.hpp"
+
+#include "Teuchos_ParameterList.hpp"
+#include "Teuchos_XMLParameterListHelpers.hpp"
+#include "Teuchos_Time.hpp"
+#include "Teuchos_StandardCatchMacros.hpp"
+
+
+#include "Stratimikos_DefaultLinearSolverBuilder.hpp"
+#include "Thyra_LinearOpWithSolveFactoryHelpers.hpp"
+#include "Thyra_EpetraThyraWrappers.hpp"
+#include "Thyra_EpetraLinearOp.hpp"
+
+#ifdef GLIMMER_MPI
+#include "Teuchos_DefaultMpiComm.hpp"
+#else
+#include "Teuchos_DefaultSerialComm.hpp"
+#endif
+
+#include "config.inc"
+
+// Uncomment this #define to write out linear system
+//#define WRITE_OUT_LINEAR_SYSTEM
+#ifdef WRITE_OUT_LINEAR_SYSTEM
+#include "EpetraExt_RowMatrixOut.h"
+#include "EpetraExt_MultiVectorOut.h"
+int solvecount=0;
+#endif
+
+// Turn this on to check validity of sparse matrix entries
+#define CHECK_FOR_ROGUE_COLUMNS
+
+// Define variables that are global to this file.
+// If this were a C++ class, these would be member data.
+Teuchos::RCP interface;
+Teuchos::RCP savedMatrix_A;
+Teuchos::RCP savedMatrix_C;
+Teuchos::RCP soln;
+Teuchos::RCP pl;
+Teuchos::RCP out;
+Teuchos::RCP > lows;
+Teuchos::RCP > lowsFactory;
+Teuchos::RCP > thyraOper;
+bool success = true;
+
+int linearSolveCount=0, linearSolveSuccessCount=0, linearSolveIters_last=0, linearSolveIters_total=0;
+double linearSolveAchievedTol;
+bool printDetails=false; // Need to set in input file.
+
+extern "C" {
+
+ // Prototype for locally called function
+ void linSolveDetails(Thyra::SolveStatus& status);
+ void check_for_rogue_columns( Epetra_CrsMatrix& mat);
+
+ //================================================================
+ //================================================================
+ // RN_20091215: This needs to be called only once per time step
+ // in the beginning to set up the problem.
+ //================================================================
+ void FC_FUNC(inittrilinos,INITTRILINOS) (int& bandwidth, int& mySize,
+ int* myIndicies, double* myX, double* myY, double* myZ,
+ int* mpi_comm_f) {
+// mpi_comm_f: CISM's fortran mpi communicator
+
+#ifdef GLIMMER_MPI
+ // Make sure the MPI_Init in Fortran is recognized by C++.
+ // We used to call an extra MPI_Init if (!flag), but the behavior of doing so is uncertain,
+ // especially if CISM's MPI communicator is a subset of MPI_COMM_WORLD (as can be the case in CESM).
+ // Thus, for now, we die with an error message if C++ perceives MPI to be uninitialized.
+ // If this causes problems (e.g., if certain MPI implementations seem not to recognize
+ // that MPI has already been initialized), then we will revisit how to handle this.
+ int flag;
+ MPI_Initialized(&flag);
+ if (!flag) {
+ std::cout << "ERROR in inittrilinos: MPI not initialized according to C++ code" << std::endl;
+ exit(1);
+ }
+ MPI_Comm mpi_comm_c = MPI_Comm_f2c(*mpi_comm_f);
+ Epetra_MpiComm comm(mpi_comm_c);
+ Teuchos::MpiComm tcomm(Teuchos::opaqueWrapper(mpi_comm_c));
+#else
+ Epetra_SerialComm comm;
+ Teuchos::SerialComm tcomm;
+#endif
+
+ Teuchos::RCP rowMap =
+ Teuchos::rcp(new Epetra_Map(-1,mySize,myIndicies,1,comm) );
+
+ TEUCHOS_TEST_FOR_EXCEPTION(!rowMap->UniqueGIDs(), std::logic_error,
+ "Error: inittrilinos, myIndices array needs to have Unique entries"
+ << " across all processor.");
+
+ // Diagnostic output for partitioning
+ int minSize, maxSize;
+ comm.MinAll(&mySize, &minSize, 1);
+ comm.MaxAll(&mySize, &maxSize, 1);
+ if (comm.MyPID()==0)
+ std::cout << "\nPartition Info in init_trilinos: Total nodes = " << rowMap->NumGlobalElements()
+ << " Max = " << maxSize << " Min = " << minSize
+ << " Ave = " << rowMap->NumGlobalElements() / comm.NumProc() << std::endl;
+
+ soln = Teuchos::rcp(new Epetra_Vector(*rowMap));
+
+ // Read parameter list once
+ try {
+ pl = Teuchos::rcp(new Teuchos::ParameterList("Trilinos Options"));
+ Teuchos::updateParametersFromXmlFileAndBroadcast("trilinosOptions.xml", pl.ptr(), tcomm);
+
+ Teuchos::ParameterList validPL("Valid List");;
+ validPL.sublist("Stratimikos"); validPL.sublist("Piro");
+ pl->validateParameters(validPL, 0);
+ }
+ catch (std::exception& e) {
+ std::cout << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
+ << e.what() << "\nExiting: Invalid trilinosOptions.xml file."
+ << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << std::endl;
+ exit(1);
+ }
+ catch (...) {
+ std::cout << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
+ << "\nExiting: Invalid trilinosOptions.xml file."
+ << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << std::endl;
+ exit(1);
+ }
+
+ try {
+ // Set the coordinate position of the nodes for ML for repartitioning (important for #procs > 100s)
+ if (pl->sublist("Stratimikos").isParameter("Preconditioner Type")) {
+ if ("ML" == pl->sublist("Stratimikos").get("Preconditioner Type")) {
+ Teuchos::ParameterList& mlList =
+ pl->sublist("Stratimikos").sublist("Preconditioner Types").sublist("ML").sublist("ML Settings");
+ mlList.set("x-coordinates",myX);
+ mlList.set("y-coordinates",myY);
+ mlList.set("z-coordinates",myZ);
+ mlList.set("PDE equations", 1);
+ }
+ }
+
+ out = Teuchos::VerboseObjectBase::getDefaultOStream();
+
+ // Reset counters every time step: can remove these lines to have averages over entire run
+ linearSolveIters_total = 0;
+ linearSolveCount=0;
+ linearSolveSuccessCount = 0;
+
+ // Create an interface that holds a CrsMatrix instance and some useful methods.
+ interface = Teuchos::rcp(new TrilinosMatrix_Interface(rowMap, bandwidth, comm));
+
+ Stratimikos::DefaultLinearSolverBuilder linearSolverBuilder;
+ linearSolverBuilder.setParameterList(Teuchos::sublist(pl, "Stratimikos"));
+ lowsFactory = linearSolverBuilder.createLinearSolveStrategy("");
+ lowsFactory->setOStream(out);
+ lowsFactory->setVerbLevel(Teuchos::VERB_LOW);
+
+ lows=Teuchos::null;
+ thyraOper=Teuchos::null;
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
+ if (!success) exit(1);
+ }
+
+ //============================================================
+ // RN_20091118: This is to update the matrix with new entries.
+ //============================================================
+
+ void FC_FUNC(putintotrilinosmatrix,PUTINTOTRILINOSMATRIX)
+ (int& rowInd, int& colInd, double& val) {
+
+ try {
+ int ierr;
+ const Epetra_Map& map = interface->getRowMap();
+ // If this row is not owned on this processor, then throw error
+ TEUCHOS_TEST_FOR_EXCEPTION(!map.MyGID(rowInd), std::logic_error,
+ "Error: Trilinos matrix has detected an invalide row entry (row="
+ << rowInd << ",col=" << colInd << ",val=" << val << ").\n");
+
+ Epetra_CrsMatrix& matrix = *(interface->getOperator());
+
+ if (!interface->isSparsitySet()) {
+
+ // The matrix has not been "FillComplete()"ed. First fill of time step.
+ ierr = matrix.InsertGlobalValues(rowInd, 1, &val, &colInd);
+ if (ierr<0) {std::cout << "Error Code for " << rowInd << " " << colInd << " = ("<< ierr <<")"<0) std::cout << "Warning Code for " << rowInd << " " << colInd << " = ("<< ierr <<")"<isSparsitySet()) {
+ interface->finalizeSparsity();
+#ifdef CHECK_FOR_ROGUE_COLUMNS
+ check_for_rogue_columns(*interface->getOperator());
+#endif
+ }
+
+ const Epetra_Map& map = interface->getRowMap();
+ Teuchos::RCP epetraSol = soln;
+ Teuchos::RCP epetraRhs;
+ epetraRhs = Teuchos::rcp(new Epetra_Vector(View, map, rhs));
+
+ thyraOper = Thyra::epetraLinearOp(interface->getOperator());
+ Teuchos::RCP >
+ thyraRhs = Thyra::create_Vector(epetraRhs, thyraOper->range() );
+ Teuchos::RCP >
+ thyraSol = Thyra::create_Vector(epetraSol, thyraOper->domain() );
+
+ lows = Thyra::linearOpWithSolve(*lowsFactory, thyraOper);
+
+ // Uncomment following block to Dump out two matrices Avv, Auu.
+ // This function is called twice per Picard iter, which is twice
+ // per outer GMRES step for Newton solves, so writing at
+ // solvecount==1 is first system, solvecount==51 is 26th Picard iter.
+
+#ifdef WRITE_OUT_LINEAR_SYSTEM
+ solvecount++;
+ if (solvecount==1) {
+ EpetraExt::RowMatrixToMatrixMarketFile("matrix1", *interface->getOperator());
+ EpetraExt::MultiVectorToMatrixMarketFile("vector1", *epetraRhs);
+ }
+#endif
+
+ Thyra::SolveStatus
+ status = Thyra::solve(*lows, Thyra::NOTRANS, *thyraRhs, thyraSol.ptr());
+
+ if (printDetails) linSolveDetails(status);
+
+ soln->ExtractCopy(answer);
+
+ //elapsedTime = linearTime.stop(); *out << "Total time elapsed for calling Solve(): " << elapsedTime << std::endl;
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
+ if (!success) exit(1);
+ }
+
+
+ void FC_FUNC(savetrilinosmatrix,SAVETRILINOSMATRIX) (int* i) {
+ try {
+ if (!interface->isSparsitySet()) {
+ interface->finalizeSparsity();
+#ifdef CHECK_FOR_ROGUE_COLUMNS
+ check_for_rogue_columns(*interface->getOperator());
+#endif
+ }
+ if (*i==0)
+ savedMatrix_A = Teuchos::rcp(new Epetra_CrsMatrix(*(interface->getOperator())));
+ else if (*i==1)
+ savedMatrix_C = Teuchos::rcp(new Epetra_CrsMatrix(*(interface->getOperator())));
+ else if (*i==2) {
+ savedMatrix_A = Teuchos::rcp(new Epetra_CrsMatrix(*(interface->getOperator())));
+ savedMatrix_C = Teuchos::rcp(new Epetra_CrsMatrix(*(interface->getOperator())));
+ }
+ else
+ assert(false);
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
+ if (!success) exit(1);
+ }
+
+
+ void FC_FUNC(restoretrilinosmatrix,RESTORTRILINOSMATRIX) (int* i) {
+ try {
+ if (*i==0)
+ interface->updateOperator(savedMatrix_A);
+ else if (*i==1)
+ interface->updateOperator(savedMatrix_C);
+ else
+ assert(false);
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
+ if (!success) exit(1);
+ }
+
+ void FC_FUNC(matvecwithtrilinos,MATVECWITHTRILINOS)
+ (double* x, double* answer) {
+ try {
+ const Epetra_Map& map = interface->getRowMap();
+
+ Teuchos::RCP epetra_x;
+ epetra_x = Teuchos::rcp(new Epetra_Vector(View, map, x));
+
+ Epetra_Vector y(map);
+ interface->getOperator()->Multiply(false, *epetra_x, y);
+
+ y.ExtractCopy(answer);
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
+ if (!success) exit(1);
+ }
+
+
+ //============================================================
+ // Functionality here is for FEM fills. These differ in that
+ // contributions to matrix entried can come in multiple parts,
+ // so we need to ZeroOut and SumInto the matris, instead of
+ // Replace matrix entries.
+ //
+ // This first attempt will not work in parallel -- we need to
+ // add functionality to deal with off-processor contributions.
+ //============================================================
+
+ void FC_FUNC(zeroouttrilinosmatrix,ZEROOUTTRILINOSMATRIX)() {
+ try {
+ // Zero out matrix. Don't do anything for first call, when matrix is empty.
+ if (interface->isSparsitySet()) {
+ Epetra_CrsMatrix& matrix = *(interface->getOperator());
+ matrix.PutScalar(0.0);
+ }
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
+ if (!success) exit(1);
+ }
+
+ void FC_FUNC(sumintotrilinosmatrix,SUMINTOTRILINOSMATRIX)
+ (int& rowInd, int& numEntries, int* colInd, double* val) {
+
+ try {
+ const Epetra_Map& map = interface->getRowMap();
+
+ Epetra_CrsMatrix& matrix = *(interface->getOperator());
+
+ if (!interface->isSparsitySet()) {
+ // The matrix has not been "FillComplete()"ed. First fill of time step.
+ // Inserted values at this stage will be summed together later
+ int ierr = matrix.InsertGlobalValues(rowInd, numEntries, val, colInd);
+ if (ierr<0) {std::cout << "Error Code for " << rowInd << " " << colInd[0] << " = ("<< ierr <<")"<0) std::cout << "Warning Code for " << rowInd << " " << colInd[0] << " = ("<< ierr <<")"<& status) {
+ ++linearSolveCount;
+ bool haveData=false;
+ if (status.extraParameters != Teuchos::null) {
+ if (status.extraParameters->isParameter("Belos/Iteration Count")) {
+ linearSolveIters_last = status.extraParameters->get("Belos/Iteration Count");
+ linearSolveIters_total += linearSolveIters_last;
+ haveData=true;
+ }
+ if (status.extraParameters->isParameter("Belos/Achieved Tolerance"))
+ linearSolveAchievedTol = status.extraParameters->get("Belos/Achieved Tolerance");
+ if (status.extraParameters->isParameter("AztecOO/Iteration Count")) {
+ linearSolveIters_last = status.extraParameters->get("AztecOO/Iteration Count");
+ linearSolveIters_total += linearSolveIters_last;
+ haveData=true;
+ }
+ if (status.extraParameters->isParameter("AztecOO/Achieved Tolerance"))
+ linearSolveAchievedTol = status.extraParameters->get("AztecOO/Achieved Tolerance");
+
+ if (haveData) {
+ *out << "Precon Linear Solve ";
+ if (status.solveStatus == Thyra::SOLVE_STATUS_CONVERGED)
+ {*out << "Succeeded: "; ++linearSolveSuccessCount;}
+ else *out << "Failed: ";
+ *out << std::setprecision(3)
+ << linearSolveAchievedTol << " drop in "
+ << linearSolveIters_last << " its (avg: "
+ << linearSolveIters_total / (double) linearSolveCount << " its/slv, "
+ << 100.0* linearSolveSuccessCount / (double) linearSolveCount << "% success)"
+ << std::endl;
+ }
+ }
+ }
+
+ /* Debugging utility to check if columns have been Inserted into the
+ * matrix that do not correspond to a row on any processor
+ */
+ void check_for_rogue_columns( Epetra_CrsMatrix& mat) {
+ // Set up rowVector of 0s and column vector of 1s
+ const Epetra_Map& rowMap = mat.RowMap();
+ const Epetra_Map& colMap = mat.ColMap();
+ Epetra_Vector rowVec(rowMap); rowVec.PutScalar(0.0);
+ Epetra_Vector colVec(colMap); colVec.PutScalar(1.0);
+ Epetra_Import importer(colMap, rowMap);
+
+ // Overwrite colVec 1s with rowVec 0s
+ colVec.Import(rowVec, importer, Insert);
+
+ // Check that all 1s have been overwritten
+ double nrm=0.0;
+ colVec.Norm1(&nrm); // nrm = number of columns not overwritten by rows
+
+ // If any rogue columns, exit now (or just get nans later)
+ if (nrm>=1.0) {
+ *out << "ERROR: Column map has " << nrm
+ << " rogue entries that are not associated with any row." << std::endl;
+ rowMap.Comm().Barrier();
+ exit(-3);
+ }
+ }
+
+ //============================================================
+
+} // extern"C"
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/trilinosModelEvaluator.cpp b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosModelEvaluator.cpp
new file mode 100644
index 0000000000..0a35a51c8a
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosModelEvaluator.cpp
@@ -0,0 +1,171 @@
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// trilinosModelEvaluator.cpp - part of the Community Ice Sheet Model (CISM)
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// Copyright (C) 2005-2014
+// CISM contributors - see AUTHORS file for list of contributors
+//
+// This file is part of CISM.
+//
+// CISM is free software: you can redistribute it and/or modify it
+// under the terms of the Lesser GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// CISM is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// Lesser GNU General Public License for more details.
+//
+// You should have received a copy of the Lesser GNU General Public License
+// along with CISM. If not, see .
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#include "trilinosModelEvaluator.hpp"
+#include "Teuchos_StandardCatchMacros.hpp"
+
+
+extern "C" {
+ void calc_F(double* x, double* f, int N, void* bb, int ispert);
+ void apply_precond_nox(double* x, double* y, int n, void* bb);
+ void reset_effstrmin(const double* esm);
+}
+/*******************************************************************************/
+/*******************************************************************************/
+/*******************************************************************************/
+
+trilinosModelEvaluator::trilinosModelEvaluator (
+ int N_, double* statevector,
+ const Epetra_Comm& comm_, void* blackbox_res_)
+ : N(N_), comm(comm_), blackbox_res(blackbox_res_)
+{
+ bool succeeded=true;
+ try {
+ xMap = Teuchos::rcp(new Epetra_Map(-1, N, 0, comm));
+ xVec = Teuchos::rcp(new Epetra_Vector(Copy, *xMap, statevector));
+
+ precOp = Teuchos::rcp(new trilinosPreconditioner(N, xVec, xMap, blackbox_res));
+
+ pMap = Teuchos::rcp(new Epetra_LocalMap(1, 0, comm));
+ pVec = Teuchos::rcp(new Epetra_Vector(*pMap));
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, succeeded);
+ if (!succeeded) exit(1);
+}
+
+/*******************************************************************************/
+// Return solution vector map
+Teuchos::RCP trilinosModelEvaluator::get_x_map() const{
+ return xMap;
+}
+
+// Return residual vector map
+Teuchos::RCP trilinosModelEvaluator::get_f_map() const{
+ return xMap;
+}
+
+// Return initial solution and x_dot init
+Teuchos::RCP trilinosModelEvaluator::get_x_init() const{
+ return xVec;
+}
+
+Teuchos::RCP
+trilinosModelEvaluator::create_WPrec() const
+{
+ // bool is answer to: "Prec is already inverted?"
+ return Teuchos::rcp(new EpetraExt::ModelEvaluator::Preconditioner(precOp,true));
+}
+
+Teuchos::RCP trilinosModelEvaluator::get_p_map(int l) const{
+ return pMap;
+}
+Teuchos::RCP trilinosModelEvaluator::get_p_init(int l) const{
+ return pVec;
+}
+Teuchos::RCP > trilinosModelEvaluator::get_p_names(int l) const{
+ RCP > p_names =
+ rcp(new Teuchos::Array(1) );
+ (*p_names)[0] = "Effstrmin Factor";
+
+ return p_names;
+}
+
+/*******************************************************************************/
+// Create InArgs
+EpetraExt::ModelEvaluator::InArgs trilinosModelEvaluator::createInArgs() const{
+ InArgsSetup inArgs;
+
+ inArgs.setModelEvalDescription(this->description());
+ inArgs.setSupports(IN_ARG_x,true);
+ inArgs.set_Np(1);
+ return inArgs;
+}
+
+/*******************************************************************************/
+// Create OutArgs
+EpetraExt::ModelEvaluator::OutArgs trilinosModelEvaluator::createOutArgs() const{
+ OutArgsSetup outArgs;
+ outArgs.setModelEvalDescription(this->description());
+ outArgs.set_Np_Ng(1, 0);
+ outArgs.setSupports(OUT_ARG_f,true);
+ outArgs.setSupports(OUT_ARG_WPrec, true);
+
+ return outArgs;
+}
+
+/*******************************************************************************/
+// Evaluate model on InArgs
+void trilinosModelEvaluator::evalModel(const InArgs& inArgs, const OutArgs& outArgs) const{
+
+ // Get the solution vector x from inArgs and residual vector from outArgs
+ RCP x = inArgs.get_x();
+ EpetraExt::ModelEvaluator::Evaluation f = outArgs.get_f();
+
+ if (x == Teuchos::null) throw "trilinosModelEvaluator::evalModel: x was NOT specified!";
+
+ // Check if a "Effminstr Factor" parameter is being set by LOCA
+ Teuchos::RCP p_in = inArgs.get_p(0);
+ if (p_in.get()) reset_effstrmin(&(*p_in)[0]);
+
+ // Save the current solution, which makes it initial guess for next nonlinear solve
+ *xVec = *x;
+
+ if (f != Teuchos::null) {
+ // Check if this is a perturbed eval. Glimmer only saves off matrices for unperturbed case.
+ int ispert =0;
+ if (f.getType() == EpetraExt::ModelEvaluator::EVAL_TYPE_APPROX_DERIV) ispert=1;
+
+ f->PutScalar(0.0);
+ calc_F(x->Values(), f->Values(), N, blackbox_res, ispert);
+ }
+
+ RCP WPrec = outArgs.get_WPrec();
+ if (WPrec != Teuchos::null) {
+ //cout << "evalModel called for WPrec -- doing nothing " << endl;
+ }
+}
+/*******************************************************************************/
+/*******************************************************************************/
+/*******************************************************************************/
+trilinosPreconditioner::trilinosPreconditioner (
+ int N_, RCP xVec_, RCP xMap_, void* blackbox_res_)
+ : N(N_), xVec(xVec_), xMap(xMap_), blackbox_res(blackbox_res_)
+{
+}
+
+int trilinosPreconditioner::ApplyInverse(const Epetra_MultiVector& X, Epetra_MultiVector& Y) const
+{
+ bool succeeded=true;
+ try {
+ apply_precond_nox(Y(0)->Values(), X(0)->Values(), N, blackbox_res);
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, succeeded);
+ if (!succeeded) exit(1);
+
+ return 0;
+}
+
+
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/trilinosModelEvaluator.hpp b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosModelEvaluator.hpp
new file mode 100644
index 0000000000..b175d84c91
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosModelEvaluator.hpp
@@ -0,0 +1,97 @@
+#ifndef GLIMMER_MODELEVALUATOR_HPP
+#define GLIMMER_MODELEVALUATOR_HPP
+
+#include "Teuchos_RCP.hpp"
+#include "EpetraExt_ModelEvaluator.h"
+#include "Epetra_Map.h"
+#include "Epetra_LocalMap.h"
+#include "Epetra_Comm.h"
+#include "Epetra_Operator.h"
+
+using Teuchos::RCP;
+
+
+class trilinosModelEvaluator : public EpetraExt::ModelEvaluator {
+public:
+
+
+ trilinosModelEvaluator(int N_,
+ double* statevector,
+ const Epetra_Comm& comm_,
+ void* blackbox_res);
+ //@{
+
+ //! Return solution vector map
+ RCP get_x_map() const;
+
+ //! Return residual vector map
+ RCP get_f_map() const;
+
+ //! Return initial solution and x_dot init
+ RCP get_x_init() const;
+
+ RCP create_WPrec() const;
+
+ //! Parameter setting functions for LOCA continuation
+ RCP get_p_map(int l) const;
+ RCP get_p_init(int l) const;
+ RCP > get_p_names(int l) const;
+
+ //! Create InArgs
+ InArgs createInArgs() const;
+
+ //! Create OutArgs
+ OutArgs createOutArgs() const;
+
+ //! Reset State
+ // void ResetState(double *statevector,void* blackbox_res_);
+
+ //! Evaluate model on InArgs
+ void evalModel(const InArgs& inArgs, const OutArgs& outArgs) const;
+ //@}
+
+private:
+ // Solution vector and map
+ int N;
+ RCP xMap;
+ RCP xVec;
+ const Epetra_Comm& comm;
+ void* blackbox_res;
+ RCP precOp;
+
+ RCP pMap;
+ RCP pVec;
+};
+
+
+class trilinosPreconditioner : public Epetra_Operator {
+
+public:
+ // Preconditioner as Epetra_Operator required methods
+
+ trilinosPreconditioner(int N, RCP xVec, RCP xMap,
+ void* blackbox_res);
+
+ int ApplyInverse(const Epetra_MultiVector& V, Epetra_MultiVector& Y) const;
+
+ // Trivial implementations
+ int SetUseTranspose(bool UseTranspose) { TEUCHOS_TEST_FOR_EXCEPT(UseTranspose); return 0;};
+ int Apply(const Epetra_MultiVector& X, Epetra_MultiVector& Y) const
+ { throw "No Apply() in TrilinosPreconditioner";};
+ double NormInf() const { throw "NO NormInf Implemented in trilinosPrecon";};
+ const char* Label () const { return "trilinosPrec"; };
+ bool UseTranspose() const { return false; };
+ bool HasNormInf() const { return false; };
+ const Epetra_Comm & Comm() const { return xMap->Comm();};
+ const Epetra_Map& OperatorDomainMap () const { return *xMap;};
+ const Epetra_Map& OperatorRangeMap () const { return *xMap;};
+
+private:
+ int N;
+ RCP xVec;
+ RCP xMap;
+ void* blackbox_res;
+};
+
+#endif
+
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/trilinosNoxSolver.cpp b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosNoxSolver.cpp
new file mode 100644
index 0000000000..b052fa6a08
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosNoxSolver.cpp
@@ -0,0 +1,200 @@
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// trilinosNoxSolver.cpp - part of the Community Ice Sheet Model (CISM)
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//
+// Copyright (C) 2005-2014
+// CISM contributors - see AUTHORS file for list of contributors
+//
+// This file is part of CISM.
+//
+// CISM is free software: you can redistribute it and/or modify it
+// under the terms of the Lesser GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// CISM is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// Lesser GNU General Public License for more details.
+//
+// You should have received a copy of the Lesser GNU General Public License
+// along with CISM. If not, see .
+//
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+// Trilinos Objects
+#include "Piro_Epetra_NOXSolver.hpp"
+#include "Piro_Epetra_LOCASolver.hpp"
+#include "trilinosModelEvaluator.hpp"
+
+#include "Epetra_MpiComm.h"
+#include "Teuchos_RCP.hpp"
+#include "Teuchos_ParameterList.hpp"
+#include "Teuchos_XMLParameterListHelpers.hpp"
+#include "Teuchos_StandardCatchMacros.hpp"
+
+#include "Teuchos_DefaultMpiComm.hpp"
+
+#include "config.inc"
+
+using namespace std;
+using Teuchos::RCP;
+using Teuchos::rcp;
+
+// Objects that are global to the file
+static RCP Nsolver;
+static RCP model;
+static RCP paramList;
+static RCP Comm_;
+
+static EpetraExt::ModelEvaluator::InArgs inArgs;
+static EpetraExt::ModelEvaluator::OutArgs outArgs;
+static bool printProc;
+static int timeStep=1; // time step counter
+// Use continuation instead of straight Newton for this many time steps:
+
+void setCismLocaDefaults(Teuchos::ParameterList& locaList) {
+ Teuchos::ParameterList& predList = locaList.sublist("Predictor");
+ Teuchos::ParameterList& stepperList = locaList.sublist("Stepper");
+ Teuchos::ParameterList& stepSizeList = locaList.sublist("Step Size");
+
+ // If not set in XML list, set these defaults instead
+ (void) predList.get("Method","Constant");
+ (void) stepperList.get("Continuation Method","Natural");
+ (void) stepperList.get("Continuation Parameter","Effstrmin Factor");
+ (void) stepperList.get("Initial Value",10.0);
+ (void) stepperList.get("Max Steps",10);
+ (void) stepperList.get("Max Value",100.0); // not used
+ (void) stepperList.get("Min Value",0.0); // Important!!
+
+ (void) stepSizeList.get("Initial Step Size",-3.0); // Important!!
+ (void) stepSizeList.get("Aggressiveness",2.0); // Important!!
+}
+
+
+extern "C" {
+void FC_FUNC(noxinit,NOXINIT) ( int* nelems, double* statevector,
+ int* mpi_comm_f, void* blackbox_res)
+// mpi_comm_f: CISM's fortran mpi communicator
+{
+
+ bool succeeded=true;
+ try {
+
+ // Build the epetra communicator
+ MPI_Comm mpi_comm_c = MPI_Comm_f2c(*mpi_comm_f);
+ Comm_=rcp(new Epetra_MpiComm(mpi_comm_c));
+ Epetra_Comm& Comm=*Comm_;
+ printProc = (Comm_->MyPID() == 0);
+ Teuchos::MpiComm tcomm(Teuchos::opaqueWrapper(mpi_comm_c));
+
+ if (printProc) std::cout << "NOXINIT CALLED for nelem=" << *nelems << std::endl;
+
+ try { // Check that the parameter list is valid at the top
+ RCP pl =
+ rcp(new Teuchos::ParameterList("Trilinos Options for NOX"));
+ Teuchos::updateParametersFromXmlFileAndBroadcast(
+ "trilinosOptions.xml", pl.ptr(),tcomm);
+
+ Teuchos::ParameterList validPL("Valid List");;
+ validPL.sublist("Stratimikos"); validPL.sublist("Piro");
+ pl->validateParameters(validPL, 0);
+ paramList = Teuchos::sublist(pl,"Piro",true);
+ }
+ catch (std::exception& e) {
+ std::cout << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
+ << e.what() << "\nExiting: Invalid trilinosOptions.xml file."
+ << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << std::endl;
+ exit(1);
+ }
+
+ paramList->set("Lean Matrix Free",true); // Saves some GMRES steps
+ //pw if (printProc) std::cout << "NOXInit: param list is: (delete this debug line)\n" << *paramList << std::endl;
+
+ model = rcp(new trilinosModelEvaluator(*nelems, statevector, Comm, blackbox_res));
+
+ // Logic to see if we want to use LOCA continuation or NOX single steady solve
+ // Turn on LOCA by having a LOCA sublist OR setting "CISM: Number of Time Steps To Use LOCA"
+ bool useLoca=false;
+ // If LOCA sublist exists, defaults to using it for 1 time step; but can be set in XML.
+ int numStepsToUseLOCA = 0;
+ if (paramList->isSublist("LOCA"))
+ numStepsToUseLOCA = paramList->get("CISM: Number of Time Steps To Use LOCA",1);
+ else
+ numStepsToUseLOCA = paramList->get("CISM: Number of Time Steps To Use LOCA",0);
+
+ if (timeStep <= numStepsToUseLOCA) useLoca=true;
+
+ if (useLoca) if (printProc)
+ std::cout << "\nUsing LOCA continuation for first " << numStepsToUseLOCA << " time steps." << std::endl;
+
+ if (useLoca) {
+ setCismLocaDefaults(paramList->sublist("LOCA"));
+ Nsolver = rcp(new Piro::Epetra::LOCASolver(paramList, model));
+ }
+ else
+ Nsolver = rcp(new Piro::Epetra::NOXSolver(paramList, model));
+
+ inArgs=Nsolver->createInArgs();
+ outArgs=Nsolver->createOutArgs();
+
+ // Ask the model for the converged solution from g(0)
+ RCP xmap = Nsolver->get_g_map(0);
+ RCP xout = rcp(new Epetra_Vector(*xmap));
+
+ outArgs.set_g(0,xout);
+
+ // Set up parameter vector for continuation runs
+ if (useLoca) {
+ RCP pmap = Nsolver->get_p_map(0);
+ RCP pvec = rcp(new Epetra_Vector(*pmap));
+ inArgs.set_p(0, pvec);
+ }
+
+ // Time step counter: just for deciding whether to use continuation on relaxation param
+ timeStep++;
+
+ } //end try block
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, succeeded);
+ if (!succeeded) exit(1);
+}
+
+/****************************************************/
+void FC_FUNC(noxsolve,NOXSOLVE) (int* nelems, double* statevector, void* blackbox_res)
+{
+ bool succeeded=true;
+ try {
+ TEUCHOS_TEST_FOR_EXCEPTION(Nsolver==Teuchos::null, logic_error,
+ "Exception: noxsolve called with solver=null: \n"
+ << "You either did not call noxinit first, or called noxfinish already");
+ if (printProc) std::cout << "NOXSolve called" << std::endl;
+
+ // Solve
+ Nsolver->evalModel(inArgs,outArgs);
+
+ // Copy out the solution
+ RCP xout = outArgs.get_g(0);
+ if(xout == Teuchos::null) throw "evalModel is NOT returning a vector";
+
+ for (int i=0; i<*nelems; i++) statevector[i] = (*xout)[i];
+ }
+ TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, succeeded);
+ if (!succeeded) exit(1);
+
+}
+
+/****************************************************/
+void FC_FUNC(noxfinish,NOXFINISH) (void)
+{
+ if (printProc) std::cout << "NOXFinish called" << std::endl;
+
+ // Free memory
+ Nsolver = Teuchos::null;
+ model = Teuchos::null;
+ paramList = Teuchos::null;
+ Comm_ = Teuchos::null;
+}
+
+} //extern "C"
diff --git a/components/cism/glimmer-cism/libglimmer-trilinos/trilinosOptions.xml b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosOptions.xml
new file mode 100644
index 0000000000..9507c0751b
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer-trilinos/trilinosOptions.xml
@@ -0,0 +1,105 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/components/cism/glimmer-cism/libglimmer/cfortran.h b/components/cism/glimmer-cism/libglimmer/cfortran.h
new file mode 100644
index 0000000000..ed23011d84
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/cfortran.h
@@ -0,0 +1,2363 @@
+/* cfortran.h 4.3 */
+/* http://www-zeus.desy.de/~burow/cfortran/ */
+/* Burkhard Burow burow@desy.de 1990 - 2001. */
+
+#ifndef __CFORTRAN_LOADED
+#define __CFORTRAN_LOADED
+
+/*
+ THIS FILE IS PROPERTY OF BURKHARD BUROW. IF YOU ARE USING THIS FILE YOU
+ SHOULD ALSO HAVE ACCESS TO CFORTRAN.DOC WHICH PROVIDES TERMS FOR USING,
+ MODIFYING, COPYING AND DISTRIBUTING THE CFORTRAN.H PACKAGE.
+*/
+
+/*
+ Avoid symbols already used by compilers and system *.h:
+ __ - OSF1 zukal06 V3.0 347 alpha, cc -c -std1 cfortest.c
+
+ */
+
+
+/* First prepare for the C compiler. */
+
+#ifndef ANSI_C_preprocessor /* i.e. user can override. */
+#ifdef __CF__KnR
+#define ANSI_C_preprocessor 0
+#else
+#ifdef __STDC__
+#define ANSI_C_preprocessor 1
+#else
+#define _cfleft 1
+#define _cfright
+#define _cfleft_cfright 0
+#define ANSI_C_preprocessor _cfleft/**/_cfright
+#endif
+#endif
+#endif
+
+#if ANSI_C_preprocessor
+#define _0(A,B) A##B
+#define _(A,B) _0(A,B) /* see cat,xcat of K&R ANSI C p. 231 */
+#define _2(A,B) A##B /* K&R ANSI C p.230: .. identifier is not replaced */
+#define _3(A,B,C) _(A,_(B,C))
+#else /* if it turns up again during rescanning. */
+#define _(A,B) A/**/B
+#define _2(A,B) A/**/B
+#define _3(A,B,C) A/**/B/**/C
+#endif
+
+#if (defined(vax)&&defined(unix)) || (defined(__vax__)&&defined(__unix__))
+#define VAXUltrix
+#endif
+
+#include /* NULL [in all machines stdio.h] */
+#include /* strlen, memset, memcpy, memchr. */
+#if !( defined(VAXUltrix) || defined(sun) || (defined(apollo)&&!defined(__STDCPP__)) )
+#include /* malloc,free */
+#else
+#include /* Had to be removed for DomainOS h105 10.4 sys5.3 425t*/
+#ifdef apollo
+#define __CF__APOLLO67 /* __STDCPP__ is in Apollo 6.8 (i.e. ANSI) and onwards */
+#endif
+#endif
+
+#if !defined(__GNUC__) && !defined(__sun) && (defined(sun)||defined(VAXUltrix)||defined(lynx))
+#define __CF__KnR /* Sun, LynxOS and VAX Ultrix cc only supports K&R. */
+ /* Manually define __CF__KnR for HP if desired/required.*/
+#endif /* i.e. We will generate Kernighan and Ritchie C. */
+/* Note that you may define __CF__KnR before #include cfortran.h, in order to
+generate K&R C instead of the default ANSI C. The differences are mainly in the
+function prototypes and declarations. All machines, except the Apollo, work
+with either style. The Apollo's argument promotion rules require ANSI or use of
+the obsolete std_$call which we have not implemented here. Hence on the Apollo,
+only C calling FORTRAN subroutines will work using K&R style.*/
+
+
+/* Remainder of cfortran.h depends on the Fortran compiler. */
+
+#if defined(CLIPPERFortran) || defined(pgiFortran)
+#define f2cFortran
+#endif
+
+/* VAX/VMS does not let us \-split long #if lines. */
+/* Split #if into 2 because some HP-UX can't handle long #if */
+#if !(defined(NAGf90Fortran)||defined(f2cFortran)||defined(hpuxFortran)||defined(apolloFortran)||defined(sunFortran)||defined(IBMR2Fortran)||defined(CRAYFortran))
+#if !(defined(mipsFortran)||defined(DECFortran)||defined(vmsFortran)||defined(CONVEXFortran)||defined(PowerStationFortran)||defined(AbsoftUNIXFortran)||defined(AbsoftProFortran)||defined(SXFortran))
+/* If no Fortran compiler is given, we choose one for the machines we know. */
+#if defined(lynx) || defined(VAXUltrix)
+#define f2cFortran /* Lynx: Only support f2c at the moment.
+ VAXUltrix: f77 behaves like f2c.
+ Support f2c or f77 with gcc, vcc with f2c.
+ f77 with vcc works, missing link magic for f77 I/O.*/
+#endif
+#if defined(__hpux) /* 921107: Use __hpux instead of __hp9000s300 */
+#define hpuxFortran /* Should also allow hp9000s7/800 use.*/
+#endif
+#if defined(apollo)
+#define apolloFortran /* __CF__APOLLO67 also defines some behavior. */
+#endif
+#if defined(sun) || defined(__sun)
+#define sunFortran
+#endif
+#if defined(_IBMR2)
+#define IBMR2Fortran
+#endif
+#if defined(_CRAY)
+#define CRAYFortran /* _CRAYT3E also defines some behavior. */
+#endif
+#if defined(_SX)
+#define SXFortran
+#endif
+#if defined(mips) || defined(__mips)
+#define mipsFortran
+#endif
+#if defined(vms) || defined(__vms)
+#define vmsFortran
+#endif
+#if defined(__alpha) && defined(__unix__)
+#define DECFortran
+#endif
+#if defined(__convex__)
+#define CONVEXFortran
+#endif
+#if defined(VISUAL_CPLUSPLUS)
+#define PowerStationFortran
+#endif
+#endif /* ...Fortran */
+#endif /* ...Fortran */
+
+/* Split #if into 2 because some HP-UX can't handle long #if */
+#if !(defined(NAGf90Fortran)||defined(f2cFortran)||defined(hpuxFortran)||defined(apolloFortran)||defined(sunFortran)||defined(IBMR2Fortran)||defined(CRAYFortran))
+#if !(defined(mipsFortran)||defined(DECFortran)||defined(vmsFortran)||defined(CONVEXFortran)||defined(PowerStationFortran)||defined(AbsoftUNIXFortran)||defined(AbsoftProFortran)||defined(SXFortran))
+/* If your compiler barfs on ' #error', replace # with the trigraph for # */
+ #error "cfortran.h: Can't find your environment among:\
+ - MIPS cc and f77 2.0. (e.g. Silicon Graphics, DECstations, ...) \
+ - IBM AIX XL C and FORTRAN Compiler/6000 Version 01.01.0000.0000 \
+ - VAX VMS CC 3.1 and FORTRAN 5.4. \
+ - Alpha VMS DEC C 1.3 and DEC FORTRAN 6.0. \
+ - Alpha OSF DEC C and DEC Fortran for OSF/1 AXP Version 1.2 \
+ - Apollo DomainOS 10.2 (sys5.3) with f77 10.7 and cc 6.7. \
+ - CRAY \
+ - NEC SX-4 SUPER-UX \
+ - CONVEX \
+ - Sun \
+ - PowerStation Fortran with Visual C++ \
+ - HP9000s300/s700/s800 Latest test with: HP-UX A.08.07 A 9000/730 \
+ - LynxOS: cc or gcc with f2c. \
+ - VAXUltrix: vcc,cc or gcc with f2c. gcc or cc with f77. \
+ - f77 with vcc works; but missing link magic for f77 I/O. \
+ - NO fort. None of gcc, cc or vcc generate required names.\
+ - f2c : Use #define f2cFortran, or cc -Df2cFortran \
+ - NAG f90: Use #define NAGf90Fortran, or cc -DNAGf90Fortran \
+ - Absoft UNIX F77: Use #define AbsoftUNIXFortran or cc -DAbsoftUNIXFortran \
+ - Absoft Pro Fortran: Use #define AbsoftProFortran \
+ - Portland Group Fortran: Use #define pgiFortran"
+/* Compiler must throw us out at this point! */
+#endif
+#endif
+
+
+#if defined(VAXC) && !defined(__VAXC)
+#define OLD_VAXC
+#pragma nostandard /* Prevent %CC-I-PARAMNOTUSED. */
+#endif
+
+/* Throughout cfortran.h we use: UN = Uppercase Name. LN = Lowercase Name. */
+
+#if defined(f2cFortran) || defined(NAGf90Fortran) || defined(DECFortran) || defined(mipsFortran) || defined(apolloFortran) || defined(sunFortran) || defined(CONVEXFortran) || defined(SXFortran) || defined(extname)
+#define CFC_(UN,LN) _(LN,_) /* Lowercase FORTRAN symbols. */
+#define orig_fcallsc(UN,LN) CFC_(UN,LN)
+#else
+#if defined(CRAYFortran) || defined(PowerStationFortran) || defined(AbsoftProFortran)
+#ifdef _CRAY /* (UN), not UN, circumvents CRAY preprocessor bug. */
+#define CFC_(UN,LN) (UN) /* Uppercase FORTRAN symbols. */
+#else /* At least VISUAL_CPLUSPLUS barfs on (UN), so need UN. */
+#define CFC_(UN,LN) UN /* Uppercase FORTRAN symbols. */
+#endif
+#define orig_fcallsc(UN,LN) CFC_(UN,LN) /* CRAY insists on arg.'s here. */
+#else /* For following machines one may wish to change the fcallsc default. */
+#define CF_SAME_NAMESPACE
+#ifdef vmsFortran
+#define CFC_(UN,LN) LN /* Either case FORTRAN symbols. */
+ /* BUT we usually use UN for C macro to FORTRAN routines, so use LN here,*/
+ /* because VAX/VMS doesn't do recursive macros. */
+#define orig_fcallsc(UN,LN) UN
+#else /* HP-UX without +ppu or IBMR2 without -qextname. NOT recommended. */
+#define CFC_(UN,LN) LN /* Lowercase FORTRAN symbols. */
+#define orig_fcallsc(UN,LN) CFC_(UN,LN)
+#endif /* vmsFortran */
+#endif /* CRAYFortran PowerStationFortran */
+#endif /* ....Fortran */
+
+#define fcallsc(UN,LN) orig_fcallsc(UN,LN)
+#define preface_fcallsc(P,p,UN,LN) CFC_(_(P,UN),_(p,LN))
+#define append_fcallsc(P,p,UN,LN) CFC_(_(UN,P),_(LN,p))
+
+#define C_FUNCTION(UN,LN) fcallsc(UN,LN)
+#define FORTRAN_FUNCTION(UN,LN) CFC_(UN,LN)
+
+#ifndef COMMON_BLOCK
+#ifndef CONVEXFortran
+#ifndef CLIPPERFortran
+#if !(defined(AbsoftUNIXFortran)||defined(AbsoftProFortran))
+#define COMMON_BLOCK(UN,LN) CFC_(UN,LN)
+#else
+#define COMMON_BLOCK(UN,LN) _(_C,LN)
+#endif /* AbsoftUNIXFortran or AbsoftProFortran */
+#else
+#define COMMON_BLOCK(UN,LN) _(LN,__)
+#endif /* CLIPPERFortran */
+#else
+#define COMMON_BLOCK(UN,LN) _3(_,LN,_)
+#endif /* CONVEXFortran */
+#endif /* COMMON_BLOCK */
+
+#ifndef DOUBLE_PRECISION
+#if defined(CRAYFortran) && !defined(_CRAYT3E)
+#define DOUBLE_PRECISION long double
+#else
+#define DOUBLE_PRECISION double
+#endif
+#endif
+
+#ifndef FORTRAN_REAL
+#if defined(CRAYFortran) && defined(_CRAYT3E)
+#define FORTRAN_REAL double
+#else
+#define FORTRAN_REAL float
+#endif
+#endif
+
+#ifdef CRAYFortran
+#ifdef _CRAY
+#include
+#else
+#include "fortran.h" /* i.e. if crosscompiling assume user has file. */
+#endif
+#define FLOATVVVVVVV_cfPP (FORTRAN_REAL *) /* Used for C calls FORTRAN. */
+/* CRAY's double==float but CRAY says pointers to doubles and floats are diff.*/
+#define VOIDP (void *) /* When FORTRAN calls C, we don't know if C routine
+ arg.'s have been declared float *, or double *. */
+#else
+#define FLOATVVVVVVV_cfPP
+#define VOIDP
+#endif
+
+#ifdef vmsFortran
+#if defined(vms) || defined(__vms)
+#include
+#else
+#include "descrip.h" /* i.e. if crosscompiling assume user has file. */
+#endif
+#endif
+
+#ifdef sunFortran
+#if defined(sun) || defined(__sun)
+#include /* Sun's FLOATFUNCTIONTYPE, ASSIGNFLOAT, RETURNFLOAT. */
+#else
+#include "math.h" /* i.e. if crosscompiling assume user has file. */
+#endif
+/* At least starting with the default C compiler SC3.0.1 of SunOS 5.3,
+ * FLOATFUNCTIONTYPE, ASSIGNFLOAT, RETURNFLOAT are not required and not in
+ * , since sun C no longer promotes C float return values to doubles.
+ * Therefore, only use them if defined.
+ * Even if gcc is being used, assume that it exhibits the Sun C compiler
+ * behavior in order to be able to use *.o from the Sun C compiler.
+ * i.e. If FLOATFUNCTIONTYPE, etc. are in math.h, they required by gcc.
+ */
+#endif
+
+#ifndef apolloFortran
+#define COMMON_BLOCK_DEF(DEFINITION, NAME) extern DEFINITION NAME
+#define CF_NULL_PROTO
+#else /* HP doesn't understand #elif. */
+/* Without ANSI prototyping, Apollo promotes float functions to double. */
+/* Note that VAX/VMS, IBM, Mips choke on 'type function(...);' prototypes. */
+#define CF_NULL_PROTO ...
+#ifndef __CF__APOLLO67
+#define COMMON_BLOCK_DEF(DEFINITION, NAME) \
+ DEFINITION NAME __attribute((__section(NAME)))
+#else
+#define COMMON_BLOCK_DEF(DEFINITION, NAME) \
+ DEFINITION NAME #attribute[section(NAME)]
+#endif
+#endif
+
+#ifdef __cplusplus
+#undef CF_NULL_PROTO
+#define CF_NULL_PROTO ...
+#endif
+
+
+#ifndef USE_NEW_DELETE
+#ifdef __cplusplus
+#define USE_NEW_DELETE 1
+#else
+#define USE_NEW_DELETE 0
+#endif
+#endif
+#if USE_NEW_DELETE
+#define _cf_malloc(N) new char[N]
+#define _cf_free(P) delete[] P
+#else
+#define _cf_malloc(N) (char *)malloc(N)
+#define _cf_free(P) free(P)
+#endif
+
+#ifdef mipsFortran
+#define CF_DECLARE_GETARG int f77argc; char **f77argv
+#define CF_SET_GETARG(ARGC,ARGV) f77argc = ARGC; f77argv = ARGV
+#else
+#define CF_DECLARE_GETARG
+#define CF_SET_GETARG(ARGC,ARGV)
+#endif
+
+#ifdef OLD_VAXC /* Allow %CC-I-PARAMNOTUSED. */
+#pragma standard
+#endif
+
+#define AcfCOMMA ,
+#define AcfCOLON ;
+
+/*-------------------------------------------------------------------------*/
+
+/* UTILITIES USED WITHIN CFORTRAN.H */
+
+#define _cfMIN(A,B) (As) { /* Need this to handle NULL string.*/
+ while (e>s && *--e==t); /* Don't follow t's past beginning. */
+ e[*e==t?0:1] = '\0'; /* Handle s[0]=t correctly. */
+} return s; }
+
+/* kill_trailingn(s,t,e) will kill the trailing t's in string s. e normally
+points to the terminating '\0' of s, but may actually point to anywhere in s.
+s's new '\0' will be placed at e or earlier in order to remove any trailing t's.
+If es) { /* Watch out for neg. length string.*/
+ while (e>s && *--e==t); /* Don't follow t's past beginning. */
+ e[*e==t?0:1] = '\0'; /* Handle s[0]=t correctly. */
+} return s; }
+
+/* Note the following assumes that any element which has t's to be chopped off,
+does indeed fill the entire element. */
+#ifndef __CF__KnR
+static char *vkill_trailing(char* cstr, int elem_len, int sizeofcstr, char t)
+#else
+static char *vkill_trailing( cstr, elem_len, sizeofcstr, t)
+ char* cstr; int elem_len; int sizeofcstr; char t;
+#endif
+{ int i;
+for (i=0; i= 4.3 gives message:
+ zow35> cc -c -DDECFortran cfortest.c
+ cfe: Fatal: Out of memory: cfortest.c
+ zow35>
+ Old __hpux had the problem, but new 'HP-UX A.09.03 A 9000/735' is fine
+ if using -Aa, otherwise we have a problem.
+ */
+#ifndef MAX_PREPRO_ARGS
+#if !defined(__GNUC__) && (defined(VAXUltrix) || defined(__CF__APOLLO67) || (defined(sun)&&!defined(__sun)) || defined(_CRAY) || defined(__ultrix__) || (defined(__hpux)&&defined(__CF__KnR)))
+#define MAX_PREPRO_ARGS 31
+#else
+#define MAX_PREPRO_ARGS 99
+#endif
+#endif
+
+#if defined(AbsoftUNIXFortran) || defined(AbsoftProFortran)
+/* In addition to explicit Absoft stuff, only Absoft requires:
+ - DEFAULT coming from _cfSTR.
+ DEFAULT could have been called e.g. INT, but keep it for clarity.
+ - M term in CFARGT14 and CFARGT14FS.
+ */
+#define ABSOFT_cf1(T0) _(T0,_cfSTR)(0,ABSOFT1,0,0,0,0,0)
+#define ABSOFT_cf2(T0) _(T0,_cfSTR)(0,ABSOFT2,0,0,0,0,0)
+#define ABSOFT_cf3(T0) _(T0,_cfSTR)(0,ABSOFT3,0,0,0,0,0)
+#define DEFAULT_cfABSOFT1
+#define LOGICAL_cfABSOFT1
+#define STRING_cfABSOFT1 ,MAX_LEN_FORTRAN_FUNCTION_STRING
+#define DEFAULT_cfABSOFT2
+#define LOGICAL_cfABSOFT2
+#define STRING_cfABSOFT2 ,unsigned D0
+#define DEFAULT_cfABSOFT3
+#define LOGICAL_cfABSOFT3
+#define STRING_cfABSOFT3 ,D0
+#else
+#define ABSOFT_cf1(T0)
+#define ABSOFT_cf2(T0)
+#define ABSOFT_cf3(T0)
+#endif
+
+/* _Z introduced to circumvent IBM and HP silly preprocessor warning.
+ e.g. "Macro CFARGT14 invoked with a null argument."
+ */
+#define _Z
+
+#define CFARGT14S(S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ S(T1,1) S(T2,2) S(T3,3) S(T4,4) S(T5,5) S(T6,6) S(T7,7) \
+ S(T8,8) S(T9,9) S(TA,10) S(TB,11) S(TC,12) S(TD,13) S(TE,14)
+#define CFARGT27S(S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ S(T1,1) S(T2,2) S(T3,3) S(T4,4) S(T5,5) S(T6,6) S(T7,7) \
+ S(T8,8) S(T9,9) S(TA,10) S(TB,11) S(TC,12) S(TD,13) S(TE,14) \
+ S(TF,15) S(TG,16) S(TH,17) S(TI,18) S(TJ,19) S(TK,20) S(TL,21) \
+ S(TM,22) S(TN,23) S(TO,24) S(TP,25) S(TQ,26) S(TR,27)
+
+#define CFARGT14FS(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ F(T1,1,0) F(T2,2,1) F(T3,3,1) F(T4,4,1) F(T5,5,1) F(T6,6,1) F(T7,7,1) \
+ F(T8,8,1) F(T9,9,1) F(TA,10,1) F(TB,11,1) F(TC,12,1) F(TD,13,1) F(TE,14,1) \
+ M CFARGT14S(S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)
+#define CFARGT27FS(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ F(T1,1,0) F(T2,2,1) F(T3,3,1) F(T4,4,1) F(T5,5,1) F(T6,6,1) F(T7,7,1) \
+ F(T8,8,1) F(T9,9,1) F(TA,10,1) F(TB,11,1) F(TC,12,1) F(TD,13,1) F(TE,14,1) \
+ F(TF,15,1) F(TG,16,1) F(TH,17,1) F(TI,18,1) F(TJ,19,1) F(TK,20,1) F(TL,21,1) \
+ F(TM,22,1) F(TN,23,1) F(TO,24,1) F(TP,25,1) F(TQ,26,1) F(TR,27,1) \
+ M CFARGT27S(S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)
+
+#if !(defined(PowerStationFortran)||defined(hpuxFortran800))
+/* Old CFARGT14 -> CFARGT14FS as seen below, for Absoft cross-compile yields:
+ SunOS> cc -c -Xa -DAbsoftUNIXFortran c.c
+ "c.c", line 406: warning: argument mismatch
+ Haven't checked if this is ANSI C or a SunOS bug. SunOS -Xs works ok.
+ Behavior is most clearly seen in example:
+ #define A 1 , 2
+ #define C(X,Y,Z) x=X. y=Y. z=Z.
+ #define D(X,Y,Z) C(X,Y,Z)
+ D(x,A,z)
+ Output from preprocessor is: x = x . y = 1 . z = 2 .
+ #define CFARGT14(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ CFARGT14FS(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)
+*/
+#define CFARGT14(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ F(T1,1,0) F(T2,2,1) F(T3,3,1) F(T4,4,1) F(T5,5,1) F(T6,6,1) F(T7,7,1) \
+ F(T8,8,1) F(T9,9,1) F(TA,10,1) F(TB,11,1) F(TC,12,1) F(TD,13,1) F(TE,14,1) \
+ M CFARGT14S(S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)
+#define CFARGT27(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ F(T1,1,0) F(T2,2,1) F(T3,3,1) F(T4,4,1) F(T5,5,1) F(T6,6,1) F(T7,7,1) \
+ F(T8,8,1) F(T9,9,1) F(TA,10,1) F(TB,11,1) F(TC,12,1) F(TD,13,1) F(TE,14,1) \
+ F(TF,15,1) F(TG,16,1) F(TH,17,1) F(TI,18,1) F(TJ,19,1) F(TK,20,1) F(TL,21,1) \
+ F(TM,22,1) F(TN,23,1) F(TO,24,1) F(TP,25,1) F(TQ,26,1) F(TR,27,1) \
+ M CFARGT27S(S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)
+
+#define CFARGT20(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK) \
+ F(T1,1,0) F(T2,2,1) F(T3,3,1) F(T4,4,1) F(T5,5,1) F(T6,6,1) F(T7,7,1) \
+ F(T8,8,1) F(T9,9,1) F(TA,10,1) F(TB,11,1) F(TC,12,1) F(TD,13,1) F(TE,14,1) \
+ F(TF,15,1) F(TG,16,1) F(TH,17,1) F(TI,18,1) F(TJ,19,1) F(TK,20,1) \
+ S(T1,1) S(T2,2) S(T3,3) S(T4,4) S(T5,5) S(T6,6) S(T7,7) \
+ S(T8,8) S(T9,9) S(TA,10) S(TB,11) S(TC,12) S(TD,13) S(TE,14) \
+ S(TF,15) S(TG,16) S(TH,17) S(TI,18) S(TJ,19) S(TK,20)
+#define CFARGTA14(F,S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE) \
+ F(T1,A1,1,0) F(T2,A2,2,1) F(T3,A3,3,1) F(T4,A4,4,1) F(T5,A5,5,1) F(T6,A6,6,1) \
+ F(T7,A7,7,1) F(T8,A8,8,1) F(T9,A9,9,1) F(TA,AA,10,1) F(TB,AB,11,1) F(TC,AC,12,1) \
+ F(TD,AD,13,1) F(TE,AE,14,1) S(T1,1) S(T2,2) S(T3,3) S(T4,4) \
+ S(T5,5) S(T6,6) S(T7,7) S(T8,8) S(T9,9) S(TA,10) \
+ S(TB,11) S(TC,12) S(TD,13) S(TE,14)
+#if MAX_PREPRO_ARGS>31
+#define CFARGTA20(F,S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK) \
+ F(T1,A1,1,0) F(T2,A2,2,1) F(T3,A3,3,1) F(T4,A4,4,1) F(T5,A5,5,1) F(T6,A6,6,1) \
+ F(T7,A7,7,1) F(T8,A8,8,1) F(T9,A9,9,1) F(TA,AA,10,1) F(TB,AB,11,1) F(TC,AC,12,1) \
+ F(TD,AD,13,1) F(TE,AE,14,1) F(TF,AF,15,1) F(TG,AG,16,1) F(TH,AH,17,1) F(TI,AI,18,1) \
+ F(TJ,AJ,19,1) F(TK,AK,20,1) S(T1,1) S(T2,2) S(T3,3) S(T4,4) \
+ S(T5,5) S(T6,6) S(T7,7) S(T8,8) S(T9,9) S(TA,10) \
+ S(TB,11) S(TC,12) S(TD,13) S(TE,14) S(TF,15) S(TG,16) \
+ S(TH,17) S(TI,18) S(TJ,19) S(TK,20)
+#define CFARGTA27(F,S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,AQ,AR) \
+ F(T1,A1,1,0) F(T2,A2,2,1) F(T3,A3,3,1) F(T4,A4,4,1) F(T5,A5,5,1) F(T6,A6,6,1) \
+ F(T7,A7,7,1) F(T8,A8,8,1) F(T9,A9,9,1) F(TA,AA,10,1) F(TB,AB,11,1) F(TC,AC,12,1) \
+ F(TD,AD,13,1) F(TE,AE,14,1) F(TF,AF,15,1) F(TG,AG,16,1) F(TH,AH,17,1) F(TI,AI,18,1) \
+ F(TJ,AJ,19,1) F(TK,AK,20,1) F(TL,AL,21,1) F(TM,AM,22,1) F(TN,AN,23,1) F(TO,AO,24,1) \
+ F(TP,AP,25,1) F(TQ,AQ,26,1) F(TR,AR,27,1) S(T1,1) S(T2,2) S(T3,3) \
+ S(T4,4) S(T5,5) S(T6,6) S(T7,7) S(T8,8) S(T9,9) \
+ S(TA,10) S(TB,11) S(TC,12) S(TD,13) S(TE,14) S(TF,15) \
+ S(TG,16) S(TH,17) S(TI,18) S(TJ,19) S(TK,20) S(TL,21) \
+ S(TM,22) S(TN,23) S(TO,24) S(TP,25) S(TQ,26) S(TR,27)
+#endif
+#else
+#define CFARGT14(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ F(T1,1,0) S(T1,1) F(T2,2,1) S(T2,2) F(T3,3,1) S(T3,3) F(T4,4,1) S(T4,4) \
+ F(T5,5,1) S(T5,5) F(T6,6,1) S(T6,6) F(T7,7,1) S(T7,7) F(T8,8,1) S(T8,8) \
+ F(T9,9,1) S(T9,9) F(TA,10,1) S(TA,10) F(TB,11,1) S(TB,11) F(TC,12,1) S(TC,12) \
+ F(TD,13,1) S(TD,13) F(TE,14,1) S(TE,14)
+#define CFARGT27(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ F(T1,1,0) S(T1,1) F(T2,2,1) S(T2,2) F(T3,3,1) S(T3,3) F(T4,4,1) S(T4,4) \
+ F(T5,5,1) S(T5,5) F(T6,6,1) S(T6,6) F(T7,7,1) S(T7,7) F(T8,8,1) S(T8,8) \
+ F(T9,9,1) S(T9,9) F(TA,10,1) S(TA,10) F(TB,11,1) S(TB,11) F(TC,12,1) S(TC,12) \
+ F(TD,13,1) S(TD,13) F(TE,14,1) S(TE,14) F(TF,15,1) S(TF,15) F(TG,16,1) S(TG,16) \
+ F(TH,17,1) S(TH,17) F(TI,18,1) S(TI,18) F(TJ,19,1) S(TJ,19) F(TK,20,1) S(TK,20) \
+ F(TL,21,1) S(TL,21) F(TM,22,1) S(TM,22) F(TN,23,1) S(TN,23) F(TO,24,1) S(TO,24) \
+ F(TP,25,1) S(TP,25) F(TQ,26,1) S(TQ,26) F(TR,27,1) S(TR,27)
+
+#define CFARGT20(F,S,M,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK) \
+ F(T1,1,0) S(T1,1) F(T2,2,1) S(T2,2) F(T3,3,1) S(T3,3) F(T4,4,1) S(T4,4) \
+ F(T5,5,1) S(T5,5) F(T6,6,1) S(T6,6) F(T7,7,1) S(T7,7) F(T8,8,1) S(T8,8) \
+ F(T9,9,1) S(T9,9) F(TA,10,1) S(TA,10) F(TB,11,1) S(TB,11) F(TC,12,1) S(TC,12) \
+ F(TD,13,1) S(TD,13) F(TE,14,1) S(TE,14) F(TF,15,1) S(TF,15) F(TG,16,1) S(TG,16) \
+ F(TH,17,1) S(TH,17) F(TI,18,1) S(TI,18) F(TJ,19,1) S(TJ,19) F(TK,20,1) S(TK,20)
+#define CFARGTA14(F,S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE) \
+ F(T1,A1,1,0) S(T1,1) F(T2,A2,2,1) S(T2,2) F(T3,A3,3,1) S(T3,3) \
+ F(T4,A4,4,1) S(T4,4) F(T5,A5,5,1) S(T5,5) F(T6,A6,6,1) S(T6,6) \
+ F(T7,A7,7,1) S(T7,7) F(T8,A8,8,1) S(T8,8) F(T9,A9,9,1) S(T9,9) \
+ F(TA,AA,10,1) S(TA,10) F(TB,AB,11,1) S(TB,11) F(TC,AC,12,1) S(TC,12) \
+ F(TD,AD,13,1) S(TD,13) F(TE,AE,14,1) S(TE,14)
+#if MAX_PREPRO_ARGS>31
+#define CFARGTA20(F,S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK) \
+ F(T1,A1,1,0) S(T1,1) F(T2,A2,2,1) S(T2,2) F(T3,A3,3,1) S(T3,3) \
+ F(T4,A4,4,1) S(T4,4) F(T5,A5,5,1) S(T5,5) F(T6,A6,6,1) S(T6,6) \
+ F(T7,A7,7,1) S(T7,7) F(T8,A8,8,1) S(T8,8) F(T9,A9,9,1) S(T9,9) \
+ F(TA,AA,10,1) S(TA,10) F(TB,AB,11,1) S(TB,11) F(TC,AC,12,1) S(TC,12) \
+ F(TD,AD,13,1) S(TD,13) F(TE,AE,14,1) S(TE,14) F(TF,AF,15,1) S(TF,15) \
+ F(TG,AG,16,1) S(TG,16) F(TH,AH,17,1) S(TH,17) F(TI,AI,18,1) S(TI,18) \
+ F(TJ,AJ,19,1) S(TJ,19) F(TK,AK,20,1) S(TK,20)
+#define CFARGTA27(F,S,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,AQ,AR) \
+ F(T1,A1,1,0) S(T1,1) F(T2,A2,2,1) S(T2,2) F(T3,A3,3,1) S(T3,3) \
+ F(T4,A4,4,1) S(T4,4) F(T5,A5,5,1) S(T5,5) F(T6,A6,6,1) S(T6,6) \
+ F(T7,A7,7,1) S(T7,7) F(T8,A8,8,1) S(T8,8) F(T9,A9,9,1) S(T9,9) \
+ F(TA,AA,10,1) S(TA,10) F(TB,AB,11,1) S(TB,11) F(TC,AC,12,1) S(TC,12) \
+ F(TD,AD,13,1) S(TD,13) F(TE,AE,14,1) S(TE,14) F(TF,AF,15,1) S(TF,15) \
+ F(TG,AG,16,1) S(TG,16) F(TH,AH,17,1) S(TH,17) F(TI,AI,18,1) S(TI,18) \
+ F(TJ,AJ,19,1) S(TJ,19) F(TK,AK,20,1) S(TK,20) F(TL,AL,21,1) S(TL,21) \
+ F(TM,AM,22,1) S(TM,22) F(TN,AN,23,1) S(TN,23) F(TO,AO,24,1) S(TO,24) \
+ F(TP,AP,25,1) S(TP,25) F(TQ,AQ,26,1) S(TQ,26) F(TR,AR,27,1) S(TR,27)
+#endif
+#endif
+
+
+#define PROTOCCALLSFSUB1( UN,LN,T1) \
+ PROTOCCALLSFSUB14(UN,LN,T1,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB2( UN,LN,T1,T2) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB3( UN,LN,T1,T2,T3) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB4( UN,LN,T1,T2,T3,T4) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB5( UN,LN,T1,T2,T3,T4,T5) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB6( UN,LN,T1,T2,T3,T4,T5,T6) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB7( UN,LN,T1,T2,T3,T4,T5,T6,T7) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB8( UN,LN,T1,T2,T3,T4,T5,T6,T7,T8) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB9( UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB10(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB11(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB12(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,CF_0,CF_0)
+#define PROTOCCALLSFSUB13(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,CF_0)
+
+
+#define PROTOCCALLSFSUB15(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF) \
+ PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB16(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG) \
+ PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB17(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH) \
+ PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB18(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI) \
+ PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,CF_0,CF_0)
+#define PROTOCCALLSFSUB19(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ) \
+ PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,CF_0)
+
+#define PROTOCCALLSFSUB21(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL) \
+ PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB22(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM) \
+ PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB23(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN) \
+ PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB24(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO) \
+ PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFSUB25(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP) \
+ PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,CF_0,CF_0)
+#define PROTOCCALLSFSUB26(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ) \
+ PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,CF_0)
+
+
+#ifndef FCALLSC_QUALIFIER
+#ifdef VISUAL_CPLUSPLUS
+#define FCALLSC_QUALIFIER __stdcall
+#else
+#define FCALLSC_QUALIFIER
+#endif
+#endif
+
+#ifdef __cplusplus
+#define CFextern extern "C"
+#else
+#define CFextern extern
+#endif
+
+
+#ifdef CFSUBASFUN
+#define PROTOCCALLSFSUB0(UN,LN) \
+ PROTOCCALLSFFUN0( VOID,UN,LN)
+#define PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ PROTOCCALLSFFUN14(VOID,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)
+#define PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK)\
+ PROTOCCALLSFFUN20(VOID,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK)
+#define PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)\
+ PROTOCCALLSFFUN27(VOID,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)
+#else
+/* Note: Prevent compiler warnings, null #define PROTOCCALLSFSUB14/20 after
+ #include-ing cfortran.h if calling the FORTRAN wrapper within the same
+ source code where the wrapper is created. */
+#define PROTOCCALLSFSUB0(UN,LN) _(VOID,_cfPU)(CFC_(UN,LN))();
+#ifndef __CF__KnR
+#define PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ _(VOID,_cfPU)(CFC_(UN,LN))( CFARGT14(NCF,KCF,_Z,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) );
+#define PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK)\
+ _(VOID,_cfPU)(CFC_(UN,LN))( CFARGT20(NCF,KCF,_Z,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK) );
+#define PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)\
+ _(VOID,_cfPU)(CFC_(UN,LN))( CFARGT27(NCF,KCF,_Z,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) );
+#else
+#define PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ PROTOCCALLSFSUB0(UN,LN)
+#define PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK) \
+ PROTOCCALLSFSUB0(UN,LN)
+#define PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ PROTOCCALLSFSUB0(UN,LN)
+#endif
+#endif
+
+
+#ifdef OLD_VAXC /* Allow %CC-I-PARAMNOTUSED. */
+#pragma standard
+#endif
+
+
+#define CCALLSFSUB1( UN,LN,T1, A1) \
+ CCALLSFSUB5 (UN,LN,T1,CF_0,CF_0,CF_0,CF_0,A1,0,0,0,0)
+#define CCALLSFSUB2( UN,LN,T1,T2, A1,A2) \
+ CCALLSFSUB5 (UN,LN,T1,T2,CF_0,CF_0,CF_0,A1,A2,0,0,0)
+#define CCALLSFSUB3( UN,LN,T1,T2,T3, A1,A2,A3) \
+ CCALLSFSUB5 (UN,LN,T1,T2,T3,CF_0,CF_0,A1,A2,A3,0,0)
+#define CCALLSFSUB4( UN,LN,T1,T2,T3,T4, A1,A2,A3,A4)\
+ CCALLSFSUB5 (UN,LN,T1,T2,T3,T4,CF_0,A1,A2,A3,A4,0)
+#define CCALLSFSUB5( UN,LN,T1,T2,T3,T4,T5, A1,A2,A3,A4,A5) \
+ CCALLSFSUB10(UN,LN,T1,T2,T3,T4,T5,CF_0,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,0,0,0,0,0)
+#define CCALLSFSUB6( UN,LN,T1,T2,T3,T4,T5,T6, A1,A2,A3,A4,A5,A6) \
+ CCALLSFSUB10(UN,LN,T1,T2,T3,T4,T5,T6,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,0,0,0,0)
+#define CCALLSFSUB7( UN,LN,T1,T2,T3,T4,T5,T6,T7, A1,A2,A3,A4,A5,A6,A7) \
+ CCALLSFSUB10(UN,LN,T1,T2,T3,T4,T5,T6,T7,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,0,0,0)
+#define CCALLSFSUB8( UN,LN,T1,T2,T3,T4,T5,T6,T7,T8, A1,A2,A3,A4,A5,A6,A7,A8) \
+ CCALLSFSUB10(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,0,0)
+#define CCALLSFSUB9( UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,A1,A2,A3,A4,A5,A6,A7,A8,A9)\
+ CCALLSFSUB10(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,0)
+#define CCALLSFSUB10(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA)\
+ CCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,0,0,0,0)
+#define CCALLSFSUB11(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB)\
+ CCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,0,0,0)
+#define CCALLSFSUB12(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC)\
+ CCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,0,0)
+#define CCALLSFSUB13(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD)\
+ CCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,0)
+
+#ifdef __cplusplus
+#define CPPPROTOCLSFSUB0( UN,LN)
+#define CPPPROTOCLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)
+#define CPPPROTOCLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK)
+#define CPPPROTOCLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)
+#else
+#define CPPPROTOCLSFSUB0(UN,LN) \
+ PROTOCCALLSFSUB0(UN,LN)
+#define CPPPROTOCLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ PROTOCCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)
+#define CPPPROTOCLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK) \
+ PROTOCCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK)
+#define CPPPROTOCLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ PROTOCCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)
+#endif
+
+#ifdef CFSUBASFUN
+#define CCALLSFSUB0(UN,LN) CCALLSFFUN0(UN,LN)
+#define CCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE)\
+ CCALLSFFUN14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE)
+#else
+/* do{...}while(0) allows if(a==b) FORT(); else BORT(); */
+#define CCALLSFSUB0( UN,LN) do{CPPPROTOCLSFSUB0(UN,LN) CFC_(UN,LN)();}while(0)
+#define CCALLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE)\
+do{VVCF(T1,A1,B1) VVCF(T2,A2,B2) VVCF(T3,A3,B3) VVCF(T4,A4,B4) VVCF(T5,A5,B5) \
+ VVCF(T6,A6,B6) VVCF(T7,A7,B7) VVCF(T8,A8,B8) VVCF(T9,A9,B9) VVCF(TA,AA,B10) \
+ VVCF(TB,AB,B11) VVCF(TC,AC,B12) VVCF(TD,AD,B13) VVCF(TE,AE,B14) \
+ CPPPROTOCLSFSUB14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ ACF(LN,T1,A1,1) ACF(LN,T2,A2,2) ACF(LN,T3,A3,3) \
+ ACF(LN,T4,A4,4) ACF(LN,T5,A5,5) ACF(LN,T6,A6,6) ACF(LN,T7,A7,7) \
+ ACF(LN,T8,A8,8) ACF(LN,T9,A9,9) ACF(LN,TA,AA,10) ACF(LN,TB,AB,11) \
+ ACF(LN,TC,AC,12) ACF(LN,TD,AD,13) ACF(LN,TE,AE,14) \
+ CFC_(UN,LN)( CFARGTA14(AACF,JCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE) );\
+ WCF(T1,A1,1) WCF(T2,A2,2) WCF(T3,A3,3) WCF(T4,A4,4) WCF(T5,A5,5) \
+ WCF(T6,A6,6) WCF(T7,A7,7) WCF(T8,A8,8) WCF(T9,A9,9) WCF(TA,AA,10) \
+ WCF(TB,AB,11) WCF(TC,AC,12) WCF(TD,AD,13) WCF(TE,AE,14) }while(0)
+#endif
+
+
+#if MAX_PREPRO_ARGS>31
+#define CCALLSFSUB15(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF)\
+ CCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,CF_0,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,0,0,0,0,0)
+#define CCALLSFSUB16(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG)\
+ CCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,0,0,0,0)
+#define CCALLSFSUB17(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH)\
+ CCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,0,0,0)
+#define CCALLSFSUB18(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI)\
+ CCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,0,0)
+#define CCALLSFSUB19(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ)\
+ CCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,0)
+
+#ifdef CFSUBASFUN
+#define CCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH, \
+ TI,TJ,TK, A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK) \
+ CCALLSFFUN20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH, \
+ TI,TJ,TK, A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK)
+#else
+#define CCALLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH, \
+ TI,TJ,TK, A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK) \
+do{VVCF(T1,A1,B1) VVCF(T2,A2,B2) VVCF(T3,A3,B3) VVCF(T4,A4,B4) VVCF(T5,A5,B5) \
+ VVCF(T6,A6,B6) VVCF(T7,A7,B7) VVCF(T8,A8,B8) VVCF(T9,A9,B9) VVCF(TA,AA,B10) \
+ VVCF(TB,AB,B11) VVCF(TC,AC,B12) VVCF(TD,AD,B13) VVCF(TE,AE,B14) VVCF(TF,AF,B15) \
+ VVCF(TG,AG,B16) VVCF(TH,AH,B17) VVCF(TI,AI,B18) VVCF(TJ,AJ,B19) VVCF(TK,AK,B20) \
+ CPPPROTOCLSFSUB20(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK) \
+ ACF(LN,T1,A1,1) ACF(LN,T2,A2,2) ACF(LN,T3,A3,3) ACF(LN,T4,A4,4) \
+ ACF(LN,T5,A5,5) ACF(LN,T6,A6,6) ACF(LN,T7,A7,7) ACF(LN,T8,A8,8) \
+ ACF(LN,T9,A9,9) ACF(LN,TA,AA,10) ACF(LN,TB,AB,11) ACF(LN,TC,AC,12) \
+ ACF(LN,TD,AD,13) ACF(LN,TE,AE,14) ACF(LN,TF,AF,15) ACF(LN,TG,AG,16) \
+ ACF(LN,TH,AH,17) ACF(LN,TI,AI,18) ACF(LN,TJ,AJ,19) ACF(LN,TK,AK,20) \
+ CFC_(UN,LN)( CFARGTA20(AACF,JCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK) ); \
+ WCF(T1,A1,1) WCF(T2,A2,2) WCF(T3,A3,3) WCF(T4,A4,4) WCF(T5,A5,5) WCF(T6,A6,6) \
+ WCF(T7,A7,7) WCF(T8,A8,8) WCF(T9,A9,9) WCF(TA,AA,10) WCF(TB,AB,11) WCF(TC,AC,12) \
+ WCF(TD,AD,13) WCF(TE,AE,14) WCF(TF,AF,15) WCF(TG,AG,16) WCF(TH,AH,17) WCF(TI,AI,18) \
+ WCF(TJ,AJ,19) WCF(TK,AK,20) }while(0)
+#endif
+#endif /* MAX_PREPRO_ARGS */
+
+#if MAX_PREPRO_ARGS>31
+#define CCALLSFSUB21(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL)\
+ CCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,0,0,0,0,0,0)
+#define CCALLSFSUB22(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM)\
+ CCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,CF_0,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,0,0,0,0,0)
+#define CCALLSFSUB23(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN)\
+ CCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,0,0,0,0)
+#define CCALLSFSUB24(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO)\
+ CCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,0,0,0)
+#define CCALLSFSUB25(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP)\
+ CCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,0,0)
+#define CCALLSFSUB26(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,AQ)\
+ CCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,AQ,0)
+
+#ifdef CFSUBASFUN
+#define CCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR, \
+ A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,AQ,AR) \
+ CCALLSFFUN27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR, \
+ A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,AQ,AR)
+#else
+#define CCALLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR, \
+ A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,AQ,AR) \
+do{VVCF(T1,A1,B1) VVCF(T2,A2,B2) VVCF(T3,A3,B3) VVCF(T4,A4,B4) VVCF(T5,A5,B5) \
+ VVCF(T6,A6,B6) VVCF(T7,A7,B7) VVCF(T8,A8,B8) VVCF(T9,A9,B9) VVCF(TA,AA,B10) \
+ VVCF(TB,AB,B11) VVCF(TC,AC,B12) VVCF(TD,AD,B13) VVCF(TE,AE,B14) VVCF(TF,AF,B15) \
+ VVCF(TG,AG,B16) VVCF(TH,AH,B17) VVCF(TI,AI,B18) VVCF(TJ,AJ,B19) VVCF(TK,AK,B20) \
+ VVCF(TL,AL,B21) VVCF(TM,AM,B22) VVCF(TN,AN,B23) VVCF(TO,AO,B24) VVCF(TP,AP,B25) \
+ VVCF(TQ,AQ,B26) VVCF(TR,AR,B27) \
+ CPPPROTOCLSFSUB27(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ ACF(LN,T1,A1,1) ACF(LN,T2,A2,2) ACF(LN,T3,A3,3) ACF(LN,T4,A4,4) \
+ ACF(LN,T5,A5,5) ACF(LN,T6,A6,6) ACF(LN,T7,A7,7) ACF(LN,T8,A8,8) \
+ ACF(LN,T9,A9,9) ACF(LN,TA,AA,10) ACF(LN,TB,AB,11) ACF(LN,TC,AC,12) \
+ ACF(LN,TD,AD,13) ACF(LN,TE,AE,14) ACF(LN,TF,AF,15) ACF(LN,TG,AG,16) \
+ ACF(LN,TH,AH,17) ACF(LN,TI,AI,18) ACF(LN,TJ,AJ,19) ACF(LN,TK,AK,20) \
+ ACF(LN,TL,AL,21) ACF(LN,TM,AM,22) ACF(LN,TN,AN,23) ACF(LN,TO,AO,24) \
+ ACF(LN,TP,AP,25) ACF(LN,TQ,AQ,26) ACF(LN,TR,AR,27) \
+ CFC_(UN,LN)( CFARGTA27(AACF,JCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR,\
+ A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM,AN,AO,AP,AQ,AR) ); \
+ WCF(T1,A1,1) WCF(T2,A2,2) WCF(T3,A3,3) WCF(T4,A4,4) WCF(T5,A5,5) WCF(T6,A6,6) \
+ WCF(T7,A7,7) WCF(T8,A8,8) WCF(T9,A9,9) WCF(TA,AA,10) WCF(TB,AB,11) WCF(TC,AC,12) \
+ WCF(TD,AD,13) WCF(TE,AE,14) WCF(TF,AF,15) WCF(TG,AG,16) WCF(TH,AH,17) WCF(TI,AI,18) \
+ WCF(TJ,AJ,19) WCF(TK,AK,20) WCF(TL,AL,21) WCF(TM,AM,22) WCF(TN,AN,23) WCF(TO,AO,24) \
+ WCF(TP,AP,25) WCF(TQ,AQ,26) WCF(TR,AR,27) }while(0)
+#endif
+#endif /* MAX_PREPRO_ARGS */
+
+/*-------------------------------------------------------------------------*/
+
+/* UTILITIES FOR C TO CALL FORTRAN FUNCTIONS */
+
+/*N.B. PROTOCCALLSFFUNn(..) generates code, whether or not the FORTRAN
+ function is called. Therefore, especially for creator's of C header files
+ for large FORTRAN libraries which include many functions, to reduce
+ compile time and object code size, it may be desirable to create
+ preprocessor directives to allow users to create code for only those
+ functions which they use. */
+
+/* The following defines the maximum length string that a function can return.
+ Of course it may be undefine-d and re-define-d before individual
+ PROTOCCALLSFFUNn(..) as required. It would also be nice to have this derived
+ from the individual machines' limits. */
+#define MAX_LEN_FORTRAN_FUNCTION_STRING 0x4FE
+
+/* The following defines a character used by CFORTRAN.H to flag the end of a
+ string coming out of a FORTRAN routine. */
+#define CFORTRAN_NON_CHAR 0x7F
+
+#ifdef OLD_VAXC /* Prevent %CC-I-PARAMNOTUSED. */
+#pragma nostandard
+#endif
+
+#define _SEP_(TN,C,cfCOMMA) _(__SEP_,C)(TN,cfCOMMA)
+#define __SEP_0(TN,cfCOMMA)
+#define __SEP_1(TN,cfCOMMA) _Icf(2,SEP,TN,cfCOMMA,0)
+#define INT_cfSEP(T,B) _(A,B)
+#define INTV_cfSEP(T,B) INT_cfSEP(T,B)
+#define INTVV_cfSEP(T,B) INT_cfSEP(T,B)
+#define INTVVV_cfSEP(T,B) INT_cfSEP(T,B)
+#define INTVVVV_cfSEP(T,B) INT_cfSEP(T,B)
+#define INTVVVVV_cfSEP(T,B) INT_cfSEP(T,B)
+#define INTVVVVVV_cfSEP(T,B) INT_cfSEP(T,B)
+#define INTVVVVVVV_cfSEP(T,B) INT_cfSEP(T,B)
+#define PINT_cfSEP(T,B) INT_cfSEP(T,B)
+#define PVOID_cfSEP(T,B) INT_cfSEP(T,B)
+#define ROUTINE_cfSEP(T,B) INT_cfSEP(T,B)
+#define SIMPLE_cfSEP(T,B) INT_cfSEP(T,B)
+#define VOID_cfSEP(T,B) INT_cfSEP(T,B) /* For FORTRAN calls C subr.s.*/
+#define STRING_cfSEP(T,B) INT_cfSEP(T,B)
+#define STRINGV_cfSEP(T,B) INT_cfSEP(T,B)
+#define PSTRING_cfSEP(T,B) INT_cfSEP(T,B)
+#define PSTRINGV_cfSEP(T,B) INT_cfSEP(T,B)
+#define PNSTRING_cfSEP(T,B) INT_cfSEP(T,B)
+#define PPSTRING_cfSEP(T,B) INT_cfSEP(T,B)
+#define ZTRINGV_cfSEP(T,B) INT_cfSEP(T,B)
+#define PZTRINGV_cfSEP(T,B) INT_cfSEP(T,B)
+
+#if defined(SIGNED_BYTE) || !defined(UNSIGNED_BYTE)
+#ifdef OLD_VAXC
+#define INTEGER_BYTE char /* Old VAXC barfs on 'signed char' */
+#else
+#define INTEGER_BYTE signed char /* default */
+#endif
+#else
+#define INTEGER_BYTE unsigned char
+#endif
+#define BYTEVVVVVVV_cfTYPE INTEGER_BYTE
+#define DOUBLEVVVVVVV_cfTYPE DOUBLE_PRECISION
+#define FLOATVVVVVVV_cfTYPE FORTRAN_REAL
+#define INTVVVVVVV_cfTYPE int
+#define LOGICALVVVVVVV_cfTYPE int
+#define LONGVVVVVVV_cfTYPE long
+#define SHORTVVVVVVV_cfTYPE short
+#define PBYTE_cfTYPE INTEGER_BYTE
+#define PDOUBLE_cfTYPE DOUBLE_PRECISION
+#define PFLOAT_cfTYPE FORTRAN_REAL
+#define PINT_cfTYPE int
+#define PLOGICAL_cfTYPE int
+#define PLONG_cfTYPE long
+#define PSHORT_cfTYPE short
+
+#define CFARGS0(A,T,V,W,X,Y,Z) _3(T,_cf,A)
+#define CFARGS1(A,T,V,W,X,Y,Z) _3(T,_cf,A)(V)
+#define CFARGS2(A,T,V,W,X,Y,Z) _3(T,_cf,A)(V,W)
+#define CFARGS3(A,T,V,W,X,Y,Z) _3(T,_cf,A)(V,W,X)
+#define CFARGS4(A,T,V,W,X,Y,Z) _3(T,_cf,A)(V,W,X,Y)
+#define CFARGS5(A,T,V,W,X,Y,Z) _3(T,_cf,A)(V,W,X,Y,Z)
+
+#define _Icf(N,T,I,X,Y) _(I,_cfINT)(N,T,I,X,Y,0)
+#define _Icf4(N,T,I,X,Y,Z) _(I,_cfINT)(N,T,I,X,Y,Z)
+#define BYTE_cfINT(N,A,B,X,Y,Z) DOUBLE_cfINT(N,A,B,X,Y,Z)
+#define DOUBLE_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,INT,B,X,Y,Z,0)
+#define FLOAT_cfINT(N,A,B,X,Y,Z) DOUBLE_cfINT(N,A,B,X,Y,Z)
+#define INT_cfINT(N,A,B,X,Y,Z) DOUBLE_cfINT(N,A,B,X,Y,Z)
+#define LOGICAL_cfINT(N,A,B,X,Y,Z) DOUBLE_cfINT(N,A,B,X,Y,Z)
+#define LONG_cfINT(N,A,B,X,Y,Z) DOUBLE_cfINT(N,A,B,X,Y,Z)
+#define SHORT_cfINT(N,A,B,X,Y,Z) DOUBLE_cfINT(N,A,B,X,Y,Z)
+#define PBYTE_cfINT(N,A,B,X,Y,Z) PDOUBLE_cfINT(N,A,B,X,Y,Z)
+#define PDOUBLE_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,PINT,B,X,Y,Z,0)
+#define PFLOAT_cfINT(N,A,B,X,Y,Z) PDOUBLE_cfINT(N,A,B,X,Y,Z)
+#define PINT_cfINT(N,A,B,X,Y,Z) PDOUBLE_cfINT(N,A,B,X,Y,Z)
+#define PLOGICAL_cfINT(N,A,B,X,Y,Z) PDOUBLE_cfINT(N,A,B,X,Y,Z)
+#define PLONG_cfINT(N,A,B,X,Y,Z) PDOUBLE_cfINT(N,A,B,X,Y,Z)
+#define PSHORT_cfINT(N,A,B,X,Y,Z) PDOUBLE_cfINT(N,A,B,X,Y,Z)
+#define BYTEV_cfINT(N,A,B,X,Y,Z) DOUBLEV_cfINT(N,A,B,X,Y,Z)
+#define BYTEVV_cfINT(N,A,B,X,Y,Z) DOUBLEVV_cfINT(N,A,B,X,Y,Z)
+#define BYTEVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVV_cfINT(N,A,B,X,Y,Z)
+#define BYTEVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVV_cfINT(N,A,B,X,Y,Z)
+#define BYTEVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVV_cfINT(N,A,B,X,Y,Z)
+#define BYTEVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define BYTEVVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define DOUBLEV_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,INTV,B,X,Y,Z,0)
+#define DOUBLEVV_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,INTVV,B,X,Y,Z,0)
+#define DOUBLEVVV_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,INTVVV,B,X,Y,Z,0)
+#define DOUBLEVVVV_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,INTVVVV,B,X,Y,Z,0)
+#define DOUBLEVVVVV_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,INTVVVVV,B,X,Y,Z,0)
+#define DOUBLEVVVVVV_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,INTVVVVVV,B,X,Y,Z,0)
+#define DOUBLEVVVVVVV_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,INTVVVVVVV,B,X,Y,Z,0)
+#define FLOATV_cfINT(N,A,B,X,Y,Z) DOUBLEV_cfINT(N,A,B,X,Y,Z)
+#define FLOATVV_cfINT(N,A,B,X,Y,Z) DOUBLEVV_cfINT(N,A,B,X,Y,Z)
+#define FLOATVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVV_cfINT(N,A,B,X,Y,Z)
+#define FLOATVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVV_cfINT(N,A,B,X,Y,Z)
+#define FLOATVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVV_cfINT(N,A,B,X,Y,Z)
+#define FLOATVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define FLOATVVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define INTV_cfINT(N,A,B,X,Y,Z) DOUBLEV_cfINT(N,A,B,X,Y,Z)
+#define INTVV_cfINT(N,A,B,X,Y,Z) DOUBLEVV_cfINT(N,A,B,X,Y,Z)
+#define INTVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVV_cfINT(N,A,B,X,Y,Z)
+#define INTVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVV_cfINT(N,A,B,X,Y,Z)
+#define INTVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVV_cfINT(N,A,B,X,Y,Z)
+#define INTVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define INTVVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define LOGICALV_cfINT(N,A,B,X,Y,Z) DOUBLEV_cfINT(N,A,B,X,Y,Z)
+#define LOGICALVV_cfINT(N,A,B,X,Y,Z) DOUBLEVV_cfINT(N,A,B,X,Y,Z)
+#define LOGICALVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVV_cfINT(N,A,B,X,Y,Z)
+#define LOGICALVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVV_cfINT(N,A,B,X,Y,Z)
+#define LOGICALVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVV_cfINT(N,A,B,X,Y,Z)
+#define LOGICALVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define LOGICALVVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define LONGV_cfINT(N,A,B,X,Y,Z) DOUBLEV_cfINT(N,A,B,X,Y,Z)
+#define LONGVV_cfINT(N,A,B,X,Y,Z) DOUBLEVV_cfINT(N,A,B,X,Y,Z)
+#define LONGVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVV_cfINT(N,A,B,X,Y,Z)
+#define LONGVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVV_cfINT(N,A,B,X,Y,Z)
+#define LONGVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVV_cfINT(N,A,B,X,Y,Z)
+#define LONGVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define LONGVVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define SHORTV_cfINT(N,A,B,X,Y,Z) DOUBLEV_cfINT(N,A,B,X,Y,Z)
+#define SHORTVV_cfINT(N,A,B,X,Y,Z) DOUBLEVV_cfINT(N,A,B,X,Y,Z)
+#define SHORTVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVV_cfINT(N,A,B,X,Y,Z)
+#define SHORTVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVV_cfINT(N,A,B,X,Y,Z)
+#define SHORTVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVV_cfINT(N,A,B,X,Y,Z)
+#define SHORTVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define SHORTVVVVVVV_cfINT(N,A,B,X,Y,Z) DOUBLEVVVVVVV_cfINT(N,A,B,X,Y,Z)
+#define PVOID_cfINT(N,A,B,X,Y,Z) _(CFARGS,N)(A,B,B,X,Y,Z,0)
+#define ROUTINE_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+/*CRAY coughs on the first,
+ i.e. the usual trouble of not being able to
+ define macros to macros with arguments.
+ New ultrix is worse, it coughs on all such uses.
+ */
+/*#define SIMPLE_cfINT PVOID_cfINT*/
+#define SIMPLE_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define VOID_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define STRING_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define STRINGV_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define PSTRING_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define PSTRINGV_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define PNSTRING_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define PPSTRING_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define ZTRINGV_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define PZTRINGV_cfINT(N,A,B,X,Y,Z) PVOID_cfINT(N,A,B,X,Y,Z)
+#define CF_0_cfINT(N,A,B,X,Y,Z)
+
+
+#define UCF(TN,I,C) _SEP_(TN,C,cfCOMMA) _Icf(2,U,TN,_(A,I),0)
+#define UUCF(TN,I,C) _SEP_(TN,C,cfCOMMA) _SEP_(TN,1,I)
+#define UUUCF(TN,I,C) _SEP_(TN,C,cfCOLON) _Icf(2,U,TN,_(A,I),0)
+#define INT_cfU(T,A) _(T,VVVVVVV_cfTYPE) A
+#define INTV_cfU(T,A) _(T,VVVVVV_cfTYPE) * A
+#define INTVV_cfU(T,A) _(T,VVVVV_cfTYPE) * A
+#define INTVVV_cfU(T,A) _(T,VVVV_cfTYPE) * A
+#define INTVVVV_cfU(T,A) _(T,VVV_cfTYPE) * A
+#define INTVVVVV_cfU(T,A) _(T,VV_cfTYPE) * A
+#define INTVVVVVV_cfU(T,A) _(T,V_cfTYPE) * A
+#define INTVVVVVVV_cfU(T,A) _(T,_cfTYPE) * A
+#define PINT_cfU(T,A) _(T,_cfTYPE) * A
+#define PVOID_cfU(T,A) void *A
+#define ROUTINE_cfU(T,A) void (*A)(CF_NULL_PROTO)
+#define VOID_cfU(T,A) void A /* Needed for C calls FORTRAN sub.s. */
+#define STRING_cfU(T,A) char *A /* via VOID and wrapper. */
+#define STRINGV_cfU(T,A) char *A
+#define PSTRING_cfU(T,A) char *A
+#define PSTRINGV_cfU(T,A) char *A
+#define ZTRINGV_cfU(T,A) char *A
+#define PZTRINGV_cfU(T,A) char *A
+
+/* VOID breaks U into U and UU. */
+#define INT_cfUU(T,A) _(T,VVVVVVV_cfTYPE) A
+#define VOID_cfUU(T,A) /* Needed for FORTRAN calls C sub.s. */
+#define STRING_cfUU(T,A) char *A
+
+
+#define BYTE_cfPU(A) CFextern INTEGER_BYTE FCALLSC_QUALIFIER A
+#define DOUBLE_cfPU(A) CFextern DOUBLE_PRECISION FCALLSC_QUALIFIER A
+#if ! (defined(FLOATFUNCTIONTYPE)&&defined(ASSIGNFLOAT)&&defined(RETURNFLOAT))
+#define FLOAT_cfPU(A) CFextern FORTRAN_REAL FCALLSC_QUALIFIER A
+#else
+#define FLOAT_cfPU(A) CFextern FLOATFUNCTIONTYPE FCALLSC_QUALIFIER A
+#endif
+#define INT_cfPU(A) CFextern int FCALLSC_QUALIFIER A
+#define LOGICAL_cfPU(A) CFextern int FCALLSC_QUALIFIER A
+#define LONG_cfPU(A) CFextern long FCALLSC_QUALIFIER A
+#define SHORT_cfPU(A) CFextern short FCALLSC_QUALIFIER A
+#define STRING_cfPU(A) CFextern void FCALLSC_QUALIFIER A
+#define VOID_cfPU(A) CFextern void FCALLSC_QUALIFIER A
+
+#define BYTE_cfE INTEGER_BYTE A0;
+#define DOUBLE_cfE DOUBLE_PRECISION A0;
+#if ! (defined(FLOATFUNCTIONTYPE)&&defined(ASSIGNFLOAT)&&defined(RETURNFLOAT))
+#define FLOAT_cfE FORTRAN_REAL A0;
+#else
+#define FLOAT_cfE FORTRAN_REAL AA0; FLOATFUNCTIONTYPE A0;
+#endif
+#define INT_cfE int A0;
+#define LOGICAL_cfE int A0;
+#define LONG_cfE long A0;
+#define SHORT_cfE short A0;
+#define VOID_cfE
+#ifdef vmsFortran
+#define STRING_cfE static char AA0[1+MAX_LEN_FORTRAN_FUNCTION_STRING]; \
+ static fstring A0 = \
+ {MAX_LEN_FORTRAN_FUNCTION_STRING,DSC$K_DTYPE_T,DSC$K_CLASS_S,AA0};\
+ memset(AA0, CFORTRAN_NON_CHAR, MAX_LEN_FORTRAN_FUNCTION_STRING);\
+ *(AA0+MAX_LEN_FORTRAN_FUNCTION_STRING)='\0';
+#else
+#ifdef CRAYFortran
+#define STRING_cfE static char AA0[1+MAX_LEN_FORTRAN_FUNCTION_STRING]; \
+ static _fcd A0; *(AA0+MAX_LEN_FORTRAN_FUNCTION_STRING)='\0';\
+ memset(AA0,CFORTRAN_NON_CHAR, MAX_LEN_FORTRAN_FUNCTION_STRING);\
+ A0 = _cptofcd(AA0,MAX_LEN_FORTRAN_FUNCTION_STRING);
+#else
+/* 'cc: SC3.0.1 13 Jul 1994' barfs on char A0[0x4FE+1];
+ * char A0[0x4FE +1]; char A0[1+0x4FE]; are both OK. */
+#define STRING_cfE static char A0[1+MAX_LEN_FORTRAN_FUNCTION_STRING]; \
+ memset(A0, CFORTRAN_NON_CHAR, \
+ MAX_LEN_FORTRAN_FUNCTION_STRING); \
+ *(A0+MAX_LEN_FORTRAN_FUNCTION_STRING)='\0';
+#endif
+#endif
+/* ESTRING must use static char. array which is guaranteed to exist after
+ function returns. */
+
+/* N.B.i) The diff. for 0 (Zero) and >=1 arguments.
+ ii)That the following create an unmatched bracket, i.e. '(', which
+ must of course be matched in the call.
+ iii)Commas must be handled very carefully */
+#define INT_cfGZ(T,UN,LN) A0=CFC_(UN,LN)(
+#define VOID_cfGZ(T,UN,LN) CFC_(UN,LN)(
+#ifdef vmsFortran
+#define STRING_cfGZ(T,UN,LN) CFC_(UN,LN)(&A0
+#else
+#if defined(CRAYFortran) || defined(AbsoftUNIXFortran) || defined(AbsoftProFortran)
+#define STRING_cfGZ(T,UN,LN) CFC_(UN,LN)( A0
+#else
+#define STRING_cfGZ(T,UN,LN) CFC_(UN,LN)( A0,MAX_LEN_FORTRAN_FUNCTION_STRING
+#endif
+#endif
+
+#define INT_cfG(T,UN,LN) INT_cfGZ(T,UN,LN)
+#define VOID_cfG(T,UN,LN) VOID_cfGZ(T,UN,LN)
+#define STRING_cfG(T,UN,LN) STRING_cfGZ(T,UN,LN), /*, is only diff. from _cfG*/
+
+#define BYTEVVVVVVV_cfPP
+#define INTVVVVVVV_cfPP /* These complement FLOATVVVVVVV_cfPP. */
+#define DOUBLEVVVVVVV_cfPP
+#define LOGICALVVVVVVV_cfPP
+#define LONGVVVVVVV_cfPP
+#define SHORTVVVVVVV_cfPP
+#define PBYTE_cfPP
+#define PINT_cfPP
+#define PDOUBLE_cfPP
+#define PLOGICAL_cfPP
+#define PLONG_cfPP
+#define PSHORT_cfPP
+#define PFLOAT_cfPP FLOATVVVVVVV_cfPP
+
+#define BCF(TN,AN,C) _SEP_(TN,C,cfCOMMA) _Icf(2,B,TN,AN,0)
+#define INT_cfB(T,A) (_(T,VVVVVVV_cfTYPE)) A
+#define INTV_cfB(T,A) A
+#define INTVV_cfB(T,A) (A)[0]
+#define INTVVV_cfB(T,A) (A)[0][0]
+#define INTVVVV_cfB(T,A) (A)[0][0][0]
+#define INTVVVVV_cfB(T,A) (A)[0][0][0][0]
+#define INTVVVVVV_cfB(T,A) (A)[0][0][0][0][0]
+#define INTVVVVVVV_cfB(T,A) (A)[0][0][0][0][0][0]
+#define PINT_cfB(T,A) _(T,_cfPP)&A
+#define STRING_cfB(T,A) (char *) A
+#define STRINGV_cfB(T,A) (char *) A
+#define PSTRING_cfB(T,A) (char *) A
+#define PSTRINGV_cfB(T,A) (char *) A
+#define PVOID_cfB(T,A) (void *) A
+#define ROUTINE_cfB(T,A) (cfCAST_FUNCTION)A
+#define ZTRINGV_cfB(T,A) (char *) A
+#define PZTRINGV_cfB(T,A) (char *) A
+
+#define SCF(TN,NAME,I,A) _(TN,_cfSTR)(3,S,NAME,I,A,0,0)
+#define DEFAULT_cfS(M,I,A)
+#define LOGICAL_cfS(M,I,A)
+#define PLOGICAL_cfS(M,I,A)
+#define STRING_cfS(M,I,A) ,sizeof(A)
+#define STRINGV_cfS(M,I,A) ,( (unsigned)0xFFFF*firstindexlength(A) \
+ +secondindexlength(A))
+#define PSTRING_cfS(M,I,A) ,sizeof(A)
+#define PSTRINGV_cfS(M,I,A) STRINGV_cfS(M,I,A)
+#define ZTRINGV_cfS(M,I,A)
+#define PZTRINGV_cfS(M,I,A)
+
+#define HCF(TN,I) _(TN,_cfSTR)(3,H,cfCOMMA, H,_(C,I),0,0)
+#define HHCF(TN,I) _(TN,_cfSTR)(3,H,cfCOMMA,HH,_(C,I),0,0)
+#define HHHCF(TN,I) _(TN,_cfSTR)(3,H,cfCOLON, H,_(C,I),0,0)
+#define H_CF_SPECIAL unsigned
+#define HH_CF_SPECIAL
+#define DEFAULT_cfH(M,I,A)
+#define LOGICAL_cfH(S,U,B)
+#define PLOGICAL_cfH(S,U,B)
+#define STRING_cfH(S,U,B) _(A,S) _(U,_CF_SPECIAL) B
+#define STRINGV_cfH(S,U,B) STRING_cfH(S,U,B)
+#define PSTRING_cfH(S,U,B) STRING_cfH(S,U,B)
+#define PSTRINGV_cfH(S,U,B) STRING_cfH(S,U,B)
+#define PNSTRING_cfH(S,U,B) STRING_cfH(S,U,B)
+#define PPSTRING_cfH(S,U,B) STRING_cfH(S,U,B)
+#define ZTRINGV_cfH(S,U,B)
+#define PZTRINGV_cfH(S,U,B)
+
+/* Need VOID_cfSTR because Absoft forced function types go through _cfSTR. */
+/* No spaces inside expansion. They screw up the macro catenation kludge. */
+#define VOID_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define BYTE_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define DOUBLE_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define FLOAT_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define INT_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LOGICAL_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,LOGICAL,A,B,C,D,E)
+#define LONG_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SHORT_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define BYTEV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define BYTEVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define BYTEVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define BYTEVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define BYTEVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define BYTEVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define BYTEVVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define DOUBLEV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define DOUBLEVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define DOUBLEVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define DOUBLEVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define DOUBLEVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define DOUBLEVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define DOUBLEVVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define FLOATV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define FLOATVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define FLOATVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define FLOATVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define FLOATVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define FLOATVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define FLOATVVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define INTV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define INTVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define INTVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define INTVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define INTVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define INTVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define INTVVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LOGICALV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LOGICALVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LOGICALVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LOGICALVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LOGICALVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LOGICALVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LOGICALVVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LONGV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LONGVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LONGVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LONGVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LONGVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LONGVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define LONGVVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SHORTV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SHORTVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SHORTVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SHORTVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SHORTVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SHORTVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SHORTVVVVVVV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define PBYTE_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define PDOUBLE_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define PFLOAT_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define PINT_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define PLOGICAL_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,PLOGICAL,A,B,C,D,E)
+#define PLONG_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define PSHORT_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define STRING_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,STRING,A,B,C,D,E)
+#define PSTRING_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,PSTRING,A,B,C,D,E)
+#define STRINGV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,STRINGV,A,B,C,D,E)
+#define PSTRINGV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,PSTRINGV,A,B,C,D,E)
+#define PNSTRING_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,PNSTRING,A,B,C,D,E)
+#define PPSTRING_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,PPSTRING,A,B,C,D,E)
+#define PVOID_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define ROUTINE_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define SIMPLE_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,DEFAULT,A,B,C,D,E)
+#define ZTRINGV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,ZTRINGV,A,B,C,D,E)
+#define PZTRINGV_cfSTR(N,T,A,B,C,D,E) _(CFARGS,N)(T,PZTRINGV,A,B,C,D,E)
+#define CF_0_cfSTR(N,T,A,B,C,D,E)
+
+/* See ACF table comments, which explain why CCF was split into two. */
+#define CCF(NAME,TN,I) _(TN,_cfSTR)(5,C,NAME,I,_(A,I),_(B,I),_(C,I))
+#define DEFAULT_cfC(M,I,A,B,C)
+#define LOGICAL_cfC(M,I,A,B,C) A=C2FLOGICAL( A);
+#define PLOGICAL_cfC(M,I,A,B,C) *A=C2FLOGICAL(*A);
+#ifdef vmsFortran
+#define STRING_cfC(M,I,A,B,C) (B.clen=strlen(A),B.f.dsc$a_pointer=A, \
+ C==sizeof(char*)||C==(unsigned)(B.clen+1)?B.f.dsc$w_length=B.clen: \
+ (memset((A)+B.clen,' ',C-B.clen-1),A[B.f.dsc$w_length=C-1]='\0'));
+ /* PSTRING_cfC to beware of array A which does not contain any \0. */
+#define PSTRING_cfC(M,I,A,B,C) (B.dsc$a_pointer=A, C==sizeof(char*) ? \
+ B.dsc$w_length=strlen(A): (A[C-1]='\0',B.dsc$w_length=strlen(A), \
+ memset((A)+B.dsc$w_length,' ',C-B.dsc$w_length-1), B.dsc$w_length=C-1));
+#else
+#define STRING_cfC(M,I,A,B,C) (B.clen=strlen(A), \
+ C==sizeof(char*)||C==(unsigned)(B.clen+1)?B.flen=B.clen: \
+ (memset((A)+B.clen,' ',C-B.clen-1),A[B.flen=C-1]='\0'));
+#define PSTRING_cfC(M,I,A,B,C) (C==sizeof(char*)? B=strlen(A): \
+ (A[C-1]='\0',B=strlen(A),memset((A)+B,' ',C-B-1),B=C-1));
+#endif
+ /* For CRAYFortran, in (P)STRINGV_cfC, B.fs is set, but irrelevant. */
+#define STRINGV_cfC(M,I,A,B,C) \
+ AATRINGV_cfA( A,B,(C/0xFFFF)*(C%0xFFFF),C/0xFFFF,C%0xFFFF)
+#define PSTRINGV_cfC(M,I,A,B,C) \
+ APATRINGV_cfA( A,B,(C/0xFFFF)*(C%0xFFFF),C/0xFFFF,C%0xFFFF)
+#define ZTRINGV_cfC(M,I,A,B,C) \
+ AATRINGV_cfA( A,B, (_3(M,_ELEMS_,I))*((_3(M,_ELEMLEN_,I))+1), \
+ (_3(M,_ELEMS_,I)), (_3(M,_ELEMLEN_,I))+1 )
+#define PZTRINGV_cfC(M,I,A,B,C) \
+ APATRINGV_cfA( A,B, (_3(M,_ELEMS_,I))*((_3(M,_ELEMLEN_,I))+1), \
+ (_3(M,_ELEMS_,I)), (_3(M,_ELEMLEN_,I))+1 )
+
+#define BYTE_cfCCC(A,B) &A
+#define DOUBLE_cfCCC(A,B) &A
+#if !defined(__CF__KnR)
+#define FLOAT_cfCCC(A,B) &A
+ /* Although the VAX doesn't, at least the */
+#else /* HP and K&R mips promote float arg.'s of */
+#define FLOAT_cfCCC(A,B) &B /* unprototyped functions to double. Cannot */
+#endif /* use A here to pass the argument to FORTRAN. */
+#define INT_cfCCC(A,B) &A
+#define LOGICAL_cfCCC(A,B) &A
+#define LONG_cfCCC(A,B) &A
+#define SHORT_cfCCC(A,B) &A
+#define PBYTE_cfCCC(A,B) A
+#define PDOUBLE_cfCCC(A,B) A
+#define PFLOAT_cfCCC(A,B) A
+#define PINT_cfCCC(A,B) A
+#define PLOGICAL_cfCCC(A,B) B=A /* B used to keep a common W table. */
+#define PLONG_cfCCC(A,B) A
+#define PSHORT_cfCCC(A,B) A
+
+#define CCCF(TN,I,M) _SEP_(TN,M,cfCOMMA) _Icf(3,CC,TN,_(A,I),_(B,I))
+#define INT_cfCC(T,A,B) _(T,_cfCCC)(A,B)
+#define INTV_cfCC(T,A,B) A
+#define INTVV_cfCC(T,A,B) A
+#define INTVVV_cfCC(T,A,B) A
+#define INTVVVV_cfCC(T,A,B) A
+#define INTVVVVV_cfCC(T,A,B) A
+#define INTVVVVVV_cfCC(T,A,B) A
+#define INTVVVVVVV_cfCC(T,A,B) A
+#define PINT_cfCC(T,A,B) _(T,_cfCCC)(A,B)
+#define PVOID_cfCC(T,A,B) A
+#if defined(apolloFortran) || defined(hpuxFortran800) || defined(AbsoftUNIXFortran)
+#define ROUTINE_cfCC(T,A,B) &A
+#else
+#define ROUTINE_cfCC(T,A,B) A
+#endif
+#define SIMPLE_cfCC(T,A,B) A
+#ifdef vmsFortran
+#define STRING_cfCC(T,A,B) &B.f
+#define STRINGV_cfCC(T,A,B) &B
+#define PSTRING_cfCC(T,A,B) &B
+#define PSTRINGV_cfCC(T,A,B) &B
+#else
+#ifdef CRAYFortran
+#define STRING_cfCC(T,A,B) _cptofcd(A,B.flen)
+#define STRINGV_cfCC(T,A,B) _cptofcd(B.s,B.flen)
+#define PSTRING_cfCC(T,A,B) _cptofcd(A,B)
+#define PSTRINGV_cfCC(T,A,B) _cptofcd(A,B.flen)
+#else
+#define STRING_cfCC(T,A,B) A
+#define STRINGV_cfCC(T,A,B) B.fs
+#define PSTRING_cfCC(T,A,B) A
+#define PSTRINGV_cfCC(T,A,B) B.fs
+#endif
+#endif
+#define ZTRINGV_cfCC(T,A,B) STRINGV_cfCC(T,A,B)
+#define PZTRINGV_cfCC(T,A,B) PSTRINGV_cfCC(T,A,B)
+
+#define BYTE_cfX return A0;
+#define DOUBLE_cfX return A0;
+#if ! (defined(FLOATFUNCTIONTYPE)&&defined(ASSIGNFLOAT)&&defined(RETURNFLOAT))
+#define FLOAT_cfX return A0;
+#else
+#define FLOAT_cfX ASSIGNFLOAT(AA0,A0); return AA0;
+#endif
+#define INT_cfX return A0;
+#define LOGICAL_cfX return F2CLOGICAL(A0);
+#define LONG_cfX return A0;
+#define SHORT_cfX return A0;
+#define VOID_cfX return ;
+#if defined(vmsFortran) || defined(CRAYFortran)
+#define STRING_cfX return kill_trailing( \
+ kill_trailing(AA0,CFORTRAN_NON_CHAR),' ');
+#else
+#define STRING_cfX return kill_trailing( \
+ kill_trailing( A0,CFORTRAN_NON_CHAR),' ');
+#endif
+
+#define CFFUN(NAME) _(__cf__,NAME)
+
+/* Note that we don't use LN here, but we keep it for consistency. */
+#define CCALLSFFUN0(UN,LN) CFFUN(UN)()
+
+#ifdef OLD_VAXC /* Allow %CC-I-PARAMNOTUSED. */
+#pragma standard
+#endif
+
+#define CCALLSFFUN1( UN,LN,T1, A1) \
+ CCALLSFFUN5 (UN,LN,T1,CF_0,CF_0,CF_0,CF_0,A1,0,0,0,0)
+#define CCALLSFFUN2( UN,LN,T1,T2, A1,A2) \
+ CCALLSFFUN5 (UN,LN,T1,T2,CF_0,CF_0,CF_0,A1,A2,0,0,0)
+#define CCALLSFFUN3( UN,LN,T1,T2,T3, A1,A2,A3) \
+ CCALLSFFUN5 (UN,LN,T1,T2,T3,CF_0,CF_0,A1,A2,A3,0,0)
+#define CCALLSFFUN4( UN,LN,T1,T2,T3,T4, A1,A2,A3,A4)\
+ CCALLSFFUN5 (UN,LN,T1,T2,T3,T4,CF_0,A1,A2,A3,A4,0)
+#define CCALLSFFUN5( UN,LN,T1,T2,T3,T4,T5, A1,A2,A3,A4,A5) \
+ CCALLSFFUN10(UN,LN,T1,T2,T3,T4,T5,CF_0,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,0,0,0,0,0)
+#define CCALLSFFUN6( UN,LN,T1,T2,T3,T4,T5,T6, A1,A2,A3,A4,A5,A6) \
+ CCALLSFFUN10(UN,LN,T1,T2,T3,T4,T5,T6,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,0,0,0,0)
+#define CCALLSFFUN7( UN,LN,T1,T2,T3,T4,T5,T6,T7, A1,A2,A3,A4,A5,A6,A7) \
+ CCALLSFFUN10(UN,LN,T1,T2,T3,T4,T5,T6,T7,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,0,0,0)
+#define CCALLSFFUN8( UN,LN,T1,T2,T3,T4,T5,T6,T7,T8, A1,A2,A3,A4,A5,A6,A7,A8) \
+ CCALLSFFUN10(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,0,0)
+#define CCALLSFFUN9( UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,A1,A2,A3,A4,A5,A6,A7,A8,A9)\
+ CCALLSFFUN10(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,0)
+#define CCALLSFFUN10(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA)\
+ CCALLSFFUN14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,CF_0,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,0,0,0,0)
+#define CCALLSFFUN11(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB)\
+ CCALLSFFUN14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,CF_0,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,0,0,0)
+#define CCALLSFFUN12(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC)\
+ CCALLSFFUN14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,CF_0,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,0,0)
+#define CCALLSFFUN13(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD)\
+ CCALLSFFUN14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,CF_0,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,0)
+
+#define CCALLSFFUN14(UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,A1,A2,A3,A4,A5,A6,A7,A8,A9,AA,AB,AC,AD,AE)\
+((CFFUN(UN)( BCF(T1,A1,0) BCF(T2,A2,1) BCF(T3,A3,1) BCF(T4,A4,1) BCF(T5,A5,1) \
+ BCF(T6,A6,1) BCF(T7,A7,1) BCF(T8,A8,1) BCF(T9,A9,1) BCF(TA,AA,1) \
+ BCF(TB,AB,1) BCF(TC,AC,1) BCF(TD,AD,1) BCF(TE,AE,1) \
+ SCF(T1,LN,1,A1) SCF(T2,LN,2,A2) SCF(T3,LN,3,A3) SCF(T4,LN,4,A4) \
+ SCF(T5,LN,5,A5) SCF(T6,LN,6,A6) SCF(T7,LN,7,A7) SCF(T8,LN,8,A8) \
+ SCF(T9,LN,9,A9) SCF(TA,LN,10,AA) SCF(TB,LN,11,AB) SCF(TC,LN,12,AC) \
+ SCF(TD,LN,13,AD) SCF(TE,LN,14,AE))))
+
+/* N.B. Create a separate function instead of using (call function, function
+value here) because in order to create the variables needed for the input
+arg.'s which may be const.'s one has to do the creation within {}, but these
+can never be placed within ()'s. Therefore one must create wrapper functions.
+gcc, on the other hand may be able to avoid the wrapper functions. */
+
+/* Prototypes are needed to handle the returned value correctly. N.B.
+Can only have prototype arg.'s with difficulty, a la G... table since FORTRAN
+functions returning strings have extra arg.'s. Don't bother, since this only
+causes a compiler warning to come up when one uses FCALLSCFUNn and CCALLSFFUNn
+for the same function in the same source code. Something done by the experts in
+debugging only.*/
+
+#define PROTOCCALLSFFUN0(F,UN,LN) \
+_(F,_cfPU)( CFC_(UN,LN))(CF_NULL_PROTO); \
+static _Icf(2,U,F,CFFUN(UN),0)() {_(F,_cfE) _Icf(3,GZ,F,UN,LN) ABSOFT_cf1(F));_(F,_cfX)}
+
+#define PROTOCCALLSFFUN1( T0,UN,LN,T1) \
+ PROTOCCALLSFFUN5 (T0,UN,LN,T1,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFFUN2( T0,UN,LN,T1,T2) \
+ PROTOCCALLSFFUN5 (T0,UN,LN,T1,T2,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFFUN3( T0,UN,LN,T1,T2,T3) \
+ PROTOCCALLSFFUN5 (T0,UN,LN,T1,T2,T3,CF_0,CF_0)
+#define PROTOCCALLSFFUN4( T0,UN,LN,T1,T2,T3,T4) \
+ PROTOCCALLSFFUN5 (T0,UN,LN,T1,T2,T3,T4,CF_0)
+#define PROTOCCALLSFFUN5( T0,UN,LN,T1,T2,T3,T4,T5) \
+ PROTOCCALLSFFUN10(T0,UN,LN,T1,T2,T3,T4,T5,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFFUN6( T0,UN,LN,T1,T2,T3,T4,T5,T6) \
+ PROTOCCALLSFFUN10(T0,UN,LN,T1,T2,T3,T4,T5,T6,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFFUN7( T0,UN,LN,T1,T2,T3,T4,T5,T6,T7) \
+ PROTOCCALLSFFUN10(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFFUN8( T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8) \
+ PROTOCCALLSFFUN10(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,CF_0,CF_0)
+#define PROTOCCALLSFFUN9( T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9) \
+ PROTOCCALLSFFUN10(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,CF_0)
+#define PROTOCCALLSFFUN10(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA) \
+ PROTOCCALLSFFUN14(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,CF_0,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFFUN11(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB) \
+ PROTOCCALLSFFUN14(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,CF_0,CF_0,CF_0)
+#define PROTOCCALLSFFUN12(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC) \
+ PROTOCCALLSFFUN14(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,CF_0,CF_0)
+#define PROTOCCALLSFFUN13(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD) \
+ PROTOCCALLSFFUN14(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,CF_0)
+
+/* HP/UX 9.01 cc requires the blank between '_Icf(3,G,T0,UN,LN) CCCF(T1,1,0)' */
+
+#ifndef __CF__KnR
+#define PROTOCCALLSFFUN14(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ _(T0,_cfPU)(CFC_(UN,LN))(CF_NULL_PROTO); static _Icf(2,U,T0,CFFUN(UN),0)( \
+ CFARGT14FS(UCF,HCF,_Z,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) ) \
+{ CFARGT14S(VCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) _(T0,_cfE) \
+ CCF(LN,T1,1) CCF(LN,T2,2) CCF(LN,T3,3) CCF(LN,T4,4) CCF(LN,T5,5) \
+ CCF(LN,T6,6) CCF(LN,T7,7) CCF(LN,T8,8) CCF(LN,T9,9) CCF(LN,TA,10) \
+ CCF(LN,TB,11) CCF(LN,TC,12) CCF(LN,TD,13) CCF(LN,TE,14) _Icf(3,G,T0,UN,LN) \
+ CFARGT14(CCCF,JCF,ABSOFT_cf1(T0),T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)); \
+ WCF(T1,A1,1) WCF(T2,A2,2) WCF(T3,A3,3) WCF(T4,A4,4) WCF(T5,A5,5) \
+ WCF(T6,A6,6) WCF(T7,A7,7) WCF(T8,A8,8) WCF(T9,A9,9) WCF(TA,A10,10) \
+ WCF(TB,A11,11) WCF(TC,A12,12) WCF(TD,A13,13) WCF(TE,A14,14) _(T0,_cfX)}
+#else
+#define PROTOCCALLSFFUN14(T0,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ _(T0,_cfPU)(CFC_(UN,LN))(CF_NULL_PROTO); static _Icf(2,U,T0,CFFUN(UN),0)( \
+ CFARGT14FS(UUCF,HHCF,_Z,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) ) \
+ CFARGT14FS(UUUCF,HHHCF,_Z,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) ; \
+{ CFARGT14S(VCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) _(T0,_cfE) \
+ CCF(LN,T1,1) CCF(LN,T2,2) CCF(LN,T3,3) CCF(LN,T4,4) CCF(LN,T5,5) \
+ CCF(LN,T6,6) CCF(LN,T7,7) CCF(LN,T8,8) CCF(LN,T9,9) CCF(LN,TA,10) \
+ CCF(LN,TB,11) CCF(LN,TC,12) CCF(LN,TD,13) CCF(LN,TE,14) _Icf(3,G,T0,UN,LN) \
+ CFARGT14(CCCF,JCF,ABSOFT_cf1(T0),T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)); \
+ WCF(T1,A1,1) WCF(T2,A2,2) WCF(T3,A3,3) WCF(T4,A4,4) WCF(T5,A5,5) \
+ WCF(T6,A6,6) WCF(T7,A7,7) WCF(T8,A8,8) WCF(T9,A9,9) WCF(TA,A10,10) \
+ WCF(TB,A11,11) WCF(TC,A12,12) WCF(TD,A13,13) WCF(TE,A14,14) _(T0,_cfX)}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* UTILITIES FOR FORTRAN TO CALL C ROUTINES */
+
+#ifdef OLD_VAXC /* Prevent %CC-I-PARAMNOTUSED. */
+#pragma nostandard
+#endif
+
+#if defined(vmsFortran) || defined(CRAYFortran)
+#define DCF(TN,I)
+#define DDCF(TN,I)
+#define DDDCF(TN,I)
+#else
+#define DCF(TN,I) HCF(TN,I)
+#define DDCF(TN,I) HHCF(TN,I)
+#define DDDCF(TN,I) HHHCF(TN,I)
+#endif
+
+#define QCF(TN,I) _(TN,_cfSTR)(1,Q,_(B,I), 0,0,0,0)
+#define DEFAULT_cfQ(B)
+#define LOGICAL_cfQ(B)
+#define PLOGICAL_cfQ(B)
+#define STRINGV_cfQ(B) char *B; unsigned int _(B,N);
+#define STRING_cfQ(B) char *B=NULL;
+#define PSTRING_cfQ(B) char *B=NULL;
+#define PSTRINGV_cfQ(B) STRINGV_cfQ(B)
+#define PNSTRING_cfQ(B) char *B=NULL;
+#define PPSTRING_cfQ(B)
+
+#ifdef __sgi /* Else SGI gives warning 182 contrary to its C LRM A.17.7 */
+#define ROUTINE_orig *(void**)&
+#else
+#define ROUTINE_orig (void *)
+#endif
+
+#define ROUTINE_1 ROUTINE_orig
+#define ROUTINE_2 ROUTINE_orig
+#define ROUTINE_3 ROUTINE_orig
+#define ROUTINE_4 ROUTINE_orig
+#define ROUTINE_5 ROUTINE_orig
+#define ROUTINE_6 ROUTINE_orig
+#define ROUTINE_7 ROUTINE_orig
+#define ROUTINE_8 ROUTINE_orig
+#define ROUTINE_9 ROUTINE_orig
+#define ROUTINE_10 ROUTINE_orig
+#define ROUTINE_11 ROUTINE_orig
+#define ROUTINE_12 ROUTINE_orig
+#define ROUTINE_13 ROUTINE_orig
+#define ROUTINE_14 ROUTINE_orig
+#define ROUTINE_15 ROUTINE_orig
+#define ROUTINE_16 ROUTINE_orig
+#define ROUTINE_17 ROUTINE_orig
+#define ROUTINE_18 ROUTINE_orig
+#define ROUTINE_19 ROUTINE_orig
+#define ROUTINE_20 ROUTINE_orig
+#define ROUTINE_21 ROUTINE_orig
+#define ROUTINE_22 ROUTINE_orig
+#define ROUTINE_23 ROUTINE_orig
+#define ROUTINE_24 ROUTINE_orig
+#define ROUTINE_25 ROUTINE_orig
+#define ROUTINE_26 ROUTINE_orig
+#define ROUTINE_27 ROUTINE_orig
+
+#define TCF(NAME,TN,I,M) _SEP_(TN,M,cfCOMMA) _(TN,_cfT)(NAME,I,_(A,I),_(B,I),_(C,I))
+#define BYTE_cfT(M,I,A,B,D) *A
+#define DOUBLE_cfT(M,I,A,B,D) *A
+#define FLOAT_cfT(M,I,A,B,D) *A
+#define INT_cfT(M,I,A,B,D) *A
+#define LOGICAL_cfT(M,I,A,B,D) F2CLOGICAL(*A)
+#define LONG_cfT(M,I,A,B,D) *A
+#define SHORT_cfT(M,I,A,B,D) *A
+#define BYTEV_cfT(M,I,A,B,D) A
+#define DOUBLEV_cfT(M,I,A,B,D) A
+#define FLOATV_cfT(M,I,A,B,D) VOIDP A
+#define INTV_cfT(M,I,A,B,D) A
+#define LOGICALV_cfT(M,I,A,B,D) A
+#define LONGV_cfT(M,I,A,B,D) A
+#define SHORTV_cfT(M,I,A,B,D) A
+#define BYTEVV_cfT(M,I,A,B,D) (void *)A /* We have to cast to void *,*/
+#define BYTEVVV_cfT(M,I,A,B,D) (void *)A /* since we don't know the */
+#define BYTEVVVV_cfT(M,I,A,B,D) (void *)A /* dimensions of the array. */
+#define BYTEVVVVV_cfT(M,I,A,B,D) (void *)A /* i.e. Unfortunately, can't */
+#define BYTEVVVVVV_cfT(M,I,A,B,D) (void *)A /* check that the type */
+#define BYTEVVVVVVV_cfT(M,I,A,B,D) (void *)A /* matches the prototype. */
+#define DOUBLEVV_cfT(M,I,A,B,D) (void *)A
+#define DOUBLEVVV_cfT(M,I,A,B,D) (void *)A
+#define DOUBLEVVVV_cfT(M,I,A,B,D) (void *)A
+#define DOUBLEVVVVV_cfT(M,I,A,B,D) (void *)A
+#define DOUBLEVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define DOUBLEVVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define FLOATVV_cfT(M,I,A,B,D) (void *)A
+#define FLOATVVV_cfT(M,I,A,B,D) (void *)A
+#define FLOATVVVV_cfT(M,I,A,B,D) (void *)A
+#define FLOATVVVVV_cfT(M,I,A,B,D) (void *)A
+#define FLOATVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define FLOATVVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define INTVV_cfT(M,I,A,B,D) (void *)A
+#define INTVVV_cfT(M,I,A,B,D) (void *)A
+#define INTVVVV_cfT(M,I,A,B,D) (void *)A
+#define INTVVVVV_cfT(M,I,A,B,D) (void *)A
+#define INTVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define INTVVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define LOGICALVV_cfT(M,I,A,B,D) (void *)A
+#define LOGICALVVV_cfT(M,I,A,B,D) (void *)A
+#define LOGICALVVVV_cfT(M,I,A,B,D) (void *)A
+#define LOGICALVVVVV_cfT(M,I,A,B,D) (void *)A
+#define LOGICALVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define LOGICALVVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define LONGVV_cfT(M,I,A,B,D) (void *)A
+#define LONGVVV_cfT(M,I,A,B,D) (void *)A
+#define LONGVVVV_cfT(M,I,A,B,D) (void *)A
+#define LONGVVVVV_cfT(M,I,A,B,D) (void *)A
+#define LONGVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define LONGVVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define SHORTVV_cfT(M,I,A,B,D) (void *)A
+#define SHORTVVV_cfT(M,I,A,B,D) (void *)A
+#define SHORTVVVV_cfT(M,I,A,B,D) (void *)A
+#define SHORTVVVVV_cfT(M,I,A,B,D) (void *)A
+#define SHORTVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define SHORTVVVVVVV_cfT(M,I,A,B,D) (void *)A
+#define PBYTE_cfT(M,I,A,B,D) A
+#define PDOUBLE_cfT(M,I,A,B,D) A
+#define PFLOAT_cfT(M,I,A,B,D) VOIDP A
+#define PINT_cfT(M,I,A,B,D) A
+#define PLOGICAL_cfT(M,I,A,B,D) ((*A=F2CLOGICAL(*A)),A)
+#define PLONG_cfT(M,I,A,B,D) A
+#define PSHORT_cfT(M,I,A,B,D) A
+#define PVOID_cfT(M,I,A,B,D) A
+#if defined(apolloFortran) || defined(hpuxFortran800) || defined(AbsoftUNIXFortran)
+#define ROUTINE_cfT(M,I,A,B,D) _(ROUTINE_,I) (*A)
+#else
+#define ROUTINE_cfT(M,I,A,B,D) _(ROUTINE_,I) A
+#endif
+/* A == pointer to the characters
+ D == length of the string, or of an element in an array of strings
+ E == number of elements in an array of strings */
+#define TTSTR( A,B,D) \
+ ((B=_cf_malloc(D+1))[D]='\0', memcpy(B,A,D), kill_trailing(B,' '))
+#define TTTTSTR( A,B,D) (!(D<4||A[0]||A[1]||A[2]||A[3]))?NULL: \
+ memchr(A,'\0',D) ?A : TTSTR(A,B,D)
+#define TTTTSTRV( A,B,D,E) (_(B,N)=E,B=_cf_malloc(_(B,N)*(D+1)), (void *) \
+ vkill_trailing(f2cstrv(A,B,D+1, _(B,N)*(D+1)), D+1,_(B,N)*(D+1),' '))
+#ifdef vmsFortran
+#define STRING_cfT(M,I,A,B,D) TTTTSTR( A->dsc$a_pointer,B,A->dsc$w_length)
+#define STRINGV_cfT(M,I,A,B,D) TTTTSTRV(A->dsc$a_pointer, B, \
+ A->dsc$w_length , A->dsc$l_m[0])
+#define PSTRING_cfT(M,I,A,B,D) TTSTR( A->dsc$a_pointer,B,A->dsc$w_length)
+#define PPSTRING_cfT(M,I,A,B,D) A->dsc$a_pointer
+#else
+#ifdef CRAYFortran
+#define STRING_cfT(M,I,A,B,D) TTTTSTR( _fcdtocp(A),B,_fcdlen(A))
+#define STRINGV_cfT(M,I,A,B,D) TTTTSTRV(_fcdtocp(A),B,_fcdlen(A), \
+ num_elem(_fcdtocp(A),_fcdlen(A),_3(M,_STRV_A,I)))
+#define PSTRING_cfT(M,I,A,B,D) TTSTR( _fcdtocp(A),B,_fcdlen(A))
+#define PPSTRING_cfT(M,I,A,B,D) _fcdtocp(A)
+#else
+#define STRING_cfT(M,I,A,B,D) TTTTSTR( A,B,D)
+#define STRINGV_cfT(M,I,A,B,D) TTTTSTRV(A,B,D, num_elem(A,D,_3(M,_STRV_A,I)))
+#define PSTRING_cfT(M,I,A,B,D) TTSTR( A,B,D)
+#define PPSTRING_cfT(M,I,A,B,D) A
+#endif
+#endif
+#define PNSTRING_cfT(M,I,A,B,D) STRING_cfT(M,I,A,B,D)
+#define PSTRINGV_cfT(M,I,A,B,D) STRINGV_cfT(M,I,A,B,D)
+#define CF_0_cfT(M,I,A,B,D)
+
+#define RCF(TN,I) _(TN,_cfSTR)(3,R,_(A,I),_(B,I),_(C,I),0,0)
+#define DEFAULT_cfR(A,B,D)
+#define LOGICAL_cfR(A,B,D)
+#define PLOGICAL_cfR(A,B,D) *A=C2FLOGICAL(*A);
+#define STRING_cfR(A,B,D) if (B) _cf_free(B);
+#define STRINGV_cfR(A,B,D) _cf_free(B);
+/* A and D as defined above for TSTRING(V) */
+#define RRRRPSTR( A,B,D) if (B) memcpy(A,B, _cfMIN(strlen(B),D)), \
+ (D>strlen(B)?memset(A+strlen(B),' ', D-strlen(B)):0), _cf_free(B);
+#define RRRRPSTRV(A,B,D) c2fstrv(B,A,D+1,(D+1)*_(B,N)), _cf_free(B);
+#ifdef vmsFortran
+#define PSTRING_cfR(A,B,D) RRRRPSTR( A->dsc$a_pointer,B,A->dsc$w_length)
+#define PSTRINGV_cfR(A,B,D) RRRRPSTRV(A->dsc$a_pointer,B,A->dsc$w_length)
+#else
+#ifdef CRAYFortran
+#define PSTRING_cfR(A,B,D) RRRRPSTR( _fcdtocp(A),B,_fcdlen(A))
+#define PSTRINGV_cfR(A,B,D) RRRRPSTRV(_fcdtocp(A),B,_fcdlen(A))
+#else
+#define PSTRING_cfR(A,B,D) RRRRPSTR( A,B,D)
+#define PSTRINGV_cfR(A,B,D) RRRRPSTRV(A,B,D)
+#endif
+#endif
+#define PNSTRING_cfR(A,B,D) PSTRING_cfR(A,B,D)
+#define PPSTRING_cfR(A,B,D)
+
+#define BYTE_cfFZ(UN,LN) INTEGER_BYTE FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#define DOUBLE_cfFZ(UN,LN) DOUBLE_PRECISION FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#define INT_cfFZ(UN,LN) int FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#define LOGICAL_cfFZ(UN,LN) int FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#define LONG_cfFZ(UN,LN) long FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#define SHORT_cfFZ(UN,LN) short FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#define VOID_cfFZ(UN,LN) void FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#ifndef __CF__KnR
+/* The void is req'd by the Apollo, to make this an ANSI function declaration.
+ The Apollo promotes K&R float functions to double. */
+#define FLOAT_cfFZ(UN,LN) FORTRAN_REAL FCALLSC_QUALIFIER fcallsc(UN,LN)(void
+#ifdef vmsFortran
+#define STRING_cfFZ(UN,LN) void FCALLSC_QUALIFIER fcallsc(UN,LN)(fstring *AS
+#else
+#ifdef CRAYFortran
+#define STRING_cfFZ(UN,LN) void FCALLSC_QUALIFIER fcallsc(UN,LN)(_fcd AS
+#else
+#if defined(AbsoftUNIXFortran) || defined(AbsoftProFortran)
+#define STRING_cfFZ(UN,LN) void FCALLSC_QUALIFIER fcallsc(UN,LN)(char *AS
+#else
+#define STRING_cfFZ(UN,LN) void FCALLSC_QUALIFIER fcallsc(UN,LN)(char *AS, unsigned D0
+#endif
+#endif
+#endif
+#else
+#if ! (defined(FLOATFUNCTIONTYPE)&&defined(ASSIGNFLOAT)&&defined(RETURNFLOAT))
+#define FLOAT_cfFZ(UN,LN) FORTRAN_REAL FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#else
+#define FLOAT_cfFZ(UN,LN) FLOATFUNCTIONTYPE FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#endif
+#if defined(vmsFortran) || defined(CRAYFortran) || defined(AbsoftUNIXFortran)
+#define STRING_cfFZ(UN,LN) void FCALLSC_QUALIFIER fcallsc(UN,LN)(AS
+#else
+#define STRING_cfFZ(UN,LN) void FCALLSC_QUALIFIER fcallsc(UN,LN)(AS, D0
+#endif
+#endif
+
+#define BYTE_cfF(UN,LN) BYTE_cfFZ(UN,LN)
+#define DOUBLE_cfF(UN,LN) DOUBLE_cfFZ(UN,LN)
+#ifndef __CF__KnR /* fix: was '__CF_KnR' (single underscore), a spelling not defined in the visible header, so the K&R branch (FLOAT_cfFZ / FLOATFUNCTIONTYPE) was unreachable; every other site tests '__CF__KnR' */
+#define FLOAT_cfF(UN,LN) FORTRAN_REAL FCALLSC_QUALIFIER fcallsc(UN,LN)(
+#else
+#define FLOAT_cfF(UN,LN) FLOAT_cfFZ(UN,LN)
+#endif
+#define INT_cfF(UN,LN) INT_cfFZ(UN,LN)
+#define LOGICAL_cfF(UN,LN) LOGICAL_cfFZ(UN,LN)
+#define LONG_cfF(UN,LN) LONG_cfFZ(UN,LN)
+#define SHORT_cfF(UN,LN) SHORT_cfFZ(UN,LN)
+#define VOID_cfF(UN,LN) VOID_cfFZ(UN,LN)
+#define STRING_cfF(UN,LN) STRING_cfFZ(UN,LN),
+
+#define INT_cfFF
+#define VOID_cfFF
+#ifdef vmsFortran
+#define STRING_cfFF fstring *AS;
+#else
+#ifdef CRAYFortran
+#define STRING_cfFF _fcd AS;
+#else
+#define STRING_cfFF char *AS; unsigned D0;
+#endif
+#endif
+
+#define INT_cfL A0=
+#define STRING_cfL A0=
+#define VOID_cfL
+
+#define INT_cfK
+#define VOID_cfK
+/* KSTRING copies the string into the position provided by the caller. */
+#ifdef vmsFortran
+#define STRING_cfK \
+ memcpy(AS->dsc$a_pointer,A0,_cfMIN(AS->dsc$w_length,(A0==NULL?0:strlen(A0))));\
+ AS->dsc$w_length>(A0==NULL?0:strlen(A0))? \
+ memset(AS->dsc$a_pointer+(A0==NULL?0:strlen(A0)),' ', \
+ AS->dsc$w_length-(A0==NULL?0:strlen(A0))):0;
+#else
+#ifdef CRAYFortran
+#define STRING_cfK \
+ memcpy(_fcdtocp(AS),A0, _cfMIN(_fcdlen(AS),(A0==NULL?0:strlen(A0))) ); \
+ _fcdlen(AS)>(A0==NULL?0:strlen(A0))? \
+ memset(_fcdtocp(AS)+(A0==NULL?0:strlen(A0)),' ', \
+ _fcdlen(AS)-(A0==NULL?0:strlen(A0))):0;
+#else
+#define STRING_cfK memcpy(AS,A0, _cfMIN(D0,(A0==NULL?0:strlen(A0))) ); \
+ D0>(A0==NULL?0:strlen(A0))?memset(AS+(A0==NULL?0:strlen(A0)), \
+ ' ', D0-(A0==NULL?0:strlen(A0))):0;
+#endif
+#endif
+
+/* Note that K.. and I.. can't be combined since K.. has to access data before
+R.., in order for functions returning strings which are also passed in as
+arguments to work correctly. Note that R.. frees and hence may corrupt the
+string. */
+#define BYTE_cfI return A0;
+#define DOUBLE_cfI return A0;
+#if ! (defined(FLOATFUNCTIONTYPE)&&defined(ASSIGNFLOAT)&&defined(RETURNFLOAT))
+#define FLOAT_cfI return A0;
+#else
+#define FLOAT_cfI RETURNFLOAT(A0);
+#endif
+#define INT_cfI return A0;
+#ifdef hpuxFortran800
+/* Incredibly, functions must return true as 1, elsewhere .true.==0x01000000. */
+#define LOGICAL_cfI return ((A0)?1:0);
+#else
+#define LOGICAL_cfI return C2FLOGICAL(A0);
+#endif
+#define LONG_cfI return A0;
+#define SHORT_cfI return A0;
+#define STRING_cfI return ;
+#define VOID_cfI return ;
+
+#ifdef OLD_VAXC /* Allow %CC-I-PARAMNOTUSED. */
+#pragma standard
+#endif
+
+#define FCALLSCSUB0( CN,UN,LN) FCALLSCFUN0(VOID,CN,UN,LN)
+#define FCALLSCSUB1( CN,UN,LN,T1) FCALLSCFUN1(VOID,CN,UN,LN,T1)
+#define FCALLSCSUB2( CN,UN,LN,T1,T2) FCALLSCFUN2(VOID,CN,UN,LN,T1,T2)
+#define FCALLSCSUB3( CN,UN,LN,T1,T2,T3) FCALLSCFUN3(VOID,CN,UN,LN,T1,T2,T3)
+#define FCALLSCSUB4( CN,UN,LN,T1,T2,T3,T4) \
+ FCALLSCFUN4(VOID,CN,UN,LN,T1,T2,T3,T4)
+#define FCALLSCSUB5( CN,UN,LN,T1,T2,T3,T4,T5) \
+ FCALLSCFUN5(VOID,CN,UN,LN,T1,T2,T3,T4,T5)
+#define FCALLSCSUB6( CN,UN,LN,T1,T2,T3,T4,T5,T6) \
+ FCALLSCFUN6(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6)
+#define FCALLSCSUB7( CN,UN,LN,T1,T2,T3,T4,T5,T6,T7) \
+ FCALLSCFUN7(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7)
+#define FCALLSCSUB8( CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8) \
+ FCALLSCFUN8(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8)
+#define FCALLSCSUB9( CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9) \
+ FCALLSCFUN9(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9)
+#define FCALLSCSUB10(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA) \
+ FCALLSCFUN10(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA)
+#define FCALLSCSUB11(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB) \
+ FCALLSCFUN11(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB)
+#define FCALLSCSUB12(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC) \
+ FCALLSCFUN12(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC)
+#define FCALLSCSUB13(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD) \
+ FCALLSCFUN13(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD)
+#define FCALLSCSUB14(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ FCALLSCFUN14(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)
+#define FCALLSCSUB15(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF) \
+ FCALLSCFUN15(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF)
+#define FCALLSCSUB16(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG) \
+ FCALLSCFUN16(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG)
+#define FCALLSCSUB17(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH) \
+ FCALLSCFUN17(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH)
+#define FCALLSCSUB18(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI) \
+ FCALLSCFUN18(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI)
+#define FCALLSCSUB19(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ) \
+ FCALLSCFUN19(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ)
+#define FCALLSCSUB20(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK) \
+ FCALLSCFUN20(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK)
+#define FCALLSCSUB21(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL) \
+ FCALLSCFUN21(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL)
+#define FCALLSCSUB22(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM) \
+ FCALLSCFUN22(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM)
+#define FCALLSCSUB23(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN) \
+ FCALLSCFUN23(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN)
+#define FCALLSCSUB24(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO) \
+ FCALLSCFUN24(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO)
+#define FCALLSCSUB25(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP) \
+ FCALLSCFUN25(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP)
+#define FCALLSCSUB26(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ) \
+ FCALLSCFUN26(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ)
+#define FCALLSCSUB27(CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ FCALLSCFUN27(VOID,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)
+
+
+#define FCALLSCFUN1( T0,CN,UN,LN,T1) \
+ FCALLSCFUN5 (T0,CN,UN,LN,T1,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN2( T0,CN,UN,LN,T1,T2) \
+ FCALLSCFUN5 (T0,CN,UN,LN,T1,T2,CF_0,CF_0,CF_0)
+#define FCALLSCFUN3( T0,CN,UN,LN,T1,T2,T3) \
+ FCALLSCFUN5 (T0,CN,UN,LN,T1,T2,T3,CF_0,CF_0)
+#define FCALLSCFUN4( T0,CN,UN,LN,T1,T2,T3,T4) \
+ FCALLSCFUN5 (T0,CN,UN,LN,T1,T2,T3,T4,CF_0)
+#define FCALLSCFUN5( T0,CN,UN,LN,T1,T2,T3,T4,T5) \
+ FCALLSCFUN10(T0,CN,UN,LN,T1,T2,T3,T4,T5,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN6( T0,CN,UN,LN,T1,T2,T3,T4,T5,T6) \
+ FCALLSCFUN10(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN7( T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7) \
+ FCALLSCFUN10(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,CF_0,CF_0,CF_0)
+#define FCALLSCFUN8( T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8) \
+ FCALLSCFUN10(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,CF_0,CF_0)
+#define FCALLSCFUN9( T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9) \
+ FCALLSCFUN10(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,CF_0)
+#define FCALLSCFUN10(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA) \
+ FCALLSCFUN14(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN11(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB) \
+ FCALLSCFUN14(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,CF_0,CF_0,CF_0)
+#define FCALLSCFUN12(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC) \
+ FCALLSCFUN14(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,CF_0,CF_0)
+#define FCALLSCFUN13(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD) \
+ FCALLSCFUN14(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,CF_0)
+
+
+#define FCALLSCFUN15(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF) \
+ FCALLSCFUN20(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN16(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG) \
+ FCALLSCFUN20(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN17(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH) \
+ FCALLSCFUN20(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,CF_0,CF_0,CF_0)
+#define FCALLSCFUN18(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI) \
+ FCALLSCFUN20(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,CF_0,CF_0)
+#define FCALLSCFUN19(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ) \
+ FCALLSCFUN20(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,CF_0)
+#define FCALLSCFUN20(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK) \
+ FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN21(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL) \
+ FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,CF_0,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN22(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM) \
+ FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,CF_0,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN23(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN) \
+ FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,CF_0,CF_0,CF_0,CF_0)
+#define FCALLSCFUN24(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO) \
+ FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,CF_0,CF_0,CF_0)
+#define FCALLSCFUN25(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP) \
+ FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,CF_0,CF_0)
+#define FCALLSCFUN26(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ) \
+ FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,CF_0)
+
+
+#ifndef __CF__KnR
+#define FCALLSCFUN0(T0,CN,UN,LN) CFextern _(T0,_cfFZ)(UN,LN) ABSOFT_cf2(T0)) \
+ {_Icf(2,UU,T0,A0,0); _Icf(0,L,T0,0,0) CN(); _Icf(0,K,T0,0,0) _(T0,_cfI)}
+
+#define FCALLSCFUN14(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ CFextern _(T0,_cfF)(UN,LN) \
+ CFARGT14(NCF,DCF,ABSOFT_cf2(T0),T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) ) \
+ { CFARGT14S(QCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ _Icf(2,UU,T0,A0,0); _Icf(0,L,T0,0,0) CN( TCF(LN,T1,1,0) TCF(LN,T2,2,1) \
+ TCF(LN,T3,3,1) TCF(LN,T4,4,1) TCF(LN,T5,5,1) TCF(LN,T6,6,1) TCF(LN,T7,7,1) \
+ TCF(LN,T8,8,1) TCF(LN,T9,9,1) TCF(LN,TA,10,1) TCF(LN,TB,11,1) TCF(LN,TC,12,1) \
+ TCF(LN,TD,13,1) TCF(LN,TE,14,1) ); _Icf(0,K,T0,0,0) \
+ CFARGT14S(RCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) _(T0,_cfI) }
+
+#define FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ CFextern _(T0,_cfF)(UN,LN) \
+ CFARGT27(NCF,DCF,ABSOFT_cf2(T0),T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) ) \
+ { CFARGT27S(QCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ _Icf(2,UU,T0,A0,0); _Icf(0,L,T0,0,0) CN( TCF(LN,T1,1,0) TCF(LN,T2,2,1) \
+ TCF(LN,T3,3,1) TCF(LN,T4,4,1) TCF(LN,T5,5,1) TCF(LN,T6,6,1) TCF(LN,T7,7,1) \
+ TCF(LN,T8,8,1) TCF(LN,T9,9,1) TCF(LN,TA,10,1) TCF(LN,TB,11,1) TCF(LN,TC,12,1) \
+ TCF(LN,TD,13,1) TCF(LN,TE,14,1) TCF(LN,TF,15,1) TCF(LN,TG,16,1) TCF(LN,TH,17,1) \
+ TCF(LN,TI,18,1) TCF(LN,TJ,19,1) TCF(LN,TK,20,1) TCF(LN,TL,21,1) TCF(LN,TM,22,1) \
+ TCF(LN,TN,23,1) TCF(LN,TO,24,1) TCF(LN,TP,25,1) TCF(LN,TQ,26,1) TCF(LN,TR,27,1) ); _Icf(0,K,T0,0,0) \
+ CFARGT27S(RCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) _(T0,_cfI) }
+
+#else
+#define FCALLSCFUN0(T0,CN,UN,LN) CFextern _(T0,_cfFZ)(UN,LN) ABSOFT_cf3(T0)) _Icf(0,FF,T0,0,0)\
+ {_Icf(2,UU,T0,A0,0); _Icf(0,L,T0,0,0) CN(); _Icf(0,K,T0,0,0) _(T0,_cfI)}
+
+#define FCALLSCFUN14(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ CFextern _(T0,_cfF)(UN,LN) \
+ CFARGT14(NNCF,DDCF,ABSOFT_cf3(T0),T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE)) _Icf(0,FF,T0,0,0) \
+ CFARGT14FS(NNNCF,DDDCF,_Z,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE); \
+ { CFARGT14S(QCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) \
+ _Icf(2,UU,T0,A0,0); _Icf(0,L,T0,0,0) CN( TCF(LN,T1,1,0) TCF(LN,T2,2,1) \
+ TCF(LN,T3,3,1) TCF(LN,T4,4,1) TCF(LN,T5,5,1) TCF(LN,T6,6,1) TCF(LN,T7,7,1) \
+ TCF(LN,T8,8,1) TCF(LN,T9,9,1) TCF(LN,TA,10,1) TCF(LN,TB,11,1) TCF(LN,TC,12,1) \
+ TCF(LN,TD,13,1) TCF(LN,TE,14,1) ); _Icf(0,K,T0,0,0) \
+ CFARGT14S(RCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE) _(T0,_cfI)}
+
+#define FCALLSCFUN27(T0,CN,UN,LN,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ CFextern _(T0,_cfF)(UN,LN) \
+ CFARGT27(NNCF,DDCF,ABSOFT_cf3(T0),T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR)) _Icf(0,FF,T0,0,0) \
+ CFARGT27FS(NNNCF,DDDCF,_Z,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR); \
+ { CFARGT27S(QCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) \
+ _Icf(2,UU,T0,A0,0); _Icf(0,L,T0,0,0) CN( TCF(LN,T1,1,0) TCF(LN,T2,2,1) \
+ TCF(LN,T3,3,1) TCF(LN,T4,4,1) TCF(LN,T5,5,1) TCF(LN,T6,6,1) TCF(LN,T7,7,1) \
+ TCF(LN,T8,8,1) TCF(LN,T9,9,1) TCF(LN,TA,10,1) TCF(LN,TB,11,1) TCF(LN,TC,12,1) \
+ TCF(LN,TD,13,1) TCF(LN,TE,14,1) TCF(LN,TF,15,1) TCF(LN,TG,16,1) TCF(LN,TH,17,1) \
+ TCF(LN,TI,18,1) TCF(LN,TJ,19,1) TCF(LN,TK,20,1) TCF(LN,TL,21,1) TCF(LN,TM,22,1) \
+ TCF(LN,TN,23,1) TCF(LN,TO,24,1) TCF(LN,TP,25,1) TCF(LN,TQ,26,1) TCF(LN,TR,27,1) ); _Icf(0,K,T0,0,0) \
+ CFARGT27S(RCF,T1,T2,T3,T4,T5,T6,T7,T8,T9,TA,TB,TC,TD,TE,TF,TG,TH,TI,TJ,TK,TL,TM,TN,TO,TP,TQ,TR) _(T0,_cfI)}
+
+#endif
+
+
+#endif /* __CFORTRAN_LOADED */
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_commandline.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_commandline.F90
new file mode 100644
index 0000000000..d9fdd863f7
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_commandline.F90
@@ -0,0 +1,162 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_commandline.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#include "config.inc"
+
+#ifdef HAVE_2003ARGS
+#define NARGS command_argument_count()
+#define GETARG get_command_argument
+#else
+#define NARGS iargc()
+#define GETARG getarg
+#endif
+
+!> parsing common command line arguments
+module glimmer_commandline
+
+  use glimmer_global, only:fname_length
+
+  implicit none
+
+  character(len=5000) :: commandline_history !< complete command line
+  character(len=fname_length) :: commandline_configname !< name of the configuration file
+  character(len=fname_length) :: commandline_resultsname !< name of results file
+
+contains
+
+  !> get the command line and parse it
+  !!
+  !! \author Magnus Hagdorn
+  !! \date April 2009
+  subroutine glimmer_GetCommandline()
+    use parallel, only: main_task
+    implicit none
+
+    integer numargs,nfiles ! total argument count / count of non-option (file) arguments
+    integer :: i
+#ifndef HAVE_2003ARGS
+    integer, external :: iargc
+#endif
+    character(len=100) :: argument
+    integer, dimension(100) :: argumentIdx ! positions of non-option arguments (at most 100)
+
+    ! defaults
+    commandline_resultsname = 'results'
+
+    if (main_task) then ! only the main task parses; results live in module variables
+       ! get number of arguments and file names
+       numargs = NARGS
+       ! reconstruct command line to store commandline_history
+       call GETARG(0,commandline_history)
+       do i=1,numargs
+          call GETARG(i,argument)
+          commandline_history = trim(commandline_history)//" "//trim(argument)
+       end do
+
+       if (numargs > 0) then
+          i=0
+          nfiles = 0
+          ! loop over command line arguments
+          do while (i < numargs)
+             i = i + 1
+             call GETARG(i,argument)
+             ! check if it is an option
+             if (argument(1:1) == '-') then
+                select case (trim(argument))
+                case ('-h')
+                   call glimmer_commandlineHelp()
+                   stop
+                case ('-r') ! results-file name follows in the next argument
+                   i = i+1
+                   if (i > numargs) then
+                      write(*,*) 'Error, expect name of output file to follow -r option'
+                      call glimmer_commandlineHelp()
+                      stop
+                   end if
+                   call GETARG(i,commandline_resultsname)
+                case default
+                   write(*,*) 'Unknown option ',trim(argument)
+                   call glimmer_commandlineHelp()
+                   stop
+                end select
+             else
+                ! it's not an option
+                nfiles = nfiles+1
+                argumentIdx(nfiles) = i
+             end if
+          end do
+          if (nfiles > 0) then ! only the first non-option argument is used
+             call GETARG(argumentIdx(1),commandline_configname)
+          else
+             write(*,*) 'Need at least one argument'
+             call glimmer_commandlineHelp()
+             stop
+          end if
+       else
+          write(*,*) 'Enter name of GLIDE configuration file to be read'
+          read(*,'(a)') commandline_configname
+          ! commandline_configname = 'hump.config'
+       end if ! numargs > 0
+    end if ! main_task
+
+  end subroutine glimmer_GetCommandline
+
+  !> print out command line
+  !!
+  !! \author Magnus Hagdorn
+  !! \date April 2009
+  subroutine glimmer_PrintCommandline()
+    use parallel, only: main_task
+    implicit none
+
+    if (main_task) then
+       write(*,*) 'Entire commandline'
+       write(*,*) trim(commandline_history)
+       write(*,*)
+       write(*,*) 'commandline_configname:  ', trim(commandline_configname)
+       write(*,*) 'commandline_resultsname: ', trim(commandline_resultsname)
+    endif
+  end subroutine glimmer_PrintCommandline
+
+  !> print help message
+  !!
+  !! \author Magnus Hagdorn
+  !! \date April 2009
+  subroutine glimmer_commandlineHelp()
+    use parallel, only: main_task
+    implicit none
+    character(len=500) :: pname ! program name, taken from argument 0
+
+    call GETARG(0,pname)
+
+    if (main_task) then
+       write(*,*) 'Usage: ',trim(pname),' [options] cfgname'
+       write(*,*) 'where [options] are'
+       write(*,*) ' -h: this message'
+       write(*,*) ' -r : the name of the results file (default: results)'
+    endif
+  end subroutine glimmer_commandlineHelp
+end module glimmer_commandline
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_config.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_config.F90
new file mode 100644
index 0000000000..b271c0e92c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_config.F90
@@ -0,0 +1,943 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_config.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> configuration file parser
+!!
+!! \author Magnus Hagdorn
+!! \date May 2004
+!!
+!! procedures used to parse configuration files. The file syntax is similar to
+!! MS Windows style ini files or files that can be parsed using the Python
+!! configuration file parser module.
+!!
+!! The file is split up into sections. Each section appears in [] brackets.
+!! Each section can contain a number of key, value pairs. Key, value pairs are
+!! separated by : or =.
+!!
+!! Strings starting with any of the following characters are ignored
+!! (comments): !, # or ;
+!!
+!! The sections are stored in a linked list. The key-value pairs of each section
+!! are also stored in linked lists. The module provides accessors to query the
+!! data structure.
+module glimmer_config
+
+ use glimmer_global, only : sp, dp, msg_length
+ use glimmer_log
+
+ implicit none
+
+ private :: handle_section, handle_value, InsertSection, InsertValue, dp
+
+ integer, parameter :: namelen=50 !< the maximum length of key or section
+ integer, parameter :: valuelen=400 !< the maximum length of a value
+ integer, parameter :: linelen=valuelen+namelen+1 !< the maximum length of a line
+
+ !> derived type defining a key-value pair
+ type ConfigValue
+ character(len=namelen) :: name = '' !< the key
+ character(len=valuelen) :: value !< the value
+ type(ConfigValue), pointer :: next=>NULL() !< pointer to the next key-value pair
+ end type ConfigValue
+
+ !> derived type defining a configuration section
+ type ConfigSection
+ character(len=namelen) :: name = '' !< the section name
+ logical :: used = .false. !< flag used to check if section is used
+ type(ConfigValue), pointer :: values=>NULL() !< pointer to the first key-value pair
+ type(ConfigSection), pointer :: next=>NULL() !< pointer to the next section
+ end type ConfigSection
+
+ !> This type exists so that we can have
+ !! arrays of config data, since f90 doesn't
+ !! allow arrays of pointers
+ type ConfigData
+ type(ConfigSection), pointer :: config=>null()
+ end type ConfigData
+
+ !> generic interface for the get accessor
+ interface GetValue
+ module procedure GetValueDouble, GetValueReal, GetValueInt, GetValueChar, GetValueLogical, &
+ GetValueDoubleArray, GetValueRealArray, GetValueIntArray, GetValueCharArray
+ end interface
+
+ !> generic interface for the set accessor
+ interface ConfigSetValue
+ module procedure ConfigSetValueData, ConfigSetValueSec
+ end interface
+
+ !> generic interface for the combine procedure
+ interface ConfigCombine
+ module procedure ConfigCombineData, ConfigCombineSec, ConfigCombineDataSec, ConfigCombineSecData
+ end interface
+
+contains
+
+ !> read a configuration file
+  subroutine ConfigRead(fname,config,fileunit)
+    !> read configuration file (main task reads, contents broadcast to all)
+    use parallel
+    use glimmer_log
+    implicit none
+
+    character(len=*), intent(in) :: fname !< the name of the file to be read
+    type(ConfigSection), pointer :: config !< on return this pointer will point to the first section
+    integer, optional,intent(in) :: fileunit !< if supplied, open this unit
+
+    ! local variables
+    type(ConfigSection), pointer :: this_section
+    type(ConfigValue), pointer :: this_value
+    logical there
+    integer unit,ios,linenr
+    character(len=linelen) :: line
+    character(len=msg_length) :: message
+
+    if (main_task) inquire (exist=there,file=fname)
+    call broadcast(there)
+    if (.not.there) then
+       call write_log('Cannot open configuration file '//trim(fname),GM_FATAL)
+    end if
+
+    unit = 99 ! fallback unit when the caller does not supply one
+    if (present(fileunit)) then
+       unit = fileunit
+    endif
+
+    if (main_task) open(unit,file=trim(fname),status='old')
+    ios=0
+    linenr=0
+    config=>NULL()
+    this_section=>NULL()
+    do while(ios == 0)
+       if (main_task) read(unit,fmt='(a)',iostat=ios) line ! '(a)' fills len(line)=linelen; old '(a450)' dropped one char of capacity
+       call broadcast(line)
+       call broadcast(ios)
+       line = adjustl(line)
+       if (ios /= 0) then
+          exit
+       end if
+       linenr = linenr + 1 ! count the line just read, so diagnostics are 1-based
+       if (.not.(line(1:1) == '!' .or. line(1:1) == '#' .or. line(1:1) == ';' .or. line(1:1) == ' ')) then
+          ! not a comment/blank line: either a section header or a key-value pair
+          if (line(1:1) == '[') then
+             ! new section
+             call handle_section(linenr,line,this_section)
+             this_value=>NULL()
+             if (.not.associated(config)) then
+                ! this is the first section in config file
+                config=>this_section
+             end if
+          else
+             ! handle value
+             if (.not.associated(this_section)) then
+                call write_log('No section defined yet',GM_ERROR)
+                write(message,*) trim(adjustl(fname)), linenr
+                call write_log(message,GM_FATAL)
+             end if
+             call handle_value(linenr,line,this_value)
+             if (.not.associated(this_section%values)) then
+                this_section%values => this_value
+             end if
+          end if
+       end if
+    end do
+    if (main_task) close(unit)
+    return
+
+  end subroutine ConfigRead
+
+ !> print contents of file
+  subroutine PrintConfig(config)
+    !> Dump every section and its key-value pairs to standard output.
+    implicit none
+    type(ConfigSection), pointer :: config !< pointer to the first section to be printed
+
+    type(ConfigSection), pointer :: cursec
+    type(ConfigValue), pointer :: curval
+
+    cursec => config
+    do while (associated(cursec))
+       write(*,*) cursec%name
+       curval => cursec%values
+       do while (associated(curval))
+          write(*,*) ' ',trim(curval%name),' == ', trim(curval%value)
+          curval => curval%next
+       end do
+       write(*,*) ! blank separator line between sections
+       cursec => cursec%next
+    end do
+  end subroutine PrintConfig
+
+ !> serialise config data structure to string
+ !! \author Ian Rutt
+  subroutine ConfigAsString(config,string)
+    !> Serialise the configuration into a single string: a '[section]'
+    !! header followed by 'key: value' entries, each terminated by
+    !! endline, in list order.
+    use glimmer_global, only: endline
+    implicit none
+    type(ConfigSection), pointer :: config !< pointer to first section
+    character(*),intent(out) :: string !< on completion this string will hold the conents of the config data structure
+    type(ConfigSection), pointer :: s
+    type(ConfigValue), pointer :: v
+    string = ''
+    s => config
+    do while (associated(s))
+       string = trim(string)//'['//trim(s%name)//']'//trim(endline)
+       v => s%values
+       do while (associated(v))
+          string = trim(string)//trim(v%name)//': '//trim(v%value)//trim(endline)
+          v => v%next
+       end do
+       s => s%next
+    end do
+  end subroutine ConfigAsString
+
+ !> Either overwrite a given key-value pair,
+ !! or create a new one
+ !! \author Ian Rutt
+  subroutine ConfigSetValueData(config,secname,valname,value,tag)
+
+    type(ConfigData) :: config !< wrapper holding the section list to be modified
+    character(len=*), intent(in) :: secname !< name of the section
+    character(len=*), intent(in) :: valname !< name of the key
+    character(len=*), intent(in) :: value !< the value
+    character(len=*), intent(in), optional :: tag !< an identifier used to distinguish sections that occur a number of times,e.g. [CF output]
+
+    call ConfigSetValueSec(config%config,secname,valname,value,tag) ! absent optional 'tag' may legally be passed on to an optional dummy
+
+  end subroutine ConfigSetValueData
+
+ !> Either overwrite a given key-value pair,
+ !! or create a new one
+ !! \author Ian Rutt
+  subroutine ConfigSetValueSec(config,secname,valname,value,tag)
+
+    type(ConfigSection), pointer :: config !< pointer to the first section
+    character(len=*), intent(in) :: secname !< name of the section
+    character(len=*), intent(in) :: valname !< name of the key
+    character(len=*), intent(in) :: value !< the value
+    character(len=*), intent(in), optional :: tag !< an identifier used to distinguish sections that occur a number of times,e.g. [CF output]
+
+    type(ConfigSection), pointer :: found
+    type(ConfigSection), pointer :: newsec
+    type(ConfigValue), pointer :: val
+    type(ConfigValue), pointer :: newval
+    type(ConfigValue), pointer :: newtag
+    logical :: tagflag
+
+    ! Find or create correct section
+
+    if (.not.associated(config)) allocate(config) ! start a new list if the head pointer is null
+
+    found=>config
+    do
+       if (associated(found)) then
+          if (present(tag)) then
+             tagflag=ConfigSectionHasTag(found,tag) ! with a tag, the section must carry a matching 'tag' key
+          else
+             tagflag=.true. ! no tag requested: a name match suffices
+          end if
+          if ((trim(secname)==trim(found%name)).and.tagflag) then
+             exit ! found the target section
+          else
+             if (associated(found%next)) then
+                found=>found%next
+             else
+                allocate(newsec) ! reached the tail without a match: append a new section
+                found%next=>newsec
+                found=>found%next
+                found%name=trim(secname)
+                if (present(tag)) then
+                   allocate(newtag)
+                   newtag%name='tag' ! the tag is stored as an ordinary key-value pair
+                   newtag%value=trim(tag)
+                   found%values=>newtag
+                end if
+                exit
+             end if
+          end if
+       else
+          exit ! NOTE(review): given the allocate above, 'found' should always be associated here -- branch looks unreachable, confirm
+       end if
+    end do
+
+    ! Add or create key-value pair
+
+    if (.not.associated(found%values)) then
+       allocate(newval) ! section has no values yet: this pair becomes the list head
+       found%values=>newval
+       found%values%name=valname
+       found%values%value=value
+    else
+       val=>found%values
+       do
+          if (trim(valname)==trim(val%name)) then
+             val%value=value ! key exists: overwrite in place
+             exit
+          else
+             if (associated(val%next)) then
+                val=>val%next
+             else
+                allocate(newval) ! key not present: append at the tail
+                val%next=>newval
+                val%next%name=valname
+                val%next%value=value
+                exit
+             end if
+          end if
+       end do
+    end if
+
+  end subroutine ConfigSetValueSec
+
+ !> Add the contents of config2 to config1, overwriting if necessary
+ !! \author Ian Rutt
+  subroutine ConfigCombineDataSec(config1,config2)
+
+    type(ConfigData) :: config1 !< destination: receives/overwrites entries
+    type(ConfigSection),pointer :: config2 !< source section list
+
+    call ConfigCombineSec(config1%config,config2) ! unwrap destination, delegate
+
+  end subroutine ConfigCombineDataSec
+
+  !> Add the contents of config2 to config1, overwriting if necessary
+  !! \author Ian Rutt
+  subroutine ConfigCombineSecData(config1,config2)
+
+    type(ConfigSection),pointer :: config1 !< destination: receives/overwrites entries
+    type(ConfigData) :: config2 !< source configuration wrapper
+
+    call ConfigCombineSec(config1,config2%config) ! unwrap source, delegate
+
+  end subroutine ConfigCombineSecData
+
+
+  !> Add the contents of config2 to config1, overwriting if necessary
+  !! \author Ian Rutt
+  subroutine ConfigCombineData(config1,config2)
+
+    type(ConfigData) :: config1 !< destination: receives/overwrites entries
+    type(ConfigData) :: config2 !< source configuration wrapper
+
+    call ConfigCombineSec(config1%config,config2%config) ! unwrap both, delegate
+
+  end subroutine ConfigCombineData
+
+ !> Add the contents of config2 to config1, overwriting if necessary
+ !! \author Ian Rutt
+ subroutine ConfigCombineSec(config1,config2)
+
+ type(ConfigSection), pointer :: config1
+ type(ConfigSection), pointer :: config2
+
+ type(ConfigSection), pointer :: thissec
+ type(ConfigValue), pointer :: thisval
+ character(namelen) :: thisname
+
+ character(150) :: tag
+
+ thissec=>config2
+ do
+ if (associated(thissec)) then
+ thisval=>thissec%values
+ thisname=trim(thissec%name)
+ do
+ if (associated(thisval)) then
+ if (ConfigSectionHasValue(thissec,'tag',tag)) then
+ call ConfigSetValue(config1,thisname,trim(thisval%name),trim(thisval%value),tag=tag)
+ else
+ call ConfigSetValue(config1,thisname,trim(thisval%name),trim(thisval%value))
+ end if
+ thisval=>thisval%next
+ else
+ exit
+ end if
+ end do
+ thissec=>thissec%next
+ else
+ exit
+ end if
+ end do
+
+ end subroutine ConfigCombineSec
+
+ !> check if section has specified tag
+ !! \author Ian Rutt
+ !!
+ !! a tag is just a special key-value pair
+ logical function ConfigSectionHasTag(section,tag)
+
+ type(ConfigSection), pointer :: section !< pointer to section
+ character(len=*),intent(in) :: tag !< the name of the tag
+ character(200) :: testtag
+
+ ConfigSectionHasTag=.false.
+ if (ConfigSectionHasValue(section,'tag',testtag)) then
+ if (trim(tag)==trim(testtag)) then
+ ConfigSectionHasTag=.true.
+ end if
+ end if
+
+ end function ConfigSectionhasTag
+
+ !> check if section has a particular key-value pair
+ !! \author Ian Rutt
+ logical function ConfigSectionHasValue(section,valname,val)
+
+ type(ConfigSection), pointer :: section !< pointer to the section to be checked
+ type(ConfigValue), pointer :: thisval
+ character(len=*), intent(in) :: valname !< the name of the key
+ character(len=*), intent(inout) :: val !< the value
+
+ ConfigSectionHasValue=.false.
+ val=''
+
+ if (.not.associated(section)) return
+
+ thisval=>section%values
+ do
+ if (.not.associated(thisval)) exit
+ if (trim(valname)==trim(thisval%name)) then
+ val=trim(thisval%value)
+ ConfigSectionHasValue=.true.
+ exit
+ else
+ thisval=>thisval%next
+ end if
+ end do
+
+ end function ConfigSectionHasValue
+
+ !> find and return a section
+ !! \author Magnus Hagdorn
+ subroutine GetSection(config,found,name)
+ implicit none
+ type(ConfigSection), pointer :: config !< pointer to the first section
+ type(ConfigSection), pointer :: found
+ character(len=*),intent(in) :: name !< the name of the section to be found
+
+ found=>config
+ do while(associated(found))
+ if (name == trim(found%name)) then
+ found%used = .true.
+ return
+ end if
+ found=>found%next
+ end do
+ end subroutine GetSection
+
+ !> traverse linked list and check that all sections have been used
+ subroutine CheckSections(config)
+ use glimmer_log
+ implicit none
+ type(ConfigSection), pointer :: config
+
+ ! local variables
+ type(ConfigSection), pointer :: cf
+
+ cf=>config
+ do while(associated(cf))
+ if (.not.cf%used) then
+ call write_log('Unused section: '//trim(cf%name),GM_WARNING)
+ end if
+ cf=>cf%next
+ end do
+ end subroutine CheckSections
+
+ !> get double array value
+ subroutine GetValueDoubleArray(section,name,val,numval)
+ use glimmer_log
+ implicit none
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ real(dp), pointer, dimension(:) :: val !< on exit this will hold the values
+ integer,intent(in), optional :: numval !< maximum number of values to be read
+
+ ! local variables
+ character(len=valuelen) :: value,tmp
+ real(dp), dimension(:),allocatable :: tempval
+ integer i,numv,inds,indc,ind
+
+ if (present(numval)) then
+ numv=numval
+ else
+ numv=100
+ end if
+ allocate(tempval(numv))
+ value=''
+ call GetValueChar(section,name,value)
+ if (value == '') return
+
+ i=1
+ do
+ inds=index(value,' ') ; indc=index(value,',')
+ if (inds==0.and.indc==0) then
+ exit
+ else if (inds==1.or.indc==1) then
+ value=value(2:)
+ cycle
+ else if (inds==0) then
+ ind=indc
+ else if (indc==0) then
+ ind=inds
+ else
+ ind=min(inds,indc)
+ end if
+ tmp=value(1:ind-1)
+ read(tmp,*,err=10)tempval(i)
+ value=value(ind+1:)
+ if (trim(value)=='') exit
+ i=i+1
+ end do
+ if (i >= 1) then
+ if (associated(val)) then
+ deallocate(val)
+ end if
+ allocate(val(i))
+ val = tempval(1:i)
+ end if
+ return
+
+10 call write_log('Array error in config file - check syntax',GM_FATAL)
+
+ end subroutine GetValueDoubleArray
+
+ !> get real array value
+ subroutine GetValueRealArray(section,name,val,numval)
+
+ use glimmer_log
+ implicit none
+
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ real(sp), pointer, dimension(:) :: val !< on exit this will hold the values
+ integer,intent(in), optional :: numval !< maximum number of values to be read
+
+ ! local variables
+ character(len=valuelen) :: value,tmp
+ real(sp), dimension(:),allocatable :: tempval
+ integer i,numv,inds,indc,ind
+
+ if (present(numval)) then
+ numv=numval
+ else
+ numv=100
+ end if
+ allocate(tempval(numv))
+ value=''
+ call GetValueChar(section,name,value)
+ if (value == '') return
+
+ i=1
+ do
+ inds=index(value,' ') ; indc=index(value,',')
+ if (inds==0.and.indc==0) then
+ exit
+ else if (inds==1.or.indc==1) then
+ value=value(2:)
+ cycle
+ else if (inds==0) then
+ ind=indc
+ else if (indc==0) then
+ ind=inds
+ else
+ ind=min(inds,indc)
+ end if
+ tmp=value(1:ind-1)
+ read(tmp,*,err=10)tempval(i)
+ value=value(ind+1:)
+ if (trim(value)=='') exit
+ i=i+1
+ end do
+
+ if (i >= 1) then
+ if (associated(val)) then
+ deallocate(val)
+ end if
+ allocate(val(i))
+ val = tempval(1:i)
+ end if
+ return
+
+10 call write_log('Array error in config file - check syntax',GM_FATAL)
+
+ end subroutine GetValueRealArray
+
+ !> get integer array value
+ subroutine GetValueIntArray(section,name,val,numval)
+ !> get integer array value
+ use glimmer_log
+ implicit none
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ integer, pointer, dimension(:) :: val !< on exit this will hold the value
+ integer,intent(in), optional :: numval !< maximum number of values to be read
+
+ ! local variables
+ character(len=valuelen) :: value,tmp
+ integer, dimension(:),allocatable :: tempval
+ integer i,numv,inds,indc,ind
+
+ if (present(numval)) then
+ numv=numval
+ else
+ numv=100
+ end if
+ allocate(tempval(numv))
+ value=''
+ call GetValueChar(section,name,value)
+ if (value == '') return
+
+ i=1
+ do
+ inds=index(value,' ') ; indc=index(value,',')
+ if (inds==0.and.indc==0) then
+ exit
+ else if (inds==1.or.indc==1) then
+ value=value(2:)
+ cycle
+ else if (inds==0) then
+ ind=indc
+ else if (indc==0) then
+ ind=inds
+ else
+ ind=min(inds,indc)
+ end if
+ tmp=value(1:ind-1)
+ read(tmp,*,err=10)tempval(i)
+ value=value(ind+1:)
+ if (trim(value)=='') exit
+ i=i+1
+ end do
+
+ if (i >= 1) then
+ if (associated(val)) then
+ deallocate(val)
+ end if
+ allocate(val(i))
+ val = tempval(1:i)
+ end if
+ return
+
+10 call write_log('Array error in config file - check syntax',GM_FATAL)
+
+ end subroutine GetValueIntArray
+
+ !> get character array value
+ subroutine GetValueCharArray(section,name,val,numval)
+ use glimmer_log
+ implicit none
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ character(len=80), pointer, dimension(:) :: val !< on exit this will hold the values
+ integer,intent(in), optional :: numval !< maximum number of values to be read
+
+ ! local variables
+ character(len=valuelen) :: value
+ character(80), dimension(:),allocatable :: tempval
+ integer i,numv,inds,indc,ind
+
+ if (present(numval)) then
+ numv=numval
+ else
+ numv=100
+ end if
+ allocate(tempval(numv))
+ value=''
+ call GetValueChar(section,name,value)
+ if (value == '') return
+
+ i=1
+ do
+ inds=index(value,' ') ; indc=index(value,',')
+ if (inds==0.and.indc==0) then
+ exit
+ else if (inds==1.or.indc==1) then
+ value=value(2:)
+ cycle
+ else if (inds==0) then
+ ind=indc
+ else if (indc==0) then
+ ind=inds
+ else
+ ind=min(inds,indc)
+ end if
+ tempval(i)=value(1:ind-1)
+ value=value(ind+1:)
+ if (trim(value)=='') exit
+ i=i+1
+ end do
+
+ if (i >= 1) then
+ if (associated(val)) then
+ deallocate(val)
+ end if
+ allocate(val(i))
+ val = tempval(1:i)
+ end if
+ return
+
+10 call write_log('Array error in config file - check syntax',GM_FATAL)
+
+ end subroutine GetValueCharArray
+
+ !> get real value
+ subroutine GetValueReal(section,name,val)
+ implicit none
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ real(sp), intent(inout) :: val !< the value
+
+ ! local variables
+ character(len=valuelen) :: value
+ real(sp) :: temp
+ integer ios
+
+ value=''
+ call GetValueChar(section,name,value)
+
+ read(value,*,iostat=ios) temp
+ if (ios==0) then
+ val = temp
+ elseif (ios > 0) then
+ call write_log('Value for "' // trim( name) // '" specified in .config file was not used because of a read error (e.g. wrong data type used). Default value has been used instead.', GM_WARNING)
+ end if
+ end subroutine GetValueReal
+
+ !> get double value
+ subroutine GetValueDouble(section,name,val)
+ implicit none
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ real(dp), intent(inout) :: val !< the value
+
+ ! local variables
+ character(len=valuelen) :: value
+
+ real(dp) :: temp
+
+ integer ios
+
+ value=''
+ call GetValueChar(section,name,value)
+
+ read(value,*,iostat=ios) temp
+ if (ios==0) then
+ val = temp
+ elseif (ios > 0) then
+ call write_log('Value for the option "' // trim( name) // '" specified in .config file was not used because of a read error (e.g. wrong data type used). Default value has been used instead.', GM_WARNING)
+ end if
+ end subroutine GetValueDouble
+
+ !> get integer value
+ subroutine GetValueInt(section,name,val)
+ implicit none
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ integer, intent(inout) :: val !< the value
+
+ ! local variables
+ character(len=valuelen) :: value
+ integer temp
+ integer ios
+
+ value=''
+ call GetValueChar(section,name,value)
+
+ read(value,*,iostat=ios) temp
+ if (ios==0) then
+ val = temp
+ elseif (ios > 0) then
+ call write_log('Value for the option "' // trim( name) // '" specified in .config file was not used because of a read error (e.g. wrong data type used). Default value has been used instead.', GM_WARNING)
+ end if
+ end subroutine GetValueInt
+
+ !> get character value
+ subroutine GetValueChar(section,name,val)
+ use glimmer_log
+ implicit none
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ character(len=*), intent(inout) :: val !< the value
+
+ type(ConfigValue), pointer :: value
+
+ value=>section%values
+ do while(associated(value))
+ if (name == trim(value%name)) then
+ val = value%value
+ if ((len_trim(val) + 1) >= len(val)) then
+ ! Assume that if we get within one space of the variable length (excluding spaces) then we may be truncating the intended value.
+ call write_log('The value of config option ' // trim(name) // ' is too long for the variable.' ,GM_FATAL)
+ endif
+ return
+ end if
+ value=>value%next
+ end do
+ end subroutine GetValueChar
+
+ !> get logical value
+ subroutine GetValueLogical(section,name,val)
+ implicit none
+ type(ConfigSection), pointer :: section !< the section from which the value is loaded
+ character(len=*),intent(in) :: name !< the name of the key
+ logical, intent(inout) :: val !< the value
+
+ ! local variables
+ character(len=valuelen) :: value
+ integer itemp
+ logical ltemp
+ integer ios
+ integer ierr
+
+ ierr = 0
+ value=''
+ call GetValueChar(section,name,value)
+
+ read(value,*,iostat=ios) itemp
+ if (ios==0) then
+ val = itemp == 1
+ elseif (ios > 0) then
+ ierr = 1
+ end if
+ read(value,*,iostat=ios) ltemp
+ if (ios==0) then
+ val = ltemp
+ elseif (ios > 0) then
+ ierr = ierr + 1
+ end if
+ if (ierr == 2) then
+ call write_log('Value for the option "' // trim( name) // '" specified in .config file was not used because of a read error (e.g. wrong data type used). Default value has been used instead.', GM_WARNING)
+ endif
+
+ end subroutine GetValueLogical
+
+ !==================================================================================
+ ! private procedures
+ !==================================================================================
+
+ !> handle line in file containing a section
+ subroutine handle_section(linenr,line,section)
+ use glimmer_log
+ implicit none
+ integer, intent(in) :: linenr !< the line number
+ character(len=*), intent(in) :: line !< buffer containing the line
+ type(ConfigSection), pointer :: section !< pointer to place where new section should be inserted
+
+ ! local variables
+ integer i
+ character(len=msg_length) :: message
+
+ do i=1,linelen
+ if (line(i:i) == ']') then
+ exit
+ end if
+ end do
+ if (line(i:i) /= ']') then
+ write(message,*) 'Cannot find end of section ',linenr
+ call write_log(message,GM_FATAL)
+ end if
+
+ call InsertSection(trim(adjustl(line(2:i-1))),section)
+ end subroutine handle_section
+
+ !> handle line in file containing a key-value pair
+ subroutine handle_value(linenr,line,value)
+ use glimmer_log
+ implicit none
+ integer, intent(in) :: linenr !< the line number
+ character(len=*), intent(in) :: line !< buffer containing the line
+ type(ConfigValue), pointer :: value !< pointer to value linked list where value should be added
+
+ ! local variables
+ integer i
+ character(len=msg_length) :: message
+ do i=1,linelen
+ if (line(i:i) == '=' .or. line(i:i) == ':') then
+ exit
+ end if
+ end do
+ if (.not.(line(i:i) == '=' .or. line(i:i) == ':')) then
+ write(message,*) 'Cannot find = or : ',linenr
+ call write_log(message,GM_FATAL)
+ end if
+
+ call InsertValue(trim(adjustl(line(:i-1))), trim(adjustl(line(i+1:))),value)
+ end subroutine handle_value
+
+ !> add a new section
+ subroutine InsertSection(name,section)
+ !> add a new section
+ implicit none
+ character(len=*), intent(in) :: name !< name of new section
+ type(ConfigSection), pointer :: section !< on entry the element of linked list after which the new element is inserted, on exit: the new element
+ type(ConfigSection), pointer :: new_sec
+
+ allocate(new_sec)
+ new_sec%name = name
+
+ if (associated(section)) then
+ if (associated(section%next)) then
+ new_sec%next => section%next
+ end if
+ section%next=>new_sec
+ end if
+ section=>new_sec
+ end subroutine InsertSection
+
+ !> insert a key-value pair
+ subroutine InsertValue(name,val,value)
+ use glimmer_log
+ implicit none
+ character(len=*), intent(in) :: name !< the key
+ character(len=*), intent(in) :: val !< the value
+ type(ConfigValue), pointer :: value !< on entry the element after which the new value should be added, on exit pointer the new element
+ type(ConfigValue), pointer :: new_value
+
+ allocate(new_value)
+
+ ! Assume that if we get within one space of the variable length (excluding spaces) then we may be truncating the intended value.
+ if ((len_trim(val) + 1) >= len(new_value%value)) then
+ call write_log('The value of config option ' // trim(name) // ' is too long to be read fully.' ,GM_FATAL)
+ endif
+
+ new_value%name = name
+ new_value%value = val
+
+ if(associated(value)) then
+ if (associated(value%next)) then
+ new_value%next => value%next
+ end if
+ value%next => new_value
+ end if
+ value=>new_value
+ end subroutine InsertValue
+end module glimmer_config
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_coordinates.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_coordinates.F90
new file mode 100644
index 0000000000..c16cdeee70
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_coordinates.F90
@@ -0,0 +1,322 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_coordinates.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> module for handling regular coordinate systems
+!!
+!! \author Magnus Hagdorn
+!! \date June 2006
+module glimmer_coordinates
+
+ use glimmer_global, only: dp, sp
+
+ implicit none
+
+ !> derived type describing a 2D point
+ type coord_point
+ real(kind=dp), dimension(2) :: pt !< the coordinates
+ end type coord_point
+
+ !> derived type describing a 2D integer point
+ type coord_ipoint
+ integer, dimension(2) :: pt !< the coordinates
+ end type coord_ipoint
+
+ !> type describing coordinate systems
+ type coordsystem_type
+ type(coord_point) :: origin !< origin of coordinate space
+ type(coord_point) :: delta !< stepsize in x and y direction
+ type(coord_point) :: delta_r !< reciprocal stepsize in x and y direction
+ type(coord_ipoint) :: size !< extent in x and y direction
+ end type coordsystem_type
+
+ !> interface of creating new coord system
+ interface coordsystem_new
+ module procedure coordsystem_new_real, coordsystem_new_pt
+ end interface
+
+ !> interface for allocating data for new coord system
+ interface coordsystem_allocate
+ module procedure coordsystem_allocate_d, coordsystem_allocate_s, coordsystem_allocate_i, coordsystem_allocate_l, &
+ coordsystem_allocate_d2, coordsystem_allocate_s2, coordsystem_allocate_i2
+ end interface
+
+#ifdef DEBUG_COORDS
+ character(len=msg_length), private :: message
+#endif
+
+contains
+
+ !> print coordsystem info to unit
+ subroutine coordsystem_print(coord, unit)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ integer,intent(in) :: unit !< unit to be printed to
+ write(unit,*) 'Origin ',coord%origin%pt
+ write(unit,*) 'Delta ',coord%delta%pt
+ write(unit,*) '1/Delta ',coord%delta_r%pt
+ write(unit,*) 'Size ',coord%size%pt
+ end subroutine coordsystem_print
+
+ !> create new coordinate system from individual variables
+ function coordsystem_new_real(ox, oy, dx, dy, sx, sy)
+ implicit none
+ real(kind=dp), intent(in) :: ox, oy !< coordinates of origin
+ real(kind=dp), intent(in) :: dx, dy !< offsets
+ integer, intent(in) :: sx, sy !< x and y dimension
+ type(coordsystem_type) :: coordsystem_new_real
+
+ ! origin
+ coordsystem_new_real%origin%pt(1) = ox
+ coordsystem_new_real%origin%pt(2) = oy
+ ! deltas
+ coordsystem_new_real%delta%pt(1) = dx
+ coordsystem_new_real%delta%pt(2) = dy
+ coordsystem_new_real%delta_r%pt(1) = 1.d0/dx
+ coordsystem_new_real%delta_r%pt(2) = 1.d0/dy
+ ! size
+ coordsystem_new_real%size%pt(1) = sx
+ coordsystem_new_real%size%pt(2) = sy
+ end function coordsystem_new_real
+
+ !> create new coordinate system from points
+ function coordsystem_new_pt(o, d, s)
+ implicit none
+ type(coord_point), intent(in) :: o !< coordinates of origin
+ type(coord_point), intent(in) :: d !< offsets
+ type(coord_ipoint), intent(in) :: s !< x and y dimension
+ type(coordsystem_type) :: coordsystem_new_pt
+
+ ! origin
+ coordsystem_new_pt%origin = o
+ ! deltas
+ coordsystem_new_pt%delta = d
+ coordsystem_new_pt%delta_r%pt(:) = 1.d0/d%pt(:)
+ ! size
+ coordsystem_new_pt%size = s
+ end function coordsystem_new_pt
+
+ !> get coordinates of node
+ function coordsystem_get_coord(coord,node)
+ use glimmer_log
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ type(coord_ipoint), intent(in) :: node !< node
+
+ type(coord_point) :: coordsystem_get_coord
+
+#ifdef DEBUG_COORDS
+ if (.not.coordsystem_node_inside(coord,node)) then
+ write(message,*) 'node (',node%pt,') not inside coord system'
+ call coordsystem_print(coord,glimmer_get_logunit())
+ call write_log(message,GM_FATAL,__FILE__,__LINE__)
+ end if
+#endif
+
+ coordsystem_get_coord%pt(:) = coord%origin%pt(:) + (node%pt(:) - 1)*coord%delta%pt(:)
+ end function coordsystem_get_coord
+
+ !> get index of nearest node given coords of a point
+ function coordsystem_get_node(coord,point)
+ use glimmer_log
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ type(coord_point), intent(in) :: point !< point
+
+ type(coord_ipoint) :: coordsystem_get_node
+
+ coordsystem_get_node%pt(:) = 1+floor(0.5+(point%pt(:)-coord%origin%pt(:))*coord%delta_r%pt(:))
+ if (coordsystem_get_node%pt(1) == coord%size%pt(1)+1) coordsystem_get_node%pt(1) = coord%size%pt(1)
+ if (coordsystem_get_node%pt(2) == coord%size%pt(2)+1) coordsystem_get_node%pt(2) = coord%size%pt(2)
+
+#ifdef DEBUG_COORDS
+ if (.not.coordsystem_node_inside(coord,coordsystem_get_node)) then
+ write(message,*) 'point (',point%pt,') not inside coord system'
+ call coordsystem_print(coord,glimmer_get_logunit())
+ call write_log(message,GM_FATAL,__FILE__,__LINE__)
+ end if
+#endif
+ end function coordsystem_get_node
+
+ !> get index of lower-left node of cell into which point falls
+ function coordsystem_get_llnode(coord,point)
+ use glimmer_log
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ type(coord_point), intent(in) :: point !< point
+
+ type(coord_ipoint) :: coordsystem_get_llnode
+
+ coordsystem_get_llnode%pt(:) = 1+floor((point%pt(:)-coord%origin%pt(:))*coord%delta_r%pt(:))
+ end function coordsystem_get_llnode
+
+ !> return true iff node is inside coord system
+ function coordsystem_node_inside(coord,node)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ type(coord_ipoint), intent(in) :: node !< node
+
+ logical coordsystem_node_inside
+
+ coordsystem_node_inside = (all(node%pt >= 1) .and. all(node%pt <= coord%size%pt))
+ end function coordsystem_node_inside
+
+ !> return true iff point is inside coord system
+ function coordsystem_point_inside(coord,point)
+ use glimmer_log
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ type(coord_point), intent(in) :: point !< point
+ logical coordsystem_point_inside
+ integer i
+
+ coordsystem_point_inside = .true.
+ do i=1,2
+ coordsystem_point_inside = (point%pt(i) >= coord%origin%pt(i)) .and. &
+ (point%pt(i) <= coord%origin%pt(i)+coord%size%pt(i)*coord%delta%pt(i))
+ if (.not.coordsystem_point_inside) then
+ exit
+ end if
+ end do
+ end function coordsystem_point_inside
+
+ !> linearise node, given coord
+ function coordsystem_linearise2d(coord,node)
+ use glimmer_log
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ type(coord_ipoint), intent(in) :: node !< node
+ integer coordsystem_linearise2d
+
+ coordsystem_linearise2d = -1
+
+#ifdef DEBUG_COORDS
+ if (.not.coordsystem_node_inside(coord,node)) then
+ write(message,*) 'node (',node%pt,') not inside coord system'
+ call write_log(message,GM_ERROR,__FILE__,__LINE__)
+ return
+ end if
+#endif
+
+ coordsystem_linearise2d = node%pt(1) + (node%pt(2)-1)*coord%size%pt(1)
+ end function coordsystem_linearise2d
+
+ !> expand linearisation
+ function coordsystem_delinearise2d(coord, ind)
+ use glimmer_log
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ integer, intent(in) :: ind !< index
+ type(coord_ipoint) :: coordsystem_delinearise2d
+
+#ifdef DEBUG_COORDS
+ if (ind < 1 .or. ind > coord%size%pt(1)*coord%size%pt(2)) then
+ write(message,*) 'index ',ind,' outside coord system'
+ call write_log(message,GM_FATAL,__FILE__,__LINE__)
+ end if
+#endif
+
+ coordsystem_delinearise2d%pt(1) = mod(ind-1,coord%size%pt(1)) + 1
+ coordsystem_delinearise2d%pt(2) = (ind-1)/coord%size%pt(1) + 1
+ end function coordsystem_delinearise2d
+
+ !> allocate memory to pointer field
+ subroutine coordsystem_allocate_d(coord, field)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ real(kind=dp), dimension(:,:), pointer :: field !< unallocated field
+
+ allocate(field(coord%size%pt(1),coord%size%pt(2)))
+ field = 0.d0
+ end subroutine coordsystem_allocate_d
+
+ !> allocate memory to pointer field
+ subroutine coordsystem_allocate_s(coord, field)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ real(kind=sp), dimension(:,:), pointer :: field !< unallocated field
+
+ allocate(field(coord%size%pt(1),coord%size%pt(2)))
+ field = 0.e0
+ end subroutine coordsystem_allocate_s
+
+ !> allocate memory to pointer field
+ subroutine coordsystem_allocate_i(coord, field)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ integer, dimension(:,:), pointer :: field !< unallocated field
+
+ allocate(field(coord%size%pt(1),coord%size%pt(2)))
+ field = 0
+ end subroutine coordsystem_allocate_i
+
+ !> allocate memory to pointer field
+ subroutine coordsystem_allocate_l(coord, field)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ logical, dimension(:,:), pointer :: field !< unallocated field
+
+ allocate(field(coord%size%pt(1),coord%size%pt(2)))
+ field = .FALSE.
+ end subroutine coordsystem_allocate_l
+
+ !> allocate memory to pointer field
+ subroutine coordsystem_allocate_d2(coord, nup, field)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ integer, intent(in) :: nup !< the number of vertical points
+ real(kind=dp), dimension(:,:,:), pointer :: field !< unallocated field
+
+ allocate(field(nup,coord%size%pt(1),coord%size%pt(2)))
+ field = 0.d0
+ end subroutine coordsystem_allocate_d2
+
+ !> allocate memory to pointer field
+ subroutine coordsystem_allocate_s2(coord, nup, field)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ integer, intent(in) :: nup !< the number of vertical points
+ real(kind=sp), dimension(:,:,:), pointer :: field !< unallocated field
+
+ allocate(field(nup,coord%size%pt(1),coord%size%pt(2)))
+ field = 0.0
+ end subroutine coordsystem_allocate_s2
+
+ !> allocate memory to pointer field
+ subroutine coordsystem_allocate_i2(coord, nup, field)
+ implicit none
+ type(coordsystem_type), intent(in) :: coord !< coordinate system
+ integer, intent(in) :: nup !< the number of vertical points
+ integer, dimension(:,:,:), pointer :: field !< unallocated field
+
+ allocate(field(nup,coord%size%pt(1),coord%size%pt(2)))
+ field = 0
+ end subroutine coordsystem_allocate_i2
+
+end module glimmer_coordinates
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_filenames.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_filenames.F90
new file mode 100644
index 0000000000..c5885959ba
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_filenames.F90
@@ -0,0 +1,151 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_filenames.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+!> Module to handle setting a working directory for glimmer
+!!
+!! \author Ian Rutt
+!! \date May 2007
+module glimmer_filenames
+
+ use glimmer_global,only: dirsep,fname_length
+
+ implicit none
+
+ character(fname_length) :: workingdir = '' !< Working directory for all file operations. Absolute paths are unaffected
+ character(fname_length) :: configdir = '' !< the directory where the config file lives and possibly other input files
+
+contains
+
+ !> initialise the config directory
+ !!
+ !! \author Magnus Hagdorn
+ !! \date September 2009
+ subroutine filenames_init(cname)
+ implicit none
+ character(len=*), intent(in) :: cname !< the configuration file name include path
+
+ ! local variables
+ integer pos
+
+ ! find the last directory separator, the remaining bit is the filename
+ pos = scan(trim(cname),dirsep,back=.true.)
+ if (pos > 0) then
+ configdir = cname(:pos)
+ end if
+
+ end subroutine filenames_init
+
+ !> prepend path to filename
+ !!
+ !! \author Magnus Hagdorn
+ !! \date September 2009
+ !!
+ !! first check if name starts with a dir separator; if so don't change name,
+ !! then check if file exists in present working directory; if so do not modify file. If it doesn't exist,
+ !! prepend config dir
+ !! \return modified file name
+ function filenames_inputname(infile)
+ implicit none
+ character(len=*), intent(in) :: infile
+ character(len=fname_length) :: filenames_inputname
+
+ logical :: fexist
+
+ filenames_inputname = trim(infile)
+
+ ! check if configdir exists
+ if (len(trim(configdir)) == 0) then
+ return
+ end if
+ ! check if path is absolute
+ !! \todo figure out absolute paths for windows
+ if (infile(1:1) == dirsep) then
+ return
+ else
+ inquire(file=infile,exist=fexist)
+ ! check if the file exists in the local directory
+ if (fexist) then
+ return
+ else
+ filenames_inputname = trim(configdir)//trim(infile)
+ end if
+ end if
+ end function filenames_inputname
+
+
+ !> set the working directory
+ subroutine glimmer_set_path(path)
+
+ use glimmer_log
+
+ character(len=*),intent(in) :: path !< the path
+
+ workingdir=path
+ call write_log('Set GLIMMER working dir to :'//trim(workingdir))
+
+ end subroutine glimmer_set_path
+
+ !> append path to working dir
+ character(200) function process_path(path)
+
+ character(*),intent(in) :: path !< the path to be appended
+
+ character(200) :: alpath
+
+ alpath=adjustl(path)
+
+ if (alpath(1:1)/=dirsep .and. trim(workingdir)/='') then
+ process_path=trim(workingdir)//dirsep//alpath
+ else
+ process_path=alpath
+ end if
+
+ end function process_path
+
+ !> returns the next free file unit between 20 and 100
+ integer function get_free_unit()
+
+ use glimmer_log
+
+
+ integer :: unit
+ logical :: op
+
+ unit = 20
+ do
+ inquire(unit,opened=op)
+ if (.not.op) exit
+ unit=unit+1
+ if (unit>=100) then
+ call write_log('No file units available',GM_FATAL,__FILE__,__LINE__)
+ end if
+ end do
+
+ get_free_unit=unit
+
+ end function get_free_unit
+
+end module glimmer_filenames
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_global.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_global.F90
new file mode 100644
index 0000000000..0b61b85c7b
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_global.F90
@@ -0,0 +1,89 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_global.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+module glimmer_global
+
+  !> Module holding global variables for Glimmer. Holds real-type
+  !> kind values, and other global code parameters.
+
+  implicit none
+
+  integer,parameter :: sp = kind(1.0)
+
+  !> Single precision --- Fortran single-precision real-type kind
+  !> value. Used internally.
+  !>
+  !> Note that if the code is being compiled with forced typing (e.g. with
+  !> the -r8 flag), then this parameter may need to be set in agreement with
+  !> that.
+
+  integer,parameter :: dp = kind(1.0d0)
+
+  !> Double precision --- Fortran double-precision real-type kind
+  !> value. Used internally.
+  !>
+  !> Note that if the code is being compiled with forced typing (e.g. with
+  !> the -r8 flag), then this parameter may need to be set in agreement
+  !> with that
+
+!WHL - Removed rk from the code, so commenting out these declarations
+!!#ifdef GLIMMER_SP
+!! integer,parameter :: rk=sp !< Precision of glimmer module --- the general Fortran real-type kind value for the Glimmer module and its interfaces.
+!!#else
+!! integer,parameter :: rk=dp !< Precision of glimmer module --- the general Fortran real-type kind value for the Glimmer module and its interfaces.
+!!#endif
+
+  integer,parameter :: size_t = kind(1)
+
+  !> Default integer kind (kind of the literal 1). Presumably intended
+  !> for size-like quantities, as the name suggests --- confirm against
+  !> usage elsewhere. (The comment previously here described the removed
+  !> rk parameter.)
+
+  integer,parameter :: fname_length=200 !< Specifies the length of character string variables used to hold filenames.
+  integer,parameter :: msg_length=500 !< length of message buffers
+
+  !> fname_length is the fixed length of all filename strings in the
+  !> code; msg_length bounds log/error message buffers.
+
+  character, parameter :: dirsep = '/'
+  !> directory separator (UNIX convention; see the \todo about Windows
+  !> absolute paths in glimmer_filenames)
+
+  character, parameter :: linefeed = achar(10) !< ASCII linefeed
+  character, parameter :: char_ret = achar(13) !< ASCII carriage-return
+  character(2), parameter :: cr_lf = char_ret//linefeed !< default newline appropriate for UNIX-type systems
+  character, parameter :: endline = linefeed
+  !> ASCII linefeed and carriage-return characters,
+  !> and set up default newline appropriate for UNIX-type systems
+
+  ! wall-clock timer start/stop values; presumably set by timing code
+  ! elsewhere --- not assigned within this module
+  real(kind=dp) :: wall_start_time, wall_stop_time
+
+end module glimmer_global
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_log.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_log.F90
new file mode 100644
index 0000000000..c3474af72e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_log.F90
@@ -0,0 +1,260 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_log.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> module providing file logging and error/message handling
+!!
+!! Six levels of message/error are defined:
+!! - Diagnostic messages
+!! - Timestep enumeration and related information
+!! - Information messages
+!! - Warning messages
+!! - Error messages
+!! - Fatal error messages
+!!
+!! These are numbered 1--6, with increasing severity, and the level of
+!! message output may be set to output all messages, only those above a particular
+!! severity, or none at all. It should be noted that even if all messages are
+!! turned off, the model will still halt if it encounters a fatal
+!! error!
+!!
+!! The other point to note is that when calling the messaging routines,
+!! the numerical identifier of a message level should be replaced by the
+!! appropriate parameter:
+!! - GM_DIAGNOSTIC
+!! - GM_TIMESTEP
+!! - GM_INFO
+!! - GM_WARNING
+!! - GM_ERROR
+!! - GM_FATAL
+module glimmer_log
+
+  use glimmer_global, only : fname_length,dirsep
+
+  implicit none
+
+  ! message severity levels, in increasing order of severity; they
+  ! index msg_prefix and gm_show below
+  integer,parameter :: GM_DIAGNOSTIC = 1 !< Numerical identifier for diagnostic messages.
+  integer,parameter :: GM_TIMESTEP = 2 !< Numerical identifier for timestep messages.
+  integer,parameter :: GM_INFO = 3 !< Numerical identifier for information messages.
+  integer,parameter :: GM_WARNING = 4 !< Numerical identifier for warning messages.
+  integer,parameter :: GM_ERROR = 5 !< Numerical identifier for (non-fatal) error messages.
+  integer,parameter :: GM_FATAL = 6 !< Numerical identifier for fatal error messages.
+
+  integer, parameter :: GM_levels = 6 !< the number of logging levels
+  ! gm_show(i) controls whether level-i messages are echoed to stdout
+  ! (set via glimmer_set_msg_level); messages are written to the log
+  ! file regardless
+  logical, private, dimension(GM_levels) :: gm_show = .false.
+
+  ! index 0 holds the prefix used for out-of-range message levels
+  character(len=*), parameter, dimension(0:GM_levels), private :: msg_prefix = (/ &
+       '* UNKNOWN ', &
+       '* ', &
+       '* ', &
+       ' ', &
+       '* WARNING: ', &
+       '* ERROR: ', &
+       '* FATAL ERROR :' /) !< array containing log level names
+
+
+  character(len=fname_length),private :: glimmer_logname !< name of log file
+  integer,private :: glimmer_unit = 6 !< log unit (default 6: no separate log file is opened, see open_log)
+
+contains
+  !> derives name of log file from file name by stripping directories and appending .log
+  function logname(fname)
+    implicit none
+    character(len=*), intent(in) :: fname !< the file name
+    character(len=fname_length) :: logname
+
+    character(len=*), parameter :: suffix='.log'
+    integer :: sep_pos
+
+    ! position of the last directory separator; 0 when there is none,
+    ! in which case fname(1:) is simply the whole name
+    sep_pos = scan(fname,dirsep,back=.true.)
+    logname = trim(fname(sep_pos+1:))//suffix
+  end function logname
+
+  !> opens log file
+  !!
+  !! Stores the requested unit/file name in module state, opens the
+  !! file (main task only, and only when the unit is not 6, which is
+  !! left connected to standard output) and writes a timestamped
+  !! banner.
+  subroutine open_log(unit,fname)
+    use parallel
+    implicit none
+    integer, optional :: unit !< file unit to use
+    character(len=*), optional :: fname !< name of log file (default 'glide.log')
+
+    ! local variables
+    character(len=8) :: date
+    character(len=10) :: time
+
+    if (present(unit)) then
+       glimmer_unit = unit
+    end if
+    if (present(fname)) then
+       glimmer_logname = adjustl(trim(fname))
+    else
+       glimmer_logname = 'glide.log'
+    end if
+
+    ! only the main task opens the file; unit 6 is assumed to be
+    ! already connected (standard output), so no open is needed
+    if ((main_task).and.(glimmer_unit /= 6)) then
+       open(unit=glimmer_unit,file=glimmer_logname,status='unknown')
+    end if
+
+    ! timestamped banner between divider lines
+    call date_and_time(date,time)
+    call write_log_div
+    if (main_task) write(unit=glimmer_unit,fmt="(a,a4,'-',a2,'-',a2,' ',a2,':',a2,':',a6)") ' Started logging at ',&
+         date(1:4),date(5:6),date(7:8),time(1:2),time(3:4),time(5:10)
+    call write_log_div
+  end subroutine open_log
+
+  !> write a message to the log file, and optionally to stdout
+  !!
+  !! The message is prefixed according to its severity level; a fatal
+  !! message closes the log and stops the run via parallel_stop.
+  subroutine write_log(message,type,file,line)
+    use glimmer_global, only : msg_length
+    use parallel
+    implicit none
+    integer,intent(in),optional :: type !< Type of error to be generated (see list above).
+    character(len=*),intent(in) :: message !< message to be written
+    character(len=*),intent(in),optional :: file !< the name of the file which triggered the message
+    integer,intent(in),optional :: line !< the line number at the which the message was triggered
+
+    ! local variables
+    character(len=msg_length) :: msg
+    integer :: local_type
+    character(len=6) :: line_num
+
+    ! map the requested level onto a valid index into msg_prefix;
+    ! level 0 ('UNKNOWN') is used for out-of-range requests
+    local_type = 0
+    if (present(type)) then
+       ! bug fix: this test used '.or.', which is always true and let
+       ! an out-of-range type index msg_prefix out of bounds
+       if (type >= 1 .and. type <= GM_levels) then
+          local_type = type
+       end if
+    else
+       local_type = GM_INFO
+    end if
+
+    ! constructing message
+    if (present(file) .and. present(line)) then
+       if (main_task) write(*,*)"Logged at",file,line
+       write(line_num,'(I6)')line
+       write(msg,*) trim(msg_prefix(local_type))//' (',trim(file),':',trim(adjustl(line_num)),') '//trim(message)
+    else
+       write(msg,*) trim(msg_prefix(local_type))//' '//trim(message)
+    end if
+
+    ! messages are always written to file log
+    if (main_task) write(glimmer_unit,*) trim(msg)
+
+    ! and maybe to std out, depending on the gm_show mask
+    if (local_type /= 0) then
+       if ((main_task).and.(gm_show(local_type))) write(*,*) trim(msg)
+    end if
+
+    ! stop logging if we encountered a fatal error
+    if (local_type == GM_FATAL) then
+       if (main_task) write(*,*) "Fatal error encountered, exiting..."
+       call close_log
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+  end subroutine write_log
+
+  !> write a divider line to the log, marking the start of a new section
+  subroutine write_log_div
+    use parallel
+    implicit none
+    ! only the main task writes to the log
+    if (.not. main_task) return
+    write(glimmer_unit,*) '*******************************************************************************'
+  end subroutine write_log_div
+
+  !> close log file
+  !!
+  !! Writes a timestamped 'finished' banner and closes the log unit
+  !! (main task only).
+  subroutine close_log
+    use parallel
+    implicit none
+    ! local variables
+    character(len=8) :: date
+    character(len=10) :: time
+
+    call date_and_time(date,time)
+    call write_log_div
+    if (main_task) write(unit=glimmer_unit,fmt="(a,a4,'-',a2,'-',a2,' ',a2,':',a2,':',a6)") ' Finished logging at ',&
+         date(1:4),date(5:6),date(7:8),time(1:2),time(3:4),time(5:10)
+    call write_log_div
+
+    ! note: this closes whatever unit glimmer_unit refers to, even
+    ! unit 6 (stdout) --- callers are expected to stop soon after
+    if (main_task) close(glimmer_unit)
+  end subroutine close_log
+
+  !> synchronise log to disk
+  !!
+  !! Flushes buffered output by closing the log file and reopening it
+  !! in append mode.
+  subroutine sync_log
+    implicit none
+    ! bug fix: when logging goes to unit 6 no file was ever opened by
+    ! open_log; closing it would disconnect standard output and the
+    ! subsequent open with status='old' would fail
+    if (glimmer_unit == 6) return
+    close(glimmer_unit)
+    open(unit=glimmer_unit,file=glimmer_logname, position="append", status='old')
+  end subroutine sync_log
+
+  !> Sets the output message level.
+  !!
+  !! Enables stdout echoing for the 'level' most severe categories:
+  !! entry i is shown exactly when i > GM_levels - level.
+  subroutine glimmer_set_msg_level(level)
+    integer, intent(in) :: level !< The message level (6 is all messages; 0 is no messages).
+    integer :: i
+
+    gm_show = (/ (i > (GM_levels-level), i = 1, GM_levels) /)
+
+  end subroutine glimmer_set_msg_level
+
+  !> return the unit number currently used for logging
+  function glimmer_get_logunit() result(unit)
+    implicit none
+    integer :: unit
+
+    unit = glimmer_unit
+  end function glimmer_get_logunit
+
+  !> point the logger at an already-open unit
+  subroutine set_glimmer_unit(unit)
+
+    ! This subroutine should be called when the log file is already open, but glimmer_unit
+    ! needs to be set to a desired value (e.g. for CESM coupled runs).
+    ! Unlike open_log, no file is opened here; a timestamped banner is
+    ! written to the new unit.
+    use parallel
+    implicit none
+    integer, optional :: unit !< file unit to use
+
+    ! local variables
+    character(len=8) :: date
+    character(len=10) :: time
+
+    if (present(unit)) then
+       glimmer_unit = unit
+    end if
+
+    ! timestamped banner between divider lines
+    call date_and_time(date,time)
+    call write_log_div
+    if (main_task) write(unit=glimmer_unit,fmt="(a,a4,'-',a2,'-',a2,' ',a2,':',a2,':',a6)") &
+         ' Started logging at ',&
+         date(1:4),date(5:6),date(7:8),time(1:2),time(3:4),time(5:10)
+    call write_log_div
+  end subroutine set_glimmer_unit
+
+end module glimmer_log
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_map_CFproj.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_map_CFproj.F90
new file mode 100644
index 0000000000..f56f30cee3
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_map_CFproj.F90
@@ -0,0 +1,412 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_map_CFproj.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> Holds derived types and subroutines
+!! necessary for handling map projections.
+!!
+!! Most of the component
+!! names of the various derived types are self-explanatory.
+!! Note that this doesn't currently interface with the proj4
+!! library in anyway, it simply handles NetCDF data and projection
+!! parameters in an appropriate format.
+module glimmer_map_CFproj
+
+  use glimmer_map_types
+  use glimmer_ncdf, only: nc_errorhandle
+
+  implicit none
+
+  ! only the CF read/write entry points are public; the per-projection
+  ! readers and writers below are implementation details
+  private
+  public glimmap_CFGetProj,glimmap_CFPutProj
+
+contains
+
+  !EIB! added use glimmer_ncdf to access nc_errorhandle, not sure if/when it
+  !moved
+  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+  ! public functions
+  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+  !> Read projection from a given netCDF file, returning
+  !! an instance of type glimmap_proj.
+  !!
+  !! The file's variables are scanned for a CF 'grid_mapping_name'
+  !! attribute; when one is found the matching parameter reader and
+  !! initialiser are invoked. %found is set to .false. when no mapping
+  !! (or an unsupported one) is present.
+  !! \return Derived type instance containing projection parameters
+  function glimmap_CFGetProj(ncid)
+
+    use parallel
+    use glimmer_log
+    use glimmer_map_init
+
+    implicit none
+
+    type(glimmap_proj) :: glimmap_CFGetProj
+    integer, intent(in) :: ncid !< Handle of the file to be read.
+
+    !local variables
+    integer status
+    integer nvars, varid
+    integer natts, attid
+    logical found_map
+    character(len=50) :: attname,mapname
+
+    ! getting variables
+    status = parallel_inquire(ncid,nvariables=nvars)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+
+    ! looping over variables, looking for the first one that carries a
+    ! 'grid_mapping_name' attribute
+    found_map=.false.
+    do varid=1,nvars
+       status = parallel_inquire_variable(ncid,varid,natts=natts)
+       ! and loop over attributes
+       do attid=1,natts
+          status = parallel_inq_attname(ncid,varid,attid,attname)
+          if (trim(attname) == 'grid_mapping_name') then
+             found_map = .true.
+             status = parallel_get_att(ncid,varid,attname,mapname)
+             mapname = adjustl(mapname)
+             call nc_errorhandle(__FILE__,__LINE__,status)
+             exit
+          end if
+       end do
+       if (found_map) exit
+    end do
+
+    if (found_map) then
+       glimmap_CFGetProj%found = .true.
+       ! note: index() does substring matching, so the more specific
+       ! 'polar_stereographic' must be tested before 'stereographic'
+       if (index(mapname,'lambert_azimuthal_equal_area') /= 0) then
+          glimmap_CFGetProj%laea => CFproj_get_laea(ncid,varid)
+          call glimmap_laea_init(glimmap_CFGetProj%laea)
+       else if (index(mapname,'albers_conical_equal_area') /= 0) then
+          glimmap_CFGetProj%aea => CFproj_get_aea(ncid,varid)
+          call glimmap_aea_init(glimmap_CFGetProj%aea)
+       else if (index(mapname,'lambert_conformal_conic') /= 0) then
+          glimmap_CFGetProj%lcc => CFproj_get_lcc(ncid,varid)
+          call glimmap_lcc_init(glimmap_CFGetProj%lcc)
+       else if (index(mapname,'polar_stereographic') /= 0) then
+          glimmap_CFGetProj%stere => CFproj_get_stere_polar(ncid,varid)
+          call glimmap_stere_init(glimmap_CFGetProj%stere)
+       else if (index(mapname,'stereographic') /= 0) then
+          glimmap_CFGetProj%stere => CFproj_get_stere(ncid,varid)
+          call glimmap_stere_init(glimmap_CFGetProj%stere)
+       else
+          glimmap_CFGetProj%found = .false.
+          call write_log('Do not know about this projection: '//(mapname),GM_ERROR)
+       end if
+    else
+       glimmap_CFGetProj%found = .false.
+       call write_log('No map projection found',GM_WARNING)
+    end if
+  end function glimmap_CFGetProj
+
+ !-------------------------------------------------------------------------
+
+  !> write projection to a netCDF file.
+  !!
+  !! Dispatches to the writer matching whichever projection component
+  !! of 'proj' is associated; logs a warning when none is.
+  subroutine glimmap_CFPutProj(ncid,mapid,proj)
+
+    use glimmer_log
+
+    implicit none
+
+    type(glimmap_proj) :: proj !< Projection to be written.
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    if (.not.glimmap_allocated(proj)) then
+       call write_log('No known projection found!',GM_WARNING)
+       return
+    end if
+
+    ! exactly one branch runs; no early returns are needed since the
+    ! chain is mutually exclusive
+    if (associated(proj%laea)) then
+       call CFproj_put_laea(ncid,mapid,proj%laea)
+    else if (associated(proj%aea)) then
+       call CFproj_put_aea(ncid,mapid,proj%aea)
+    else if (associated(proj%lcc)) then
+       call CFproj_put_lcc(ncid,mapid,proj%lcc)
+    else if (associated(proj%stere)) then
+       call CFproj_put_stere(ncid,mapid,proj%stere)
+    else
+       call write_log('No known projection found!',GM_WARNING)
+    end if
+  end subroutine glimmap_CFPutProj
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! private readers
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+  !> get parameters for stereographic projection
+  !!
+  !! Reads the CF attributes of a (non-polar) stereographic mapping;
+  !! every read status is passed to nc_errorhandle.
+  !! \return freshly allocated parameter structure; the caller takes
+  !! ownership of the pointer
+  function CFproj_get_stere(ncid,mapid)
+    use parallel
+
+    implicit none
+    type(proj_stere), pointer :: CFproj_get_stere
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+
+    allocate(CFproj_get_stere)
+    status = parallel_get_att(ncid,mapid,'false_easting',CFproj_get_stere%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'false_northing',CFproj_get_stere%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    ! CF names the attribute 'longitude_of_projection_origin'; it is
+    ! stored in the shared longitude_of_central_meridian component
+    status = parallel_get_att(ncid,mapid,'longitude_of_projection_origin',CFproj_get_stere%longitude_of_central_meridian)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'latitude_of_projection_origin',CFproj_get_stere%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'scale_factor_at_projection_origin',CFproj_get_stere%scale_factor_at_proj_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+
+  end function CFproj_get_stere
+
+  !> get parameters for polar stereographic projection
+  !!
+  !! Reads the CF attributes of a polar stereographic mapping. The
+  !! latitude of origin must be +/-90; exactly one of
+  !! scale_factor_at_projection_origin / standard_parallel may be set
+  !! (both optional attributes are read without error handling, so a
+  !! missing one is tolerated).
+  !! \return freshly allocated parameter structure; the caller takes
+  !! ownership of the pointer
+  function CFproj_get_stere_polar(ncid,mapid)
+    use parallel
+    use glimmer_global, only: dp
+    use glimmer_log
+
+    implicit none
+    type(proj_stere), pointer :: CFproj_get_stere_polar
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+    real(dp) :: dummy
+
+    allocate(CFproj_get_stere_polar)
+    status = parallel_get_att(ncid,mapid,'false_easting',CFproj_get_stere_polar%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'false_northing',CFproj_get_stere_polar%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'straight_vertical_longitude_from_pole',CFproj_get_stere_polar%longitude_of_central_meridian)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    ! default before the read; note the attribute is still required
+    ! since its status goes through nc_errorhandle
+    CFproj_get_stere_polar%latitude_of_projection_origin=90.0
+    status = parallel_get_att(ncid,mapid,'latitude_of_projection_origin',CFproj_get_stere_polar%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    if (abs(abs(CFproj_get_stere_polar%latitude_of_projection_origin)-90.0)>0.001) then
+       call write_log('Error (polar stereographic projection) latitude of origin must be +-90.0',&
+            GM_FATAL,__FILE__,__LINE__)
+    end if
+    ! the two optional attributes: only copy when actually present
+    status = parallel_get_att(ncid,mapid,'scale_factor_at_projection_origin',dummy)
+    if (status == NF90_NOERR) then
+       CFproj_get_stere_polar%scale_factor_at_proj_origin = dummy
+    end if
+    status = parallel_get_att(ncid,mapid,'standard_parallel',dummy)
+    if (status == NF90_NOERR) then
+       CFproj_get_stere_polar%standard_parallel = dummy
+    end if
+    ! this mutual-exclusion test presumes both components default to
+    ! zero in proj_stere --- defined in glimmer_map_types (not visible
+    ! here); confirm if modifying
+    if (CFproj_get_stere_polar%standard_parallel /= 0 .and. CFproj_get_stere_polar%scale_factor_at_proj_origin /= 0.) then
+       call write_log('Error (stereographic projection), can only handle either standard_parallel or scale_at_orig',&
+            GM_FATAL,__FILE__,__LINE__)
+    end if
+  end function CFproj_get_stere_polar
+
+  !> get parameters for Lambert azimuthal equal area projection
+  !!
+  !! All four CF attributes are required (each status goes through
+  !! nc_errorhandle).
+  !! \return freshly allocated parameter structure; the caller takes
+  !! ownership of the pointer
+  function CFproj_get_laea(ncid,mapid)
+    use parallel
+
+    implicit none
+    type(proj_laea), pointer :: CFproj_get_laea
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+    allocate(CFproj_get_laea)
+    status = parallel_get_att(ncid,mapid,'false_easting',CFproj_get_laea%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'false_northing',CFproj_get_laea%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'longitude_of_projection_origin',CFproj_get_laea%longitude_of_central_meridian)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'latitude_of_projection_origin',CFproj_get_laea%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+  end function CFproj_get_laea
+
+  !> get parameters for Albers conical equal area projection
+  !!
+  !! All five CF attributes are required (each status goes through
+  !! nc_errorhandle).
+  !! \return freshly allocated parameter structure; the caller takes
+  !! ownership of the pointer
+  function CFproj_get_aea(ncid,mapid)
+    use parallel
+    implicit none
+    type(proj_aea), pointer :: CFproj_get_aea
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+    allocate(CFproj_get_aea)
+    status = parallel_get_att(ncid,mapid,'false_easting',CFproj_get_aea%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'false_northing',CFproj_get_aea%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'longitude_of_central_meridian',CFproj_get_aea%longitude_of_central_meridian)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'latitude_of_projection_origin',CFproj_get_aea%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'standard_parallel',CFproj_get_aea%standard_parallel)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+  end function CFproj_get_aea
+
+  !> get parameters for Lambert conformal conic projection
+  !!
+  !! All five CF attributes are required (each status goes through
+  !! nc_errorhandle).
+  !! \return freshly allocated parameter structure; the caller takes
+  !! ownership of the pointer
+  function CFproj_get_lcc(ncid,mapid)
+    use parallel
+    implicit none
+    type(proj_lcc), pointer :: CFproj_get_lcc
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+    allocate(CFproj_get_lcc)
+    status = parallel_get_att(ncid,mapid,'false_easting',CFproj_get_lcc%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'false_northing',CFproj_get_lcc%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'longitude_of_central_meridian',CFproj_get_lcc%longitude_of_central_meridian)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'latitude_of_projection_origin',CFproj_get_lcc%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_get_att(ncid,mapid,'standard_parallel',CFproj_get_lcc%standard_parallel)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+  end function CFproj_get_lcc
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! private subroutines to write projection info
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+  !> put parameters for stereographic projection
+  !!
+  !! Writes either the polar or the general stereographic attribute set
+  !! depending on stere%pole; stere%pole /= 0 selects the polar form.
+  subroutine CFproj_put_stere(ncid,mapid,stere)
+    use parallel
+    implicit none
+    type(proj_stere), pointer :: stere !< the derived type containing projection parameters
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+
+    if (stere%pole/=0) then
+       status = parallel_put_att(ncid,mapid,'grid_mapping_name','polar_stereographic')
+    else
+       status = parallel_put_att(ncid,mapid,'grid_mapping_name','stereographic')
+    end if
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'false_easting',stere%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'false_northing',stere%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    ! the longitude attribute name differs between the two CF forms
+    if (stere%pole/=0) then
+       status = parallel_put_att(ncid,mapid,'straight_vertical_longitude_from_pole',stere%longitude_of_central_meridian)
+    else
+       status = parallel_put_att(ncid,mapid,'longitude_of_projection_origin',stere%longitude_of_central_meridian)
+    end if
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'latitude_of_projection_origin',stere%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    ! polar form: write standard_parallel when set, otherwise the scale
+    ! factor (mirrors the exclusivity enforced by the reader)
+    if (stere%pole/=0) then
+       if (stere%standard_parallel /= 0) then
+          status = parallel_put_att(ncid,mapid,'standard_parallel',stere%standard_parallel)
+       else
+          status = parallel_put_att(ncid,mapid,'scale_factor_at_projection_origin',stere%scale_factor_at_proj_origin)
+       end if
+    else
+       status = parallel_put_att(ncid,mapid,'scale_factor_at_projection_origin',stere%scale_factor_at_proj_origin)
+    end if
+    call nc_errorhandle(__FILE__,__LINE__,status)
+  end subroutine CFproj_put_stere
+
+  !> put parameters for Lambert azimuthal equal area projection
+  !!
+  !! Mirrors CFproj_get_laea: writes grid_mapping_name plus the four
+  !! CF attributes; each status goes through nc_errorhandle.
+  subroutine CFproj_put_laea(ncid,mapid,laea)
+    use parallel
+    implicit none
+    type(proj_laea), pointer :: laea !< the derived type containing projection parameters
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+
+    status = parallel_put_att(ncid,mapid,'grid_mapping_name','lambert_azimuthal_equal_area')
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'false_easting',laea%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'false_northing',laea%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'longitude_of_projection_origin',laea%longitude_of_central_meridian)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'latitude_of_projection_origin',laea%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+  end subroutine CFproj_put_laea
+
+  !> put parameters for Albers conical equal area projection
+  !!
+  !! Mirrors CFproj_get_aea: writes grid_mapping_name plus the five
+  !! CF attributes; each status goes through nc_errorhandle.
+  subroutine CFproj_put_aea(ncid,mapid,aea)
+    use parallel
+    implicit none
+    type(proj_aea), pointer :: aea !< the derived type containing projection parameters
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+
+    status = parallel_put_att(ncid,mapid,'grid_mapping_name','albers_conical_equal_area')
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'false_easting',aea%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'false_northing',aea%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'longitude_of_central_meridian',aea%longitude_of_central_meridian)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'latitude_of_projection_origin',aea%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'standard_parallel',aea%standard_parallel)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+  end subroutine CFproj_put_aea
+
+  !> put parameters for Lambert conformal conic projection
+  !!
+  !! Mirrors CFproj_get_lcc: writes grid_mapping_name plus the five
+  !! CF attributes; each status goes through nc_errorhandle.
+  subroutine CFproj_put_lcc(ncid,mapid,lcc)
+    use parallel
+    implicit none
+    type(proj_lcc), pointer :: lcc !< the derived type containing projection parameters
+    integer, intent(in) :: ncid !< Handle of netCDF file.
+    integer, intent(in) :: mapid !< Handle of map projection in netCDF file.
+
+    integer status
+
+    status = parallel_put_att(ncid,mapid,'grid_mapping_name','lambert_conformal_conic')
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'false_easting',lcc%false_easting)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'false_northing',lcc%false_northing)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'longitude_of_central_meridian',lcc%longitude_of_central_meridian)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'latitude_of_projection_origin',lcc%latitude_of_projection_origin)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_put_att(ncid,mapid,'standard_parallel',lcc%standard_parallel)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+  end subroutine CFproj_put_lcc
+
+end module glimmer_map_CFproj
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_map_init.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_map_init.F90
new file mode 100644
index 0000000000..1e66b237f5
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_map_init.F90
@@ -0,0 +1,455 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_map_init.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> initialise map projection routines
+module glimmer_map_init
+
+ use glimmer_map_types
+
+ implicit none
+
+contains
+
+ !> read projection configuration from file
+ subroutine glimmap_readconfig(proj,config,dx,dy)
+ use glimmer_config
+ use glimmer_log
+ use glimmer_global, only: dp
+ implicit none
+ type(glimmap_proj),intent(inout) :: proj !< The projection parameters to be initialised
+ type(ConfigSection), pointer :: config !< structure holding sections of configuration file
+ real(dp),intent(in) :: dx !< grid resolution in x
+ real(dp),intent(in) :: dy !< grid resolution in y
+
+ ! local variables
+ type(ConfigSection), pointer :: section
+ real(dp) :: lonc,latc,efalse,nfalse,stdp1,stdp2,scale_factor,cpx,cpy
+ real(dp),dimension(:),pointer :: std_par
+ character(10) :: ptype
+ logical :: stdp,scfac
+ integer :: ptval,ptold
+
+ ! Defaults: everything zero/empty so "value not given" is detectable
+ ! below (scale_factor==0 means "no scale factor supplied", etc.)
+ ptype = ''
+ lonc = 0.d0 ; latc = 0.d0
+ efalse = 0.d0 ; nfalse = 0.d0
+ std_par => null()
+ scale_factor = 0.d0
+ stdp1 = 0.d0
+ stdp2 = 0.d0
+
+ ! Preferred, CF-style [projection] section
+ call GetSection(config,section,'projection')
+ if (associated(section)) then
+ call GetValue(section,'type',ptype)
+ call GetValue(section,'centre_longitude',lonc)
+ call GetValue(section,'centre_latitude',latc)
+ call GetValue(section,'false_easting',efalse)
+ call GetValue(section,'false_northing',nfalse)
+ call GetValue(section,'standard_parallel',std_par)
+ call GetValue(section,'scale_factor',scale_factor)
+
+ ! Parse the projection type
+ ! NOTE: LAEA must be tested before AEA, since 'LAEA' contains 'AEA'.
+ if (index(ptype,'LAEA')/=0 .or. index(ptype,'laea')/=0) then
+ ptval = GMAP_LAEA
+ else if (index(ptype,'AEA')/=0 .or. index(ptype,'aea')/=0) then
+ ptval = GMAP_AEA
+ else if (index(ptype,'LCC')/=0 .or. index(ptype,'lcc')/=0) then
+ ptval = GMAP_LCC
+ else if (index(ptype,'STERE')/=0 .or. index(ptype,'stere')/=0) then
+ ptval = GMAP_STERE
+ else
+ call write_log('Unrecognised type in [projection]', &
+ GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ ! Deal with presence or not of standard parallel(s)
+ ! (one value means both parallels coincide; >2 is an error)
+ if (associated(std_par)) then
+ stdp = .true.
+ select case (size(std_par))
+ case(1)
+ stdp1 = std_par(1) ; stdp2 = std_par(1)
+ case(2)
+ stdp1 = std_par(1) ; stdp2 = std_par(2)
+ case(0)
+ stdp=.false.
+ case default
+ call write_log('More than two Standard parallels given', &
+ GM_FATAL,__FILE__,__LINE__)
+ end select
+ else
+ stdp = .false.
+ end if
+
+ ! Deal with scale factor
+ if (scale_factor /= 0.d0) then
+ scfac = .true.
+ else
+ scfac = .false.
+ end if
+
+ else
+ ! Deprecated [GLINT projection] section, kept for backward compatibility
+ call GetSection(config,section,'GLINT projection')
+ if(.not.associated(section)) return
+ call write_log('Using [GLINT projection] config section',GM_WARNING)
+ call write_log('This config option has been deprecated, and will be removed at some point.',GM_WARNING)
+ call write_log('Use [projection] instead',GM_WARNING)
+ call GetValue(section,'projection',ptold)
+ call GetValue(section,'lonc',lonc)
+ call GetValue(section,'latc',latc)
+ ! NOTE(review): cpx/cpy are not pre-initialised; if the section omits
+ ! 'cpx'/'cpy' they are used undefined below -- TODO confirm GetValue
+ ! leaves its argument untouched when the key is absent.
+ call GetValue(section,'cpx',cpx)
+ call GetValue(section,'cpy',cpy)
+ call GetValue(section,'std_parallel',stdp1)
+ select case(ptold)
+ case(1)
+ ptval = GMAP_LAEA
+ case(2:4)
+ ptval = GMAP_STERE
+ case default
+ call write_log('Unsupported projection in [GLINT projection] config section',GM_FATAL)
+ end select
+ ! Convert old-style centre-point grid indices to false easting/northing
+ efalse = dx*(cpx-1)
+ nfalse = dy*(cpy-1)
+ if (stdp1 /= 0.d0) then
+ stdp2 = stdp1
+ stdp = .true.
+ else
+ stdp = .false.
+ end if
+ scfac=.false.
+ end if
+
+
+ ! Check for conflict
+
+ if (stdp.and.scfac) then
+ call write_log('You cannot specify both a standard parallel and a scale factor.', &
+ GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ ! Initialise the projection
+
+ if (stdp) then
+ call glimmap_proj_define(proj,ptval, &
+ lonc,latc,efalse,nfalse, &
+ standard_parallel = stdp1, &
+ standard_parallel_2 = stdp2)
+ else if (scfac) then
+ call glimmap_proj_define(proj,ptval, &
+ lonc,latc,efalse,nfalse, &
+ scale_factor_at_proj_origin = scale_factor)
+ else
+ call glimmap_proj_define(proj,ptval, &
+ lonc,latc,efalse,nfalse)
+ end if
+
+ end subroutine glimmap_readconfig
+
+ !-------------------------------------------------------------------------
+
+ !> print projection info to log
+ subroutine glimmap_printproj(proj)
+ use glimmer_log
+ use glimmer_global, only : msg_length
+
+ type(glimmap_proj),intent(in) :: proj !< the projection
+
+ character(len=msg_length) :: message
+
+ ! Write a human-readable summary of whichever projection variant is
+ ! associated in proj. Exactly one of laea/aea/lcc/stere should be set.
+ call write_log('Projection')
+ call write_log('----------')
+ if (.not.proj%found) then
+ call write_log('No projection found')
+ return
+ end if
+
+ if (associated(proj%laea)) then
+
+ call write_log('Type: Lambert Azimuthal Equal Area')
+ write(message,*)'Longitude of central meridian: ',proj%laea%longitude_of_central_meridian
+ call write_log(message)
+ write(message,*)'Latitude of projection origin: ',proj%laea%latitude_of_projection_origin
+ call write_log(message)
+ write(message,*)'False easting: ',proj%laea%false_easting
+ call write_log(message)
+ write(message,*)'False northing: ',proj%laea%false_northing
+ call write_log(message)
+
+ else if (associated(proj%aea)) then
+
+ call write_log('Type: Albers Equal Area Conic')
+ write(message,*)'Longitude of central meridian: ',proj%aea%longitude_of_central_meridian
+ call write_log(message)
+ write(message,*)'Latitude of projection origin: ',proj%aea%latitude_of_projection_origin
+ call write_log(message)
+ write(message,*)'False easting: ',proj%aea%false_easting
+ call write_log(message)
+ write(message,*)'False northing: ',proj%aea%false_northing
+ call write_log(message)
+ write(message,*)'Standard parallels: ', &
+ proj%aea%standard_parallel(1),proj%aea%standard_parallel(2)
+ call write_log(message)
+
+ else if (associated(proj%lcc)) then
+
+ call write_log('Type: Lambert Conformal Conic')
+ write(message,*)'Longitude of central meridian: ',proj%lcc%longitude_of_central_meridian
+ call write_log(message)
+ write(message,*)'Latitude of projection origin: ',proj%lcc%latitude_of_projection_origin
+ call write_log(message)
+ write(message,*)'False easting: ',proj%lcc%false_easting
+ call write_log(message)
+ write(message,*)'False northing: ',proj%lcc%false_northing
+ call write_log(message)
+ write(message,*)'Standard parallels: ', &
+ proj%lcc%standard_parallel(1),proj%lcc%standard_parallel(2)
+ call write_log(message)
+
+ else if (associated(proj%stere)) then
+
+ call write_log('Type: Stereographic')
+ write(message,*)'Longitude of central meridian: ',proj%stere%longitude_of_central_meridian
+ call write_log(message)
+ write(message,*)'Latitude of projection origin: ',proj%stere%latitude_of_projection_origin
+ call write_log(message)
+ write(message,*)'False easting: ',proj%stere%false_easting
+ call write_log(message)
+ write(message,*)'False northing: ',proj%stere%false_northing
+ call write_log(message)
+ write(message,*)'Standard parallel: ',proj%stere%standard_parallel
+ call write_log(message)
+ write(message,*)'Scale factor: ',proj%stere%scale_factor_at_proj_origin
+ ! Fix: this write_log was missing, so the scale factor was formatted
+ ! into 'message' but never actually written to the log.
+ call write_log(message)
+
+ end if
+
+ end subroutine glimmap_printproj
+
+ !-------------------------------------------------------------------------
+
+ !> Defines a projection from scratch, and initialises
+ !! the other elements appropriately.
+ subroutine glimmap_proj_define(cfp,ptype, &
+ longitude_of_central_meridian, &
+ latitude_of_projection_origin, &
+ false_easting, &
+ false_northing, &
+ scale_factor_at_proj_origin, &
+ standard_parallel, &
+ standard_parallel_2)
+
+ use glimmer_log
+
+ type(glimmap_proj),intent(inout) :: cfp !< the projection data type
+ integer,intent(in) :: ptype !< the projection ID
+ real(dp),intent(in) :: longitude_of_central_meridian !< the longitude of the central meridian
+ real(dp),intent(in) :: latitude_of_projection_origin !< the latitude of the projection origin
+ real(dp),intent(in) :: false_easting !< false easting
+ real(dp),intent(in) :: false_northing !< false northing
+ real(dp),optional,intent(in) :: scale_factor_at_proj_origin !< scale factor
+ real(dp),optional,intent(in) :: standard_parallel !< standard parallel 1
+ real(dp),optional,intent(in) :: standard_parallel_2 !< standard parallel 2
+
+
+ ! Discard any previously defined projection so only one variant
+ ! pointer is associated after this call.
+ if (associated(cfp%laea)) deallocate(cfp%laea)
+ if (associated(cfp%aea)) deallocate(cfp%aea)
+ if (associated(cfp%lcc)) deallocate(cfp%lcc)
+ if (associated(cfp%stere)) deallocate(cfp%stere)
+
+ cfp%found = .true.
+ select case(ptype)
+ case(GMAP_LAEA)
+ allocate(cfp%laea)
+ cfp%laea%longitude_of_central_meridian = longitude_of_central_meridian
+ cfp%laea%latitude_of_projection_origin = latitude_of_projection_origin
+ cfp%laea%false_easting = false_easting
+ cfp%laea%false_northing = false_northing
+ call glimmap_laea_init(cfp%laea)
+ case(GMAP_AEA)
+ allocate(cfp%aea)
+ cfp%aea%longitude_of_central_meridian = longitude_of_central_meridian
+ cfp%aea%latitude_of_projection_origin = latitude_of_projection_origin
+ cfp%aea%false_easting = false_easting
+ cfp%aea%false_northing = false_northing
+ ! A single supplied parallel is duplicated into both slots
+ if (present(standard_parallel).and.present(standard_parallel_2)) then
+ cfp%aea%standard_parallel = (/ standard_parallel,standard_parallel_2 /)
+ else if (present(standard_parallel).and..not.present(standard_parallel_2)) then
+ cfp%aea%standard_parallel = (/ standard_parallel,standard_parallel /)
+ else
+ call write_log('Albers Equal Area: you must supply at least one standard parallel',&
+ GM_FATAL,__FILE__,__LINE__)
+ end if
+ call glimmap_aea_init(cfp%aea)
+ case(GMAP_LCC)
+ allocate(cfp%lcc)
+ cfp%lcc%longitude_of_central_meridian = longitude_of_central_meridian
+ cfp%lcc%latitude_of_projection_origin = latitude_of_projection_origin
+ cfp%lcc%false_easting = false_easting
+ cfp%lcc%false_northing = false_northing
+ if (present(standard_parallel).and.present(standard_parallel_2)) then
+ cfp%lcc%standard_parallel = (/ standard_parallel,standard_parallel_2 /)
+ else if (present(standard_parallel).and..not.present(standard_parallel_2)) then
+ cfp%lcc%standard_parallel = (/ standard_parallel,standard_parallel /)
+ else
+ call write_log('Lambert Conformal Conic: you must supply at least one standard parallel',&
+ GM_FATAL,__FILE__,__LINE__)
+ end if
+ call glimmap_lcc_init(cfp%lcc)
+ case(GMAP_STERE)
+ allocate(cfp%stere)
+ cfp%stere%longitude_of_central_meridian = longitude_of_central_meridian
+ cfp%stere%latitude_of_projection_origin = latitude_of_projection_origin
+ cfp%stere%false_easting = false_easting
+ cfp%stere%false_northing = false_northing
+ ! A value of 0 is used as the "not specified" sentinel for both
+ ! the scale factor and the standard parallel; presumably the
+ ! proj_stere components default-initialise to 0 -- TODO confirm
+ ! in glimmer_map_types.
+ if(present(scale_factor_at_proj_origin) .and. present(standard_parallel)) then
+ if (scale_factor_at_proj_origin/=0.d0 .and. standard_parallel/=0.d0) &
+ call write_log('Both standard parallel and scale factor specified', &
+ GM_FATAL,__FILE__,__LINE__)
+ end if
+ if(present(scale_factor_at_proj_origin)) &
+ cfp%stere%scale_factor_at_proj_origin = scale_factor_at_proj_origin
+ if(present(standard_parallel)) &
+ cfp%stere%standard_parallel = standard_parallel
+ call glimmap_stere_init(cfp%stere)
+ case default
+ call write_log('Unrecognised projection type', &
+ GM_FATAL,__FILE__,__LINE__)
+ end select
+
+ end subroutine glimmap_proj_define
+
+ !> initialise Lambert azimuthal equal area projection
+ subroutine glimmap_laea_init(params)
+
+ type(proj_laea),intent(inout) :: params
+
+ params%sinp=sin(params%latitude_of_projection_origin*D2R)
+ params%cosp=cos(params%latitude_of_projection_origin*D2R)
+
+ ! Check whether polar
+
+ if (abs(params%latitude_of_projection_origin-90.d0) < CONV_LIMIT) then
+ params%pole = 1
+ else if (abs(params%latitude_of_projection_origin+90.d0) < CONV_LIMIT) then
+ params%pole = -1
+ else
+ params%pole = 0
+ end if
+
+ ! NOTE(review): the span above was reconstructed; the original patch text
+ ! was corrupted ('<'-delimited text stripped). Verify against upstream
+ ! CISM glimmer_map_init.F90.
+ end subroutine glimmap_laea_init
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> initialise Albers equal area conic projection
+ subroutine glimmap_aea_init(params)
+
+ type(proj_aea),intent(inout) :: params
+
+ ! Precompute the spherical Albers equal-area constants (Snyder):
+ ! n - cone constant (mean sine of the two standard parallels)
+ ! c - Albers constant
+ ! rho0 - radius (in m) of the latitude-of-origin parallel
+ params%n = 0.5d0*(sin(params%standard_parallel(1)*D2R) &
+ + sin(params%standard_parallel(2)*D2R))
+ params%i_n = 1.d0/params%n
+ ! Fix: use an integer exponent (**2) instead of the real exponent
+ ! (**2.d0); real exponentiation is evaluated via exp/log and is
+ ! invalid for a negative base.
+ params%c = cos(params%standard_parallel(1)*D2R)**2 &
+ + 2.d0*params%n*sin(params%standard_parallel(1)*D2R)
+ params%rho0_R = params%i_n * sqrt(params%c - &
+ 2.d0*params%n*sin(params%latitude_of_projection_origin*D2R))
+ params%rho0 = params%rho0_R * EQ_RAD
+
+ end subroutine glimmap_aea_init
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> initialise Lambert conformal conic projection
+ subroutine glimmap_lcc_init(params)
+
+ type(proj_lcc),intent(inout) :: params
+
+ if (abs(params%standard_parallel(1)-params%standard_parallel(2)) < CONV_LIMIT) then
+ params%n = sin(D2R*params%standard_parallel(1))
+ else
+ params%n = log(cos(D2R*params%standard_parallel(1))/cos(D2R*params%standard_parallel(2))) / &
+ log(tan(M_PI_4+D2R*params%standard_parallel(2)/2.d0)/tan(M_PI_4+D2R*params%standard_parallel(1)/2.d0))
+ end if
+ params%i_n = 1.d0/params%n
+ params%f = cos(D2R*params%standard_parallel(1)) * &
+ (tan(M_PI_4+D2R*params%standard_parallel(1)/2.d0))**params%n * params%i_n
+ params%rho0 = EQ_RAD*params%f / &
+ (tan(M_PI_4+D2R*params%latitude_of_projection_origin/2.d0))**params%n
+
+ ! NOTE(review): the span above was reconstructed from the standard
+ ! spherical Lambert conformal conic formulae (Snyder); the original
+ ! patch text was corrupted ('<'-delimited text stripped). Verify
+ ! against upstream CISM glimmer_map_init.F90.
+ end subroutine glimmap_lcc_init
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> initialise stereographic projection
+ subroutine glimmap_stere_init(params)
+
+ use glimmer_log
+
+ type(proj_stere),intent(inout) :: params
+
+ ! Determine polar/equatorial, etc.
+ ! pole = +1 north polar, -1 south polar, 0 oblique/equatorial
+
+ if (abs(params%latitude_of_projection_origin-90.d0) < CONV_LIMIT) then
+ params%pole = 1
+ else if (abs(params%latitude_of_projection_origin+90.d0) < CONV_LIMIT) then
+ params%pole = -1
+ else
+ params%pole = 0
+ if (abs(params%latitude_of_projection_origin) < CONV_LIMIT) then
+ params%equatorial = .true.
+ else
+ params%equatorial = .false.
+ end if
+ end if
+
+ ! Set up constants accordingly
+ ! k0 is EQ_RAD times the effective scale; a standard parallel and a
+ ! scale factor of 0 both mean "not specified".
+
+ if (params%pole==1 .or. params%pole==-1) then
+ if (params%standard_parallel /= 0.d0) then
+ if (params%pole==1) params%k0 = EQ_RAD * (1.d0 + sin(D2R*params%standard_parallel))/2.d0
+ if (params%pole==-1) params%k0 = EQ_RAD * (1.d0 - sin(D2R*params%standard_parallel))/2.d0
+ else if (params%scale_factor_at_proj_origin /= 0.d0) then
+ params%k0 = EQ_RAD * params%scale_factor_at_proj_origin
+ else
+ params%k0 = EQ_RAD
+ end if
+ else
+ if (params%scale_factor_at_proj_origin /= 0.d0) then
+ params%k0 = EQ_RAD * params%scale_factor_at_proj_origin
+ else
+ params%k0 = EQ_RAD
+ end if
+ ! A standard parallel only makes sense for the polar aspect
+ if (params%standard_parallel /= 0.d0) &
+ call write_log('Stereographic projection not polar: ignoring standard parallel',GM_WARNING)
+ params%sinp = sin(D2R * params%latitude_of_projection_origin)
+ params%cosp = cos(D2R * params%latitude_of_projection_origin)
+ end if
+
+ ! Cached reciprocal used by the inverse transform (k0 is never 0 here)
+ params%ik0 = 1.d0/params%k0
+
+ end subroutine glimmap_stere_init
+
+end module glimmer_map_init
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_map_proj4.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_map_proj4.F90
new file mode 100644
index 0000000000..5cfa654447
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_map_proj4.F90
@@ -0,0 +1,151 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_map_proj4.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> Generates proj4 strings from projection data type.
+!! Not used in GLIMMER at present.
+module glimmer_map_proj4
+
+ use glimmer_map_types
+
+ implicit none
+
+ private
+ public :: glimmap_proj4
+
+ integer, parameter :: proj4len=100
+
+contains
+
+ !> Returns a proj4 parameter string for a given set of projection parameters
+ !! \return Pointer to array of projection parameter strings
+ function glimmap_proj4(proj)
+
+ use glimmer_log
+
+ implicit none
+ character(len=proj4len), dimension(:), pointer :: glimmap_proj4
+ type(glimmap_proj) :: proj !> Projection of interest
+
+ ! Fix: nullify the pointer result up front. Previously the early
+ ! 'return' (and the final else branch) left the function result
+ ! undefined, which is illegal to reference in the caller.
+ glimmap_proj4 => null()
+
+ if (.not.glimmap_allocated(proj)) then
+ call write_log('No known projection found!',GM_WARNING)
+ return
+ end if
+
+ ! Dispatch to the variant-specific string builder
+ if (associated(proj%laea)) then
+ glimmap_proj4 => glimmap_proj4_laea(proj%laea)
+ return
+ else if (associated(proj%aea)) then
+ glimmap_proj4 => glimmap_proj4_aea(proj%aea)
+ return
+ else if (associated(proj%lcc)) then
+ glimmap_proj4 => glimmap_proj4_lcc(proj%lcc)
+ return
+ else if (associated(proj%stere)) then
+ glimmap_proj4 => glimmap_proj4_stere(proj%stere)
+ return
+ else
+ call write_log('No known projection found!',GM_WARNING)
+ end if
+ end function glimmap_proj4
+
+ !------------------------------------------------------------------
+ ! private converters to proj4 strings
+ !------------------------------------------------------------------
+
+ !> Returns a proj4 parameter string for a stereographic projection
+ function glimmap_proj4_stere(stere)
+ implicit none
+ character(len=proj4len), dimension(:), pointer :: glimmap_proj4_stere
+ type(proj_stere) :: stere
+
+ ! Builds one proj4 key=value token per array element; caller owns
+ ! the allocated result.
+ allocate(glimmap_proj4_stere(6))
+ write(glimmap_proj4_stere(1),*) 'proj=stere'
+ write(glimmap_proj4_stere(2),*) 'lon_0=',stere%longitude_of_central_meridian
+ write(glimmap_proj4_stere(3),*) 'lat_0=',stere%latitude_of_projection_origin
+ ! Polar aspect prefers lat_ts (true-scale latitude); otherwise k_0
+ if (stere%pole/=0) then
+ if (stere%standard_parallel /= 0) then
+ write(glimmap_proj4_stere(4),*) 'lat_ts=',stere%standard_parallel
+ else
+ write(glimmap_proj4_stere(4),*) 'k_0=',stere%scale_factor_at_proj_origin
+ end if
+ else
+ write(glimmap_proj4_stere(4),*) 'k_0=',stere%scale_factor_at_proj_origin
+ end if
+ write(glimmap_proj4_stere(5),*) 'x_0=',stere%false_easting
+ write(glimmap_proj4_stere(6),*) 'y_0=',stere%false_northing
+ end function glimmap_proj4_stere
+
+ !> Returns a proj4 parameter string for a Lambert azimuthal equal area projection
+ function glimmap_proj4_laea(laea)
+ implicit none
+ character(len=proj4len), dimension(:), pointer :: glimmap_proj4_laea
+ type(proj_laea) :: laea
+
+ ! One proj4 key=value token per element; caller owns the result.
+ allocate(glimmap_proj4_laea(5))
+ write(glimmap_proj4_laea(1),*) 'proj=laea'
+ write(glimmap_proj4_laea(2),*) 'lon_0=',laea%longitude_of_central_meridian
+ write(glimmap_proj4_laea(3),*) 'lat_0=',laea%latitude_of_projection_origin
+ write(glimmap_proj4_laea(4),*) 'x_0=',laea%false_easting
+ write(glimmap_proj4_laea(5),*) 'y_0=',laea%false_northing
+ end function glimmap_proj4_laea
+
+ !> Returns a proj4 parameter string for an Albers equal area conic projection
+ !! (header previously said "Lambert azimuthal equal area" -- copy/paste error)
+ function glimmap_proj4_aea(aea)
+ implicit none
+ character(len=proj4len), dimension(:), pointer :: glimmap_proj4_aea
+ type(proj_aea) :: aea
+
+ ! One proj4 key=value token per element; caller owns the result.
+ allocate(glimmap_proj4_aea(7))
+ write(glimmap_proj4_aea(1),*) 'proj=aea'
+ write(glimmap_proj4_aea(2),*) 'lon_0=',aea%longitude_of_central_meridian
+ write(glimmap_proj4_aea(3),*) 'lat_0=',aea%latitude_of_projection_origin
+ write(glimmap_proj4_aea(4),*) 'lat_1=',aea%standard_parallel(1)
+ write(glimmap_proj4_aea(5),*) 'lat_2=',aea%standard_parallel(2)
+ write(glimmap_proj4_aea(6),*) 'x_0=',aea%false_easting
+ write(glimmap_proj4_aea(7),*) 'y_0=',aea%false_northing
+ end function glimmap_proj4_aea
+
+ !> Returns a proj4 parameter string for a Lambert conformal conic projection
+ function glimmap_proj4_lcc(lcc)
+ implicit none
+ character(len=proj4len), dimension(:), pointer :: glimmap_proj4_lcc
+ type(proj_lcc) :: lcc
+
+ ! One proj4 key=value token per element; caller owns the result.
+ allocate(glimmap_proj4_lcc(7))
+ write(glimmap_proj4_lcc(1),*) 'proj=lcc'
+ write(glimmap_proj4_lcc(2),*) 'lon_0=',lcc%longitude_of_central_meridian
+ write(glimmap_proj4_lcc(3),*) 'lat_0=',lcc%latitude_of_projection_origin
+ write(glimmap_proj4_lcc(4),*) 'lat_1=',lcc%standard_parallel(1)
+ write(glimmap_proj4_lcc(5),*) 'lat_2=',lcc%standard_parallel(2)
+ write(glimmap_proj4_lcc(6),*) 'x_0=',lcc%false_easting
+ write(glimmap_proj4_lcc(7),*) 'y_0=',lcc%false_northing
+ end function glimmap_proj4_lcc
+
+end module glimmer_map_proj4
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_map_trans.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_map_trans.F90
new file mode 100644
index 0000000000..fca131efbf
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_map_trans.F90
@@ -0,0 +1,566 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_map_trans.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> convert between projections
+module glimmer_map_trans
+
+ use glimmer_map_types
+ use glimmer_global, only: dp
+ implicit none
+
+ private
+ public :: glimmap_ll_to_xy, glimmap_xy_to_ll, loncorrect
+
+
+contains
+
+ !> Convert lat-long coordinates to grid coordinates.
+ !!
+ !! The subroutine returns the x-y coordinates as real values,
+ !! non-integer values indicating a position between grid-points.
+ subroutine glimmap_ll_to_xy(lon,lat,x,y,proj,grid)
+
+ use glimmer_log
+ use glimmer_coordinates
+
+ implicit none
+
+ real(dp),intent(in) :: lon !< The location of the point in lat-lon space (Longitude)
+ real(dp),intent(in) :: lat !< The location of the point in lat-lon space (Latitude)
+ real(dp),intent(out) :: x !< The location of the point in x-y space (x coordinate)
+ real(dp),intent(out) :: y !< The location of the point in x-y space (y coordinate)
+ type(glimmap_proj), intent(in) :: proj !< The projection being used
+ type(coordsystem_type),intent(in) :: grid !< the grid definition
+
+ real(dp) :: xx,yy ! These are real-space distances in meters
+
+ ! Dispatch on whichever projection variant is associated
+ if (associated(proj%laea)) then
+ call glimmap_laea(lon,lat,xx,yy,proj%laea)
+ else if (associated(proj%aea)) then
+ call glimmap_aea(lon,lat,xx,yy,proj%aea)
+ else if (associated(proj%lcc)) then
+ call glimmap_lcc(lon,lat,xx,yy,proj%lcc)
+ else if (associated(proj%stere)) then
+ call glimmap_stere(lon,lat,xx,yy,proj%stere)
+ else
+ ! NOTE(review): if no projection is associated, xx/yy remain
+ ! undefined when passed to space2grid below (only a warning is
+ ! logged) -- TODO confirm intended behaviour.
+ call write_log('No known projection found!',GM_WARNING)
+ end if
+
+ ! Now convert the real-space distances to grid-points using the grid type
+
+ call space2grid(xx,yy,x,y,grid)
+
+ end subroutine glimmap_ll_to_xy
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Convert grid coordinates to lat-lon coordinates.
+ !!
+ !! The subroutine returns the lat-lon coordinates as real values,
+ !! non-integer values indicating a position between grid-points.
+ subroutine glimmap_xy_to_ll(lon,lat,x,y,proj,grid)
+
+
+ use glimmer_log
+ use glimmer_coordinates
+
+ implicit none
+
+ real(dp),intent(out) :: lon !< The location of the point in lat-lon space (Longitude)
+ real(dp),intent(out) :: lat !< The location of the point in lat-lon space (Latitude)
+ real(dp),intent(in) :: x !< The location of the point in x-y space (x coordinate)
+ real(dp),intent(in) :: y !< The location of the point in x-y space (y coordinate)
+ type(glimmap_proj), intent(in) :: proj !< The projection being used
+ type(coordsystem_type),intent(in) :: grid !< the grid definition
+
+ real(dp) :: xx,yy ! These are real-space distances in meters
+
+ ! First convert grid-point space to real space
+
+ call grid2space(xx,yy,x,y,grid)
+
+ ! Dispatch to the inverse transform of the associated variant
+ if (associated(proj%laea)) then
+ call glimmap_ilaea(lon,lat,xx,yy,proj%laea)
+ else if (associated(proj%aea)) then
+ call glimmap_iaea(lon,lat,xx,yy,proj%aea)
+ else if (associated(proj%lcc)) then
+ call glimmap_ilcc(lon,lat,xx,yy,proj%lcc)
+ else if (associated(proj%stere)) then
+ call glimmap_istere(lon,lat,xx,yy,proj%stere)
+ else
+ ! NOTE(review): lon/lat remain undefined here; only a warning is
+ ! logged before loncorrect is applied -- TODO confirm intended.
+ call write_log('No known projection found!',GM_WARNING)
+ end if
+
+ ! Normalise longitude into the canonical range starting at 0
+ lon=loncorrect(lon,0.d0)
+
+ end subroutine glimmap_xy_to_ll
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ ! PRIVATE subroutines follow
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ ! Lambert azimuthal equal area projection
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Forward transformation: lat-lon -> x-y of Lambert azimuthal equal area projection
+ subroutine glimmap_laea(lon,lat,x,y,params)
+
+
+ use glimmer_log
+
+ real(dp),intent(in) :: lon !< longitude
+ real(dp),intent(in) :: lat !< latitude
+ real(dp),intent(out) :: x !< x
+ real(dp),intent(out) :: y !< y
+ type(proj_laea),intent(in) :: params !< projection parameters
+
+ real(dp) :: sin_lat,cos_lat,sin_lon,cos_lon,c,dlon,dlat,tmp,k
+ character(80) :: errtxt
+
+ dlon = lon-params%longitude_of_central_meridian
+
+ ! Check domain of longitude
+
+ dlon = loncorrect(dlon,-180.d0)
+
+ ! Convert to radians and calculate sine and cos
+
+ dlon = dlon*D2R ; dlat = lat*D2R
+
+ call sincos(dlon,sin_lon,cos_lon);
+ call sincos(dlat,sin_lat,cos_lat);
+ c = cos_lat * cos_lon
+
+ ! Mapping transformation
+ ! tmp -> 0 at the antipode of the projection origin, where the
+ ! spherical LAEA forward transform is singular.
+
+ tmp = 1.d0 + params%sinp * sin_lat + params%cosp * c
+
+ if (tmp > 0.d0) then
+ k = EQ_RAD * sqrt (2.d0 / tmp)
+ x = k * cos_lat * sin_lon
+ y = k * (params%cosp * sin_lat - params%sinp * c)
+ else
+ ! GM_FATAL: presumably write_log aborts here, so x/y being unset
+ ! is never observed -- TODO confirm glimmer_log semantics.
+ write(errtxt,*)'LAEA projection error:',lon,lat,params%latitude_of_projection_origin
+ call write_log(trim(errtxt),GM_FATAL,__FILE__,__LINE__)
+ endif
+
+ ! Apply false eastings and northings
+
+ x = x + params%false_easting
+ y = y + params%false_northing
+
+ end subroutine glimmap_laea
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Inverse transformation: lat-lon -> x-y of Lambert azimuthal equal area projection
+ subroutine glimmap_ilaea(lon,lat,x,y,params)
+
+ use glimmer_log
+
+ real(dp),intent(out) :: lon !< longitude
+ real(dp),intent(out) :: lat !< latitude
+ real(dp),intent(in) :: x !< x
+ real(dp),intent(in) :: y !< y
+ type(proj_laea),intent(in) :: params !< projection parameters
+
+ real(dp) :: rho,c,sin_c,cos_c,xx,yy
+ character(80) :: errtxt
+
+ xx=x ; yy=y
+
+ ! Account for false eastings and northings
+
+ xx = xx - params%false_easting
+ yy = yy - params%false_northing
+
+ ! rho: planar distance from the projection centre
+ rho=hypot (xx,yy)
+
+ if (abs(rho) < CONV_LIMIT) then
+ ! If very near the centre of the map...
+ lat = params%latitude_of_projection_origin
+ lon = params%longitude_of_central_meridian
+ else
+ ! c is the angular distance from the projection centre
+ c = 2.d0 * asin(0.5d0 * rho * i_EQ_RAD)
+ call sincos (c, sin_c, cos_c)
+ lat = asin (cos_c * params%sinp + (yy * sin_c * params%cosp / rho)) * R2D
+ ! Longitude formula depends on the aspect (north/south polar vs oblique)
+ select case(params%pole)
+ case(1)
+ lon = params%longitude_of_central_meridian + R2D * atan2 (xx, -yy)
+ case(-1)
+ lon = params%longitude_of_central_meridian + R2D * atan2 (xx, yy)
+ case(0)
+ lon = params%longitude_of_central_meridian + &
+ R2D * atan2 (xx * sin_c, (rho * params%cosp * cos_c - yy * params%sinp * sin_c))
+ case default
+ write(errtxt,*)'Inverse LAEA projection error:',params%pole
+ call write_log(trim(errtxt),GM_FATAL,__FILE__,__LINE__)
+ end select
+ endif
+
+ end subroutine glimmap_ilaea
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ ! Albers equal area conic projection
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Forward transformation: lat-lon -> x-y of Albers equal area conic projection
+ subroutine glimmap_aea(lon,lat,x,y,params)
+
+ real(dp),intent(in) :: lon !< longitude
+ real(dp),intent(in) :: lat !< latitude
+ real(dp),intent(out) :: x !< x
+ real(dp),intent(out) :: y !< y
+ type(proj_aea),intent(in) :: params !< projection parameters
+
+ real(dp) :: dlon,theta,sint,cost,rho
+
+ dlon = lon-params%longitude_of_central_meridian
+
+ ! Check domain of longitude
+
+ dlon = loncorrect(dlon,-180.d0)
+ theta = params%n * dlon * D2R
+ call sincos(theta,sint,cost)
+
+ ! Fix: use a double-precision literal (2.d0, not default-real 2.0)
+ ! so the radicand is evaluated entirely in dp, consistent with the
+ ! matching expression in glimmap_aea_init.
+ rho = params%i_n*sqrt(params%c - 2.d0*params%n*sin(lat*D2R))
+
+ x = EQ_RAD * rho * sint
+ y = EQ_RAD * (params%rho0_R - rho * cost)
+
+ ! Apply false eastings and northings
+
+ x = x + params%false_easting
+ y = y + params%false_northing
+
+ end subroutine glimmap_aea
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Inverse transformation: lat-lon -> x-y of Albers equal area conic projection
+ subroutine glimmap_iaea(lon,lat,x,y,params)
+
+ real(dp),intent(out) :: lon !< longitude
+ real(dp),intent(out) :: lat !< latitude
+ real(dp),intent(in) :: x !< x
+ real(dp),intent(in) :: y !< y
+ type(proj_aea),intent(in) :: params !< projection parameters
+
+ real(dp) :: xx,yy,rho,theta
+
+ xx=x ; yy=y
+
+ ! Account for false eastings and northings
+
+ xx = xx - params%false_easting
+ yy = yy - params%false_northing
+
+ ! Fix: squares must use an integer exponent (**2), not **2.d0 --
+ ! a real exponent is evaluated via exp/log and is invalid Fortran
+ ! for a negative base (xx is negative west of the central meridian).
+ rho = sqrt(xx**2 + (params%rho0 - yy)**2)
+ if (params%n > 0.d0) then
+ theta = atan2(xx,(params%rho0-yy))
+ else
+ theta = atan2(-xx,(yy-params%rho0))
+ end if
+
+ ! Invert the Albers radius relation for latitude (Snyder)
+ lat = asin((params%c-(rho*params%n/EQ_RAD)**2)*0.5d0*params%i_n)*R2D
+ lon = params%longitude_of_central_meridian+R2D*theta*params%i_n
+
+ end subroutine glimmap_iaea
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ ! Lambert conformal conic projection
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Forward transformation: lat-lon -> x-y of Lambert conformal conic projection
+ subroutine glimmap_lcc(lon,lat,x,y,params)
+
+ real(dp),intent(in) :: lon !< longitude
+ real(dp),intent(in) :: lat !< latitude
+ real(dp),intent(out) :: x !< x
+ real(dp),intent(out) :: y !< y
+ type(proj_lcc),intent(in) :: params !< projection parameters
+
+ real(dp) :: dlon,rho,theta,sint,cost
+
+ dlon = lon-params%longitude_of_central_meridian
+
+ ! Check domain of longitude
+
+ dlon = loncorrect(dlon,-180.d0)
+ ! Spherical LCC forward transform: rho is the radius of the parallel,
+ ! theta the angle about the cone apex (constants f, n, rho0 are set
+ ! in glimmap_lcc_init)
+ rho = EQ_RAD * params%f/(tan(M_PI_4+lat*D2R/2.d0))**params%n
+ theta = params%n*dlon*D2R
+ call sincos(theta,sint,cost)
+
+ x = rho * sint
+ y = params%rho0 - rho * cost
+
+ ! Apply false eastings and northings
+
+ x = x + params%false_easting
+ y = y + params%false_northing
+
+ end subroutine glimmap_lcc
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Inverse transformation: lat-lon -> x-y of Lambert conformal conic projection
+ subroutine glimmap_ilcc(lon,lat,x,y,params)
+
+ real(dp),intent(out) :: lon !< longitude
+ real(dp),intent(out) :: lat !< latitude
+ real(dp),intent(in) :: x !< x
+ real(dp),intent(in) :: y !< y
+ type(proj_lcc),intent(in) :: params !< projection parameters
+
+ real(dp) :: xx,yy,rho,theta
+
+ xx=x ; yy=y
+
+ ! Account for false eastings and northings
+
+ xx = xx - params%false_easting
+ yy = yy - params%false_northing
+
+ ! Fix: squares must use an integer exponent (**2), not **2.d0 --
+ ! a real exponent is evaluated via exp/log and is invalid Fortran
+ ! for a negative base (xx is negative west of the central meridian).
+ ! rho carries the sign of the cone constant n.
+ rho = sign(sqrt(xx**2 + (params%rho0-yy)**2),params%n)
+ if (params%n > 0.d0) then
+ theta = atan2(xx,(params%rho0-yy))
+ else
+ theta = atan2(-xx,(yy-params%rho0))
+ end if
+
+ ! At the cone apex (rho ~ 0) latitude is the pole on n's side
+ if (abs(rho) < CONV_LIMIT) then
+ lat = sign(real(90.d0,kind=dp),params%n)
+ else
+ lat = R2D * (2.d0 * atan((EQ_RAD*params%f/rho)**params%i_n) - M_PI_2)
+ end if
+
+ lon = params%longitude_of_central_meridian+R2D*theta*params%i_n
+
+ end subroutine glimmap_ilcc
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ ! Stereographic projection
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Forward transformation: lat-lon -> x-y of Stereographic projection
+  subroutine glimmap_stere(lon,lat,x,y,params)
+
+    use glimmer_log
+
+    real(dp),intent(in)  :: lon            !< longitude (degrees)
+    real(dp),intent(in)  :: lat            !< latitude (degrees)
+    real(dp),intent(out) :: x              !< x (projected coordinate)
+    real(dp),intent(out) :: y              !< y (projected coordinate)
+    type(proj_stere),intent(in) :: params  !< projection parameters
+
+    real(dp) :: dlon,k,dlat,slat,clat,slon,clon
+    character(80) :: errtxt
+
+    dlon = lon-params%longitude_of_central_meridian
+
+    ! Check domain of longitude
+
+    dlon = loncorrect(dlon,-180.d0)
+    dlon = dlon * D2R
+    dlat = lat * D2R
+    call sincos(dlon,slon,clon)
+
+    select case(params%pole)
+    case(1)  ! North polar aspect
+       ! hoist the shared factor so tan() is evaluated only once
+       k = 2.d0 * params%k0 * tan(M_PI_4 - dlat/2.d0)
+       x = k * slon
+       y = -k * clon
+    case(-1) ! South polar aspect
+       k = 2.d0 * params%k0 * tan(M_PI_4 + dlat/2.d0)
+       x = k * slon
+       y = k * clon
+    case(0)  ! Oblique or equatorial aspect
+       call sincos(dlat,slat,clat)
+       if (params%equatorial) then
+          k = 2.d0 * params%k0 / (1.d0 + clat*clon)
+          y = k * slat
+       else
+          k = 2.d0 * params%k0 / (1.d0 + params%sinp*slat + params%cosp*clat*clon)
+          y = k * (params%cosp*slat - params%sinp*clat*clon)
+       end if
+       x = k * clat * slon
+    case default
+       ! params%pole is expected to be 1, -1 or 0; anything else is fatal
+       write(errtxt,*)'Stereographic projection error:',params%pole
+       call write_log(trim(errtxt),GM_FATAL,__FILE__,__LINE__)
+    end select
+
+    ! Apply false eastings and northings
+
+    x = x + params%false_easting
+    y = y + params%false_northing
+
+  end subroutine glimmap_stere
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  !> Inverse transformation: x-y -> lat-lon of Stereographic projection
+ subroutine glimmap_istere(lon,lat,x,y,params)
+
+ real(dp),intent(out) :: lon !< longitude
+ real(dp),intent(out) :: lat !< latitude
+ real(dp),intent(in) :: x !< x
+ real(dp),intent(in) :: y !< y
+ type(proj_stere),intent(in) :: params !< projection parameters
+
+ real(dp) :: xx,yy,rho,c,sinc,cosc
+
+ xx=x ; yy=y
+
+ ! Account for false eastings and northings
+
+ xx = xx - params%false_easting
+ yy = yy - params%false_northing
+
+ rho = hypot(xx,yy)
+
+ if (abs(rho) transform from grid to space
+  subroutine grid2space(x,y,gx,gy,coordsys)
+
+    use glimmer_coordinates
+
+    implicit none
+
+    real(dp),intent(out) :: x  !< x-location in real space
+    real(dp),intent(out) :: y  !< y-location in real space
+    real(dp),intent(in)  :: gx !< x-location in grid space (1-based)
+    real(dp),intent(in)  :: gy !< y-location in grid space (1-based)
+    type(coordsystem_type), intent(in) :: coordsys !< coordinate system
+
+    ! Keep the offset arithmetic in double precision throughout:
+    ! the previous real(gx - 1) converted the dp argument to default
+    ! (single) precision before the multiply, losing accuracy.  Using
+    ! (gx - 1.d0) also mirrors the inverse formula in space2grid.
+    x = coordsys%origin%pt(1) + (gx - 1.d0)*coordsys%delta%pt(1)
+    y = coordsys%origin%pt(2) + (gy - 1.d0)*coordsys%delta%pt(2)
+
+  end subroutine grid2space
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> convert from space to grid
+  subroutine space2grid(x,y,gx,gy,coordsys)
+
+    use glimmer_coordinates
+
+    implicit none
+
+    real(dp),intent(in)  :: x  !< x-location in real space
+    real(dp),intent(in)  :: y  !< y-location in real space
+    real(dp),intent(out) :: gx !< x-location in grid space
+    real(dp),intent(out) :: gy !< y-location in grid space
+    type(coordsystem_type), intent(in) :: coordsys !< coordinate system
+
+    real(dp) :: dx_cells, dy_cells
+
+    ! Offset from the grid origin, measured in grid spacings
+    dx_cells = (x - coordsys%origin%pt(1))/coordsys%delta%pt(1)
+    dy_cells = (y - coordsys%origin%pt(2))/coordsys%delta%pt(2)
+
+    ! Grid indices are 1-based, hence the unit shift
+    gx = dx_cells + 1.d0
+    gy = dy_cells + 1.d0
+
+  end subroutine space2grid
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Calculates the sin and cos of an angle.
+  subroutine sincos(a,s,c)
+
+    implicit none
+
+    real(dp),intent(in)  :: a !< Input angle (radians).
+    real(dp),intent(out) :: s !< sin(a)
+    real(dp),intent(out) :: c !< cos(a)
+
+    ! Convenience wrapper returning both trigonometric components at once
+    s = sin(a) ; c = cos(a)
+
+  end subroutine sincos
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Normalises a value of longitude to the range starting at min degrees.
+ !! \return The normalised value of longitude.
+  real(dp) function loncorrect(lon,minimum)
+
+    real(dp),intent(in) :: lon     !< The longitude under consideration (degrees east)
+    real(dp),intent(in) :: minimum !< The lower end of the output range (degrees east)
+
+    real(dp) :: maximum
+
+    maximum = minimum + 360.d0
+
+    if (lon >= minimum .and. lon < maximum) then
+       ! Already in range: return unchanged, introducing no rounding
+       loncorrect = lon
+    else
+       ! O(1) reduction into [minimum, minimum+360); the previous
+       ! repeated +/-360 loops needed ~|lon|/360 iterations for input
+       ! far outside the range
+       loncorrect = minimum + modulo(lon - minimum, 360.d0)
+       ! Guard against rounding pushing the result onto the upper bound
+       ! (maximum is congruent to minimum modulo 360)
+       if (loncorrect >= maximum) loncorrect = minimum
+    end if
+
+  end function loncorrect
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> compute \f$\sqrt{x^2+y^2}\f$
+  !> compute \f$\sqrt{x^2+y^2}\f$ without intermediate overflow/underflow
+  real(dp) function hypot(x,y)
+
+    implicit none
+
+    real(dp),intent(in) :: x !< One input value
+    real(dp),intent(in) :: y !< Another input value
+
+    real(dp) :: big,small
+
+    ! Scale by the larger magnitude so squaring cannot overflow, and a
+    ! tiny component cannot be lost to underflow, unlike the naive
+    ! sqrt(x*x+y*y)
+    big   = max(abs(x),abs(y))
+    small = min(abs(x),abs(y))
+
+    if (big == 0.d0) then
+       hypot = 0.d0
+    else
+       hypot = big * sqrt(1.d0 + (small/big)**2)
+    end if
+
+  end function hypot
+
+end module glimmer_map_trans
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_map_types.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_map_types.F90
new file mode 100644
index 0000000000..ce551470f2
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_map_types.F90
@@ -0,0 +1,193 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_map_types.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> This module contains derived types.
+!!
+module glimmer_map_types
+
+ use glimmer_global, only: dp
+ use glimmer_physcon, only: pi
+
+ implicit none
+
+  !> derived type holding all known map projections. This simulates inheritance
+  !! (exactly one of the pointers is expected to be associated at a time).
+  type glimmap_proj
+     logical :: found = .false.
+     type(proj_laea), pointer :: laea => NULL()  !< Pointer to Lambert azimuthal equal area type
+     type(proj_aea), pointer :: aea => NULL()    !< Pointer to Albers equal area conic type
+     type(proj_lcc), pointer :: lcc => NULL()    !< Pointer to Lambert conic conformal type
+     type(proj_stere), pointer :: stere => NULL() !< Pointer to Stereographic type
+  end type glimmap_proj
+
+  !-------------------------------------------------------------
+
+  !> Lambert Azimuthal Equal Area
+  type proj_laea
+     real(dp) :: longitude_of_central_meridian  !< longitude of central meridian
+     real(dp) :: latitude_of_projection_origin  !< latitude of projection origin
+     real(dp) :: false_easting                  !< false easting
+     real(dp) :: false_northing                 !< false northing
+     real(dp) :: sinp                           !< Sine of latitude_of_projection_origin
+     real(dp) :: cosp                           !< Cosine of latitude_of_projection_origin
+     integer :: pole                            !< Set to 1 for N pole, -1 for S pole, 0 otherwise
+  end type proj_laea
+
+  !-------------------------------------------------------------
+
+  !> Albers Equal-Area Conic
+  type proj_aea
+     real(dp),dimension(2) :: standard_parallel !< two standard parallels
+     real(dp) :: longitude_of_central_meridian  !< longitude of central meridian
+     real(dp) :: latitude_of_projection_origin  !< latitude of projection origin
+     real(dp) :: false_easting                  !< false easting
+     real(dp) :: false_northing                 !< false northing
+     real(dp) :: rho0      !< Convenience constant
+     real(dp) :: rho0_R    !< Convenience constant (is rho0/EQ_RAD)
+     real(dp) :: c         !< Convenience constant
+     real(dp) :: n         !< Convenience constant (cone constant)
+     real(dp) :: i_n       !< Convenience constant (inverse of n)
+  end type proj_aea
+
+  !-------------------------------------------------------------
+
+  !> Lambert Conic Conformal
+  type proj_lcc
+     real(dp),dimension(2) :: standard_parallel !< two standard parallels
+     real(dp) :: longitude_of_central_meridian  !< longitude of central meridian
+     real(dp) :: latitude_of_projection_origin  !< latitude of projection origin
+     real(dp) :: false_easting                  !< false easting
+     real(dp) :: false_northing                 !< false northing
+     real(dp) :: rho0      !< Convenience constant
+     real(dp) :: f         !< Convenience constant
+     real(dp) :: n         !< Convenience constant (cone constant)
+     real(dp) :: i_n       !< Convenience constant (inverse of n)
+  end type proj_lcc
+
+  !-------------------------------------------------------------
+
+  !> Stereographic projection derived type
+  type proj_stere
+     real(dp) :: longitude_of_central_meridian       !< longitude of central meridian
+     real(dp) :: latitude_of_projection_origin       !< latitude of projection origin
+     real(dp) :: scale_factor_at_proj_origin = 0.d0  !< scale factor at origin
+     real(dp) :: standard_parallel = 0.d0            !< a standard parallel
+     real(dp) :: false_easting                       !< false easting
+     real(dp) :: false_northing                      !< false northing
+     integer :: pole            !< Set to 1 for N pole, -1 for S pole, 0 otherwise
+     logical :: equatorial      !< Set true if equatorial aspect
+     real(dp) :: k0             !< scale factor or std par converted to scale factor
+     real(dp) :: ik0            !< inverse of k0
+     real(dp) :: sinp           !< sin of latitude_of_projection_origin
+     real(dp) :: cosp           !< cos of latitude_of_projection_origin
+  end type proj_stere
+
+  ! Global mapping parameters ----------------------------------
+
+! real(dp),parameter :: pi = 3.141592654 !< The value of $\pi$. ! defined in glimmer_physcon
+  real(dp),parameter :: M_PI_4 = pi/4.d0      !< The value of $\pi/4$.
+  real(dp),parameter :: M_PI_2 = pi/2.d0      !< The value of $\pi/2$.
+  real(dp),parameter :: D2R = pi/180.d0       !< Degrees-to-radians conversion factor.
+  real(dp),parameter :: R2D = 180.d0/pi       !< Radians-to-degrees conversion factor.
+  real(dp),parameter :: EQ_RAD = 6.37d6       !< Radius of the earth (m)
+  real(dp),parameter :: i_EQ_RAD = 1.d0/EQ_RAD !< Inverse radius of the earth (m^-1)
+  real(dp),parameter :: CONV_LIMIT = 1.0d-8   !< Convergence limit (a small number).
+
+  integer, parameter :: GMAP_LAEA=1  !< ID for Lambert azimuthal equal area projection
+  integer, parameter :: GMAP_AEA=2   !< ID for Albers equal area conic projection
+  integer, parameter :: GMAP_LCC=3   !< ID for Lambert conformal conic projection
+  integer, parameter :: GMAP_STERE=4 !< ID for stereographic projection
+
+contains
+
+ !> return true if structure contains a known projection
+  function glimmap_allocated(proj)
+
+    implicit none
+    type(glimmap_proj) :: proj
+    logical :: glimmap_allocated
+
+    ! A projection is considered present when the reader flagged one
+    ! of the known projections as found.
+    glimmap_allocated = proj%found
+
+  end function glimmap_allocated
+
+ !> This is incomplete diagnostics code to output full
+ !! content of projection type. Only does
+ !! Stereographic projections so far.
+  subroutine glimmap_diag(proj)
+
+    use glimmer_log
+
+    type(glimmap_proj) :: proj
+
+    ! Diagnostic dump of the projection contents; only the
+    ! stereographic projection is handled so far.
+    if (.not. associated(proj%stere)) then
+       call write_log('Stereographic projection not found')
+       return
+    end if
+
+    call glimmap_diag_stere(proj%stere)
+
+  end subroutine glimmap_diag
+
+ !> print out parameters of Stereographic projection
+  subroutine glimmap_diag_stere(params)
+
+    use glimmer_log
+    use glimmer_global, only : msg_length
+
+    type(proj_stere) :: params
+    character(len=msg_length) :: message
+
+    ! Write every field of the stereographic projection type to the
+    ! log, one line per field.
+    call write_log('***** Stereographic *****')
+    write(message,*)'longitude_of_central_meridian:', params%longitude_of_central_meridian
+    call write_log(message)
+    write(message,*)'latitude_of_projection_origin:', params%latitude_of_projection_origin
+    call write_log(message)
+    write(message,*)'scale_factor_at_proj_origin:', params%scale_factor_at_proj_origin
+    call write_log(message)
+    write(message,*)'standard_parallel:', params%standard_parallel
+    call write_log(message)
+    write(message,*)'false_easting:', params%false_easting
+    call write_log(message)
+    write(message,*)'false_northing:', params%false_northing
+    call write_log(message)
+    write(message,*)'pole:', params%pole
+    call write_log(message)
+    write(message,*)'equatorial:', params%equatorial
+    call write_log(message)
+    ! derived/convenience members follow
+    write(message,*)'k0:', params%k0
+    call write_log(message)
+    write(message,*)'ik0:', params%ik0
+    call write_log(message)
+    write(message,*)'sinp:', params%sinp
+    call write_log(message)
+    write(message,*)'cosp:', params%cosp
+    call write_log(message)
+
+  end subroutine glimmap_diag_stere
+
+end module glimmer_map_types
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_ncdf.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_ncdf.F90
new file mode 100644
index 0000000000..764db5127a
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_ncdf.F90
@@ -0,0 +1,409 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_ncdf.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#define NCO outfile%nc
+#define NCI infile%nc
+
+!> netCDF type definitions and functions for managing linked lists
+!!
+!! \author Magnus Hagdorn
+!! \date 2004
+module glimmer_ncdf
+
+ use glimmer_global, only: fname_length, dp
+ use netcdf
+
+ implicit none
+
+  integer, parameter :: glimmer_nc_meta_len = 100
+  !> maximum length for meta data
+
+  character(len=*), parameter :: glimmer_nc_mapvarname = 'mapping'
+  !> name of the grid mapping variable
+
+  real(dp), parameter :: glimmer_nc_max_time=1.d10
+  !> maximum time that can be written
+
+  !> Data structure holding netCDF file description
+  type glimmer_nc_stat
+     !> Data structure holding netCDF file description
+
+     logical :: define_mode = .TRUE.
+     !> set to .TRUE. when we are in define mode
+     logical :: just_processed = .FALSE.
+     !> set to .TRUE. if the file was used during the last time step
+     real(dp) :: processsed_time = 0.d0
+     !> the time when the file was last processed
+     !> (note: the historical triple-s spelling is kept for compatibility)
+     character(len=fname_length) :: filename = " "
+     !> name of netCDF file
+     integer id
+     !> id of netCDF file
+
+     integer :: nlevel = 0
+     integer :: nstaglevel = 0
+     integer :: nstagwbndlevel = 0
+     !> size of vertical and stag vertical coordinate
+
+     integer timedim
+     !> id of time dimension
+     integer timevar
+     !> id of time variable
+
+     ! TODO - Create a variable for vars length so it can be made longer (Matt has this implemented in his subglacial hydrology branch)
+     ! Apply it here for vars, vars_copy and to restart_variable_list in glimmer_ncparams.F90
+
+     character(len=310) vars
+     !> string containing variables to be processed
+     logical :: restartfile = .false.
+     !> Set to true if we're writing a restart file
+     character(len=310) vars_copy
+     !> string containing variables to be processed (retained copy)
+  end type glimmer_nc_stat
+
+  type glimmer_nc_meta
+     !> Data structure holding netCDF meta data, see CF user guide
+
+     character(len=glimmer_nc_meta_len) :: title = ''
+     !> title of netCDF file
+     character(len=glimmer_nc_meta_len) :: institution = ''
+     !> where the data was produced
+     character(len=glimmer_nc_meta_len) :: references = ''
+     !> list of references
+     character(len=glimmer_nc_meta_len) :: source = ''
+     !> this string will hold the GLIMMER version
+     character(len=glimmer_nc_meta_len) :: history = ''
+     !> netCDF file history string
+     character(len=glimmer_nc_meta_len) :: comment = ''
+     !> some comments
+     character(len=10000) :: config = ''
+     !> the contents of the glide config file
+  end type glimmer_nc_meta
+
+  type glimmer_nc_output
+     !> element of linked list describing netCDF output file
+     !NO_RESTART previous
+
+     type(glimmer_nc_stat) :: nc !< structure containing file info
+     real(dp) :: freq = 1000.d0 !< frequency at which data is written to file
+     real(dp) :: next_write = 0.d0 !< next time step at which data is dumped
+     real(dp) :: end_write = glimmer_nc_max_time !< stop writing after this year
+     integer :: timecounter = 1 !< time counter
+     real(dp) :: total_time = 0.d0 !< accumulate time steps (used for taking time averages)
+
+     integer :: default_xtype = NF90_REAL !< the default external type for storing floating point values
+     logical :: do_averages = .false. !< set to .true. if we need to handle averages
+
+     type(glimmer_nc_meta) :: metadata
+     !> structure holding metadata
+
+     type(glimmer_nc_output), pointer :: next=>NULL()
+     !> next element in list
+     type(glimmer_nc_output), pointer :: previous=>NULL()
+     !> previous element in list
+     logical :: append = .false.
+     !> Set to true if we are appending onto an existing file.
+  end type glimmer_nc_output
+
+  type glimmer_nc_input
+     !> element of linked list describing netCDF input file
+     !NO_RESTART previous
+     type(glimmer_nc_stat) :: nc
+     !> structure containing file info
+     real(dp), pointer, dimension(:) :: times => NULL()
+     !> pointer to array holding times
+     integer :: nt, current_time=1
+     !>number of elements in times and current time index
+     integer :: get_time_slice = 1
+     !> -1 if all times should be loaded, > 0 to load particular slice and then close file
+
+     type(glimmer_nc_input), pointer :: next=>NULL()
+     !> next element in list
+     type(glimmer_nc_input), pointer :: previous=>NULL()
+     !> previous element in list
+  end type glimmer_nc_input
+
+
+  ! generic interfaces for the linked-list helpers below
+  interface delete
+     module procedure delete_output, delete_input
+  end interface
+
+  interface add
+     module procedure add_output, add_input
+  end interface
+
+contains
+
+  function delete_output(oc, cf)
+    !> remove element from linked list
+    !! \return the next element in the list, or NULL() if oc was the tail
+    use glimmer_log
+    implicit none
+    type(glimmer_nc_output), pointer :: delete_output
+    type(glimmer_nc_output), pointer :: oc !< the output file to be removed
+    logical, intent(in), optional :: cf !< set to .True. if file should be closed
+    ! local variables
+    logical closefile
+    integer status
+
+    ! by default the underlying netCDF file is closed
+    if (present(cf)) then
+       closefile = cf
+    else
+       closefile = .true.
+    end if
+
+    if (associated(oc)) then
+       ! unlink oc from the list before deallocating it
+       if (associated(oc%previous)) then
+          oc%previous%next => oc%next
+       end if
+       if (associated(oc%next)) then
+          oc%next%previous => oc%previous
+          delete_output => oc%next
+       else
+          delete_output => NULL()
+       end if
+       if (closefile) then
+          ! NOTE(review): the nf90_close status is captured but not checked
+          status = nf90_close(oc%nc%id)
+          call write_log_div
+          call write_log('Closing output file '//trim(oc%nc%filename))
+       end if
+       deallocate(oc)
+    end if
+  end function delete_output
+
+ !> remove input file from linked list
+ !!
+ !! \return the next input file or NULL()
+  function delete_input(ic,cf)
+    !> remove element from linked list
+    !! \return the next element in the list, or NULL() if ic was the tail
+    use glimmer_log
+    implicit none
+    type(glimmer_nc_input), pointer :: delete_input
+    type(glimmer_nc_input), pointer :: ic !< the input file to be removed
+    logical, intent(in), optional :: cf !< set to .True. if file should be closed
+
+    ! local variables
+    logical closefile
+    integer status
+
+    ! by default the underlying netCDF file is closed
+    if (present(cf)) then
+       closefile = cf
+    else
+       closefile = .true.
+    end if
+
+    if (associated(ic)) then
+       ! unlink ic from the list before deallocating it
+       if (associated(ic%previous)) then
+          ic%previous%next => ic%next
+       end if
+       if (associated(ic%next)) then
+          ic%next%previous => ic%previous
+          delete_input => ic%next
+       else
+          delete_input => NULL()
+       end if
+       if (closefile) then
+          status = nf90_close(ic%nc%id)
+          call write_log_div
+          call write_log('Closing input file '//trim(ic%nc%filename))
+       end if
+       ! times is initialized => NULL() and may never have been
+       ! allocated; deallocating a disassociated pointer is an error
+       if (associated(ic%times)) deallocate(ic%times)
+       deallocate(ic)
+    end if
+  end function delete_input
+
+ !> add a new output file
+ !!
+ !! \return pointer to added output file
+  function add_output(oc)
+    !> allocate a new output descriptor and splice it in after oc
+    !! \return pointer to the newly added element
+    implicit none
+    type(glimmer_nc_output), pointer :: add_output
+    type(glimmer_nc_output), pointer :: oc !< the output file to be added
+
+    allocate(add_output)
+
+    ! starting a new list: nothing to link
+    if (.not. associated(oc)) return
+
+    ! insert the new node between oc and oc%next
+    add_output%previous => oc
+    if (associated(oc%next)) then
+       add_output%next => oc%next
+       oc%next%previous => add_output
+    end if
+    oc%next => add_output
+  end function add_output
+
+ !> add a new input file
+ !!
+ !! \return pointer to added input file
+  function add_input(ic)
+    !> allocate a new input descriptor and splice it in after ic
+    !! \return pointer to the newly added element
+    implicit none
+    type(glimmer_nc_input), pointer :: add_input
+    type(glimmer_nc_input), pointer :: ic !< the input file to be added
+
+    allocate(add_input)
+
+    ! starting a new list: nothing to link
+    if (.not. associated(ic)) return
+
+    ! insert the new node between ic and ic%next
+    add_input%previous => ic
+    if (associated(ic%next)) then
+       add_input%next => ic%next
+       ic%next%previous => add_input
+    end if
+    ic%next => add_input
+  end function add_input
+
+ !> for debugging print all output files in linked list
+  recursive subroutine nc_print_output(output)
+
+    !> Debugging helper: dump every output descriptor in the list.
+
+    type(glimmer_nc_output),pointer :: output
+    type(glimmer_nc_output),pointer :: cursor
+
+    if (.not.associated(output)) then
+       Print*,'*** Output section not associated'
+       return
+    end if
+
+    ! walk the list iteratively rather than recursing
+    cursor => output
+    do while (associated(cursor))
+       call nc_print_stat(cursor%nc)
+       print*,'freq: ',cursor%freq
+       print*,'next_write: ',cursor%next_write
+       print*,'timecounter:',cursor%timecounter
+       ! call nc_print_meta(cursor%metadata)
+       cursor => cursor%next
+    end do
+
+  end subroutine nc_print_output
+
+  !> Debugging helper: print every field of a netCDF file descriptor.
+  subroutine nc_print_stat(stat)
+
+    type(glimmer_nc_stat) :: stat
+
+    print*,'define_mode: ',stat%define_mode
+    print*,'just_processed: ',stat%just_processed
+    print*,'processsed_time:',stat%processsed_time
+    print*,'filename: ',stat%filename
+    print*,'id: ',stat%id
+    print*,'nlevel: ',stat%nlevel
+    print*,'nstaglevel: ',stat%nstaglevel
+    print*,'nstagwbndlevel: ',stat%nstagwbndlevel
+    print*,'timedim: ',stat%timedim
+    print*,'timevar: ',stat%timevar
+    print*,'vars: ',trim(stat%vars)
+
+  end subroutine nc_print_stat
+
+ !> Sets up previous points in the linked list correctly
+ !!
+ !! This is needed after a restart, as trying to save both
+ !! next and previous pointers would cause problems
+ !! Also resets some other internal components
+  subroutine nc_repair_outpoint(output)
+
+    implicit none
+
+    type(glimmer_nc_output),pointer :: output
+    type(glimmer_nc_output),pointer :: most_recent
+    type(glimmer_nc_output),pointer :: tmp
+
+    most_recent => null()
+    if (.not.associated(output)) return
+    tmp => output
+
+    ! Walk the list: re-point each node's previous at the node we just
+    ! visited (the head's previous is left untouched), and restore the
+    ! working variable string from its retained copy.
+    do
+       if (associated(most_recent)) tmp%previous => most_recent
+       tmp%nc%vars=tmp%nc%vars_copy
+       if (.not.associated(tmp%next)) exit
+       most_recent => tmp
+       tmp => tmp%next
+    end do
+
+  end subroutine nc_repair_outpoint
+
+  subroutine nc_repair_inpoint(input)
+
+    implicit none
+
+    !> Rebuild the previous pointers of the input-file linked list.
+    !> Needed after a restart, as trying to save both next and previous
+    !> pointers would cause problems; only next is stored, and previous
+    !> is reconstructed here.
+
+    type(glimmer_nc_input),pointer :: input
+    type(glimmer_nc_input),pointer :: node
+
+    if (.not.associated(input)) return
+
+    ! every node's successor points back at it; the head's previous
+    ! is left untouched
+    node => input
+    do while (associated(node%next))
+       node%next%previous => node
+       node => node%next
+    end do
+
+  end subroutine nc_repair_inpoint
+
+  subroutine nc_prefix_outfiles(output,prefix)
+
+    !> Adds a prefix to all the filenames stored in the linked list.
+    !> Used for restarts.
+
+    type(glimmer_nc_output),pointer :: output
+    character(*) :: prefix !< string prepended to each filename
+
+    type(glimmer_nc_output),pointer :: tmp
+
+    ! guard against an empty list: the loop below dereferences the
+    ! pointer unconditionally, which crashed when output was null
+    if (.not.associated(output)) return
+
+    tmp => output
+    do
+       tmp%nc%filename=trim(prefix)//trim(tmp%nc%filename)
+       if (.not.associated(tmp%next)) exit
+       tmp => tmp%next
+    end do
+
+  end subroutine nc_prefix_outfiles
+
+  subroutine nc_errorhandle(file,line,status)
+    !> Handle a netCDF return code: log a fatal error unless it is NF90_NOERR.
+    use netcdf
+    use glimmer_log
+    implicit none
+    character(len=*), intent(in) :: file !< name of f90 file error occured in
+    integer, intent(in) :: line          !< line number error occured at
+    integer, intent(in) :: status        !< netCDF return value
+
+    if (status == NF90_NOERR) return
+    call write_log(nf90_strerror(status),type=GM_FATAL,file=file,line=line)
+  end subroutine nc_errorhandle
+
+end module glimmer_ncdf
+
+
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_ncio.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_ncio.F90
new file mode 100644
index 0000000000..f933369138
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_ncio.F90
@@ -0,0 +1,670 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_ncio.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see .
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#define NCO outfile%nc
+#define NCI infile%nc
+
+module glimmer_ncio
+ !> module for common netCDF I/O
+ !> written by Magnus Hagdorn, 2004
+
+ use glimmer_ncdf
+
+ implicit none
+
+ integer,parameter,private :: msglen=512
+
+contains
+ !*****************************************************************************
+ ! netCDF output
+ !*****************************************************************************
+  subroutine openall_out(model,outfiles)
+    !> open all netCDF files for output
+    use glide_types
+    use glimmer_ncdf
+    implicit none
+    type(glide_global_type) :: model
+    type(glimmer_nc_output),pointer,optional :: outfiles
+
+    ! local variables
+    type(glimmer_nc_output), pointer :: oc
+
+    ! start from the caller-supplied list if given, otherwise from the
+    ! model's own list of output files
+    oc => model%funits%out_first
+    if (present(outfiles)) oc => outfiles
+
+    ! create each file, or reopen it when appending was requested
+    do while (associated(oc))
+       if (oc%append) then
+          call glimmer_nc_openappend(oc,model)
+       else
+          call glimmer_nc_createfile(oc,model)
+       end if
+       oc => oc%next
+    end do
+  end subroutine openall_out
+
+  subroutine closeall_out(model,outfiles)
+    !> close all netCDF files for output
+    use glide_types
+    use glimmer_ncdf
+    implicit none
+    type(glide_global_type) :: model
+    type(glimmer_nc_output),pointer,optional :: outfiles
+
+    ! local variables
+    type(glimmer_nc_output), pointer :: oc
+
+    ! start from the caller-supplied list if given, otherwise from the
+    ! model's own list of output files
+    oc => model%funits%out_first
+    if (present(outfiles)) oc => outfiles
+
+    ! delete() closes each file and returns the next element
+    do while (associated(oc))
+       oc => delete(oc)
+    end do
+
+    ! the model no longer owns any output files
+    if (.not.present(outfiles)) model%funits%out_first => NULL()
+  end subroutine closeall_out
+
+  subroutine glimmer_nc_openappend(outfile,model)
+    !> open netCDF file for appending
+    use parallel
+    use glimmer_log
+    use glide_types
+    use glimmer_map_CFproj
+    use glimmer_map_types
+    use glimmer_filenames
+    implicit none
+    type(glimmer_nc_output), pointer :: outfile
+    !> structure containing output netCDF descriptor
+    type(glide_global_type) :: model
+    !> the model instance
+
+    ! local variables
+    integer :: status,timedimid,ntime,timeid
+    real(dp),dimension(1) :: last_time
+    character(len=msglen) :: message
+
+    ! open existing netCDF file
+    status = parallel_open(process_path(NCO%filename),NF90_WRITE,NCO%id)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    call write_log_div
+    write(message,*) 'Reopening file ',trim(process_path(NCO%filename)),' for output; '
+    call write_log(trim(message))
+    ! Find out when last time-slice was
+    status = parallel_inq_dimid(NCO%id,'time',timedimid)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    status = parallel_inquire_dimension(NCO%id,timedimid,len=ntime)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+    ! Set timecounter so new slices are written after the existing ones
+    outfile%timecounter=ntime+1
+    write(message,*) '  Starting output at ',outfile%next_write,' and write every ',outfile%freq,' years'
+    call write_log(trim(message))
+
+    ! Get time varid
+    status = parallel_inq_varid(NCO%id,'time',NCO%timevar)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+
+    ! Put dataset into define mode (left in define mode for the caller
+    ! to add any further variable definitions)
+    status = parallel_redef(NCO%id)
+    call nc_errorhandle(__FILE__,__LINE__,status)
+
+  end subroutine glimmer_nc_openappend
+
+ subroutine glimmer_nc_createfile(outfile,model)
+ !> create a new netCDF file
+ use parallel
+ use glimmer_log
+ use glide_types
+ use glimmer_map_CFproj
+ use glimmer_map_types
+ use glimmer_filenames
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ !> structure containg output netCDF descriptor
+ type(glide_global_type) :: model
+ !> the model instance
+
+ ! local variables
+ integer status
+ integer mapid
+ character(len=msglen) message
+
+ ! create new netCDF file
+ !WHL - Changed the following line to support large netCDF output files
+!! status = parallel_create(process_path(NCO%filename),NF90_CLOBBER,NCO%id)
+ status = parallel_create(process_path(NCO%filename), or(NF90_CLOBBER,NF90_64BIT_OFFSET), NCO%id)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ call write_log_div
+ write(message,*) 'Opening file ',trim(process_path(NCO%filename)),' for output; '
+ call write_log(trim(message))
+ write(message,*) ' Starting output at ',outfile%next_write,' and write every ',outfile%freq,' years'
+ call write_log(trim(message))
+ if (outfile%end_write < glimmer_nc_max_time) then
+ write(message,*) ' Stop writing at ',outfile%end_write
+ call write_log(trim(message))
+ end if
+ NCO%define_mode=.TRUE.
+
+ ! writing meta data
+ status = parallel_put_att(NCO%id, NF90_GLOBAL, 'Conventions', "CF-1.3")
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, NF90_GLOBAL,'title',trim(outfile%metadata%title))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, NF90_GLOBAL,'institution',trim(outfile%metadata%institution))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, NF90_GLOBAL,'source',trim(outfile%metadata%source))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, NF90_GLOBAL,'history',trim(outfile%metadata%history))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, NF90_GLOBAL,'references',trim(outfile%metadata%references))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, NF90_GLOBAL,'comment',trim(outfile%metadata%comment))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, NF90_GLOBAL,'configuration',trim(outfile%metadata%config))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+
+ ! defining time dimension and variable
+ status = parallel_def_dim(NCO%id,'time',NF90_UNLIMITED,NCO%timedim)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ ! time -- Model time
+ call write_log('Creating variable time')
+ !EIB! lanl version
+ !status = nf90_def_var(NCO%id,'time',NF90_FLOAT,(/NCO%timedim/),NCO%timevar)
+ !EIB! gc2 version
+ status = parallel_def_var(NCO%id,'time',outfile%default_xtype,(/NCO%timedim/),NCO%timevar)
+ !EIB! pick one and be consistent
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_put_att(NCO%id, NCO%timevar, 'long_name', 'Model time')
+ status = parallel_put_att(NCO%id, NCO%timevar, 'standard_name', 'time')
+ status = parallel_put_att(NCO%id, NCO%timevar, 'units', 'year since 1-1-1 0:0:0')
+ status = parallel_put_att(NCO%id, NCO%timevar, 'calendar', 'none')
+
+ ! adding projection info
+ if (glimmap_allocated(model%projection)) then
+ status = parallel_def_var(NCO%id,glimmer_nc_mapvarname,NF90_CHAR,mapid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ call glimmap_CFPutProj(NCO%id,mapid,model%projection)
+ end if
+
+ ! setting the size of the level and staglevel dimension
+ NCO%nlevel = model%general%upn
+ NCO%nstaglevel = model%general%upn-1
+ NCO%nstagwbndlevel = model%general%upn ! MJH this is the max index, not the size
+ end subroutine glimmer_nc_createfile
+
+ subroutine glimmer_nc_checkwrite(outfile,model,forcewrite,time)
+ !> check if we should write to file
+ use parallel
+ use glimmer_log
+ use glide_types
+ use glimmer_filenames
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ type(glide_global_type) :: model
+ logical forcewrite
+ real(dp),optional :: time
+
+ character(len=msglen) :: message
+ integer status
+ real(dp) :: sub_time
+
+ real(dp), parameter :: eps = 1.d-11
+
+ ! Check for optional time argument
+ if (present(time)) then
+ sub_time=time
+ else
+ sub_time=model%numerics%time
+ end if
+
+ ! check if we are still in define mode and if so leave it
+ if (NCO%define_mode) then
+ status = parallel_enddef(NCO%id)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ NCO%define_mode = .FALSE.
+ end if
+
+ if (sub_time > NCO%processsed_time) then
+ if (NCO%just_processed) then
+ ! finished writing during last time step, need to increase counter...
+
+ outfile%timecounter = outfile%timecounter + 1
+ status = parallel_sync(NCO%id)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ NCO%just_processed = .FALSE.
+ end if
+ end if
+
+ !WHL - Allow for small roundoff error in computing the time
+!! if (sub_time >= outfile%next_write .or. (forcewrite .and. sub_time > outfile%next_write-outfile%freq)) then ! prone to roundoff error
+ if (sub_time + eps >= outfile%next_write .or. (forcewrite .and. sub_time > outfile%next_write-outfile%freq)) then
+ if (sub_time <= outfile%end_write .and. .not.NCO%just_processed) then
+ call write_log_div
+ write(message,*) 'Writing to file ', trim(process_path(NCO%filename)), ' at time ', sub_time
+ call write_log(trim(message))
+ ! increase next_write
+ outfile%next_write = outfile%next_write + outfile%freq
+ NCO%processsed_time = sub_time
+ ! write time
+ status = parallel_put_var(NCO%id,NCO%timevar,sub_time,(/outfile%timecounter/))
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ NCO%just_processed = .TRUE.
+ end if
+ end if
+
+ end subroutine glimmer_nc_checkwrite
+
+ !*****************************************************************************
+ ! netCDF input
+ !*****************************************************************************
+ subroutine openall_in(model)
+ !> open all netCDF files for input
+ use glide_types
+ use glimmer_ncdf
+ implicit none
+ type(glide_global_type) :: model
+
+ ! local variables
+ type(glimmer_nc_input), pointer :: ic
+
+ ! open input files
+ ic=>model%funits%in_first
+ do while(associated(ic))
+ call glimmer_nc_openfile(ic,model)
+ ic=>ic%next
+ end do
+
+ ! open forcing files
+ ic=>model%funits%frc_first
+ do while(associated(ic))
+ call glimmer_nc_openfile(ic,model)
+ ic=>ic%next
+ end do
+ end subroutine openall_in
+
+ subroutine closeall_in(model)
+ !> close all netCDF files for input
+ use glide_types
+ use glimmer_ncdf
+ implicit none
+ type(glide_global_type) :: model
+
+ ! local variables
+ type(glimmer_nc_input), pointer :: ic
+
+ ! Input files
+ ic=>model%funits%in_first
+ do while(associated(ic))
+ ic=>delete(ic)
+ end do
+ model%funits%in_first=>NULL()
+
+ ! Forcing files
+ ic=>model%funits%frc_first
+ do while(associated(ic))
+ ic=>delete(ic)
+ end do
+ model%funits%frc_first=>NULL()
+
+ end subroutine closeall_in
+
+ subroutine glimmer_nc_openfile(infile,model)
+ !> open an existing netCDF file
+ use glide_types
+ use glimmer_map_CFproj
+ use glimmer_map_types
+ use glimmer_log
+ use glimmer_paramets, only: len0
+ use glimmer_filenames
+ use parallel
+ implicit none
+ type(glimmer_nc_input), pointer :: infile
+ !> structure containing input netCDF descriptor
+ type(glide_global_type) :: model
+ !> the model instance
+
+ ! local variables
+ integer dimsize, dimid, varid
+ real, dimension(2) :: delta
+ integer status
+ character(len=msglen) message
+
+ real,parameter :: small = 1.e-6
+
+ ! open netCDF file
+ status = parallel_open(process_path(NCI%filename),NF90_NOWRITE,NCI%id)
+ if (status /= NF90_NOERR) then
+ call write_log('Error opening file '//trim(process_path(NCI%filename))//': '//nf90_strerror(status),&
+ type=GM_FATAL,file=__FILE__,line=__LINE__)
+ end if
+ call write_log_div
+ call write_log('opening file '//trim(process_path(NCI%filename))//' for input')
+
+ ! getting projection, if none defined already
+ if (.not.glimmap_allocated(model%projection)) model%projection = glimmap_CFGetProj(NCI%id)
+
+ ! getting time dimension
+ status = parallel_inq_dimid(NCI%id, 'time', NCI%timedim)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ ! get id of time variable
+ status = parallel_inq_varid(NCI%id,'time',NCI%timevar)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+
+ ! getting length of time dimension and allocating memory for array containing times
+ status = parallel_inquire_dimension(NCI%id,NCI%timedim,len=dimsize)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ allocate(infile%times(dimsize))
+ infile%nt=dimsize
+ status = parallel_get_var(NCI%id,NCI%timevar,infile%times)
+
+ ! setting the size of the level and staglevel dimension
+ NCI%nlevel = model%general%upn
+ NCI%nstaglevel = model%general%upn-1
+ NCI%nstagwbndlevel = model%general%upn !MJH This is the max index, not size
+
+ ! checking if dimensions and grid spacing are the same as in the configuration file
+ ! x1
+ status = parallel_inq_dimid(NCI%id,'x1',dimid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (dimsize /= global_ewn) then
+ write(message,*) 'Dimension x1 of file '//trim(process_path(NCI%filename))// &
+ ' does not match with config dimension: ', dimsize, global_ewn
+ call write_log(message,type=GM_FATAL)
+ end if
+ status = parallel_inq_varid(NCI%id,'x1',varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_var(NCI%id,varid,delta)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+
+!WHL - mod to prevent code from crashing due to small roundoff error
+! if (abs(delta(2)-delta(1) - model%numerics%dew*len0) > small) then
+ if (abs( (delta(2)-delta(1) - model%numerics%dew*len0) / (model%numerics%dew*len0) ) > small) then
+ write(message,*) 'deltax1 of file '//trim(process_path(NCI%filename))// &
+ ' does not match with config deltax: ', delta(2)-delta(1),model%numerics%dew*len0
+ call write_log(message,type=GM_FATAL)
+ end if
+
+ ! x0
+ !status = nf90_inq_dimid(NCI%id,'x0',dimid)
+ !call nc_errorhandle(__FILE__,__LINE__,status)
+ !status = nf90_inquire_dimension(NCI%id,dimid,len=dimsize)
+ !call nc_errorhandle(__FILE__,__LINE__,status)
+ !if (dimsize /= model%general%ewn-1) then
+ ! write(message,*) 'Dimension x0 of file ',trim(process_path(NCI%filename)),' does not match with config dimension: ', &
+ ! dimsize, model%general%ewn-1
+ ! call write_log(message,type=GM_FATAL)
+ !end if
+ !status = nf90_inq_varid(NCI%id,'x0',varid)
+ !call nc_errorhandle(__FILE__,__LINE__,status)
+ !status = nf90_get_var(NCI%id,varid,delta)
+ !call nc_errorhandle(__FILE__,__LINE__,status)
+ !if (abs(delta(2)-delta(1) - model%numerics%dew*len0) > small) then
+ ! write(message,*) 'deltax0 of file '//trim(process_path(NCI%filename))//' does not match with config deltax: ', &
+ ! delta(2)-delta(1),model%numerics%dew*len0
+ ! call write_log(message,type=GM_FATAL)
+ !end if
+
+ ! y1
+ status = parallel_inq_dimid(NCI%id,'y1',dimid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_inquire_dimension(NCI%id,dimid,len=dimsize)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ if (dimsize /= global_nsn) then
+ write(message,*) 'Dimension y1 of file '//trim(process_path(NCI%filename))// &
+ ' does not match with config dimension: ', dimsize, global_nsn
+ call write_log(message,type=GM_FATAL)
+ end if
+ status = parallel_inq_varid(NCI%id,'y1',varid)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+ status = parallel_get_var(NCI%id,varid,delta)
+ call nc_errorhandle(__FILE__,__LINE__,status)
+
+
+!WHL - mod to prevent code from crashing due to small roundoff error
+! if (abs(delta(2)-delta(1) - model%numerics%dns*len0) > small) then
+ if (abs( (delta(2)-delta(1) - model%numerics%dns*len0) / (model%numerics%dns*len0) ) > small) then
+ write(message,*) 'deltay1 of file '//trim(process_path(NCI%filename))// &
+ ' does not match with config deltay: ', delta(2)-delta(1),model%numerics%dns*len0
+ call write_log(message,type=GM_FATAL)
+ end if
+
+ ! y0
+ !status = nf90_inq_dimid(NCI%id,'y0',dimid)
+ !call nc_errorhandle(__FILE__,__LINE__,status)
+ !status = nf90_inquire_dimension(NCI%id,dimid,len=dimsize)
+ !call nc_errorhandle(__FILE__,__LINE__,status)
+ !if (dimsize /= model%general%nsn-1) then
+ ! write(message,*) 'Dimension y0 of file '//trim(process_path(NCI%filename))//' does not match with config dimension: ',&
+ ! dimsize, model%general%nsn-1
+ ! call write_log(message,type=GM_FATAL)
+ !end if
+ !status = nf90_inq_varid(NCI%id,'y0',varid)
+ !call nc_errorhandle(__FILE__,__LINE__,status)
+ !status = nf90_get_var(NCI%id,varid,delta)
+ !call nc_errorhandle(__FILE__,__LINE__,status)
+ !if (abs(delta(2)-delta(1) - model%numerics%dns*len0) > small) then
+ ! write(message,*) 'deltay0 of file '//trim(process_path(NCI%filename))//' does not match with config deltay: ',&
+ ! delta(2)-delta(1),model%numerics%dns*len0
+ ! call write_log(message,type=GM_FATAL)
+ !end if
+
+ ! Check that the number of vertical layers is the same, though it's asking for trouble
+ ! to check whether the spacing is the same (don't want to put that burden on setup,
+ ! plus f.p. compare has been known to cause problems here)
+ status = parallel_inq_dimid(NCI%id,'level',dimid)
+ ! If we couldn't find the 'level' dimension fail with a warning.
+ ! We don't want to throw an error, as input files are only required to have it if they
+ ! include 3D data fields.
+ if (status == NF90_NOERR) then
+ status = parallel_inquire_dimension(NCI%id, dimid, len=dimsize)
+ call nc_errorhandle(__FILE__, __LINE__, status)
+ if (dimsize /= model%general%upn .and. dimsize /= 1) then
+ write(message,*) 'Dimension level of file '//trim(process_path(NCI%filename))//&
+ ' does not match with config dimension: ', &
+ dimsize, model%general%upn
+ call write_log(message,type=GM_FATAL)
+ end if
+ else
+ call write_log("Input file contained no level dimension. This is not necessarily a problem.", type=GM_WARNING)
+ end if
+
+ end subroutine glimmer_nc_openfile
+
+ subroutine glimmer_nc_checkread(infile,model,time)
+ !> check if we should read from file
+ use glimmer_log
+ use glide_types
+ use glimmer_filenames
+ implicit none
+ type(glimmer_nc_input), pointer :: infile !> structure containing input netCDF descriptor
+ type(glide_global_type) :: model !> the model instance
+ real(dp),optional :: time !> Optional alternative time
+
+ character(len=msglen) :: message
+
+ integer :: pos ! to identify restart files
+
+ real(dp) :: restart_time ! time of restart (yr)
+
+ if (infile%current_time <= infile%nt) then
+ if (.not.NCI%just_processed) then
+ call write_log_div
+ !EIB! added from gc2, needed?
+ ! Reset model%numerics%tstart if reading a restart file
+ !write(message,*) 'Check for restart:', trim(infile%nc%filename)
+ !call write_log(message)
+ pos = index(infile%nc%filename,'.r.') ! use CESM naming convention for restart files
+ if (pos /= 0) then ! get the start time based on the current time slice
+ restart_time = infile%times(infile%current_time) ! years
+ model%numerics%tstart = restart_time
+ model%numerics%time = restart_time
+ write(message,*) 'Restart: New tstart =', model%numerics%tstart
+ call write_log(message)
+ endif
+ !EIB! end add
+ write(message,*) 'Reading time slice ',infile%current_time,'(',infile%times(infile%current_time),') from file ', &
+ trim(process_path(NCI%filename)), ' at time ', sub_time(model, time)
+ call write_log(message)
+ NCI%just_processed = .TRUE.
+ NCI%processsed_time = sub_time(model, time)
+ end if
+ end if
+
+ if (sub_time(model, time) > NCI%processsed_time) then
+ if (NCI%just_processed) then
+ ! finished reading during last time step, need to increase counter...
+ infile%current_time = infile%current_time + 1
+ NCI%just_processed = .FALSE.
+ end if
+ end if
+
+ contains
+ real(dp) function sub_time(model, time)
+ ! Get the current time applicable to this subroutine.
+ ! If time is present, use that; otherwise use model%numerics%time
+ !
+ ! We need this function to avoid code duplication. We canNOT simply set a local
+ ! sub_time variable at the start of glimmer_nc_checkread, because model
+ ! %numerics%time can be updated in the midst of this routine... so we need to
+ ! determine sub_time when it's actually needed, with this function.
+ use glide_types
+ implicit none
+ type(glide_global_type) :: model !> the model instance
+ real(dp),optional :: time !> Optional alternative time
+
+ if (present(time)) then
+ sub_time = time
+ else
+ sub_time = model%numerics%time
+ end if
+ end function sub_time
+
+ end subroutine glimmer_nc_checkread
+
+!------------------------------------------------------------------------------
+
+ subroutine check_for_tempstag(whichdycore, nc)
+ ! Check for the need to output tempstag and update the output variables if needed.
+ !
+ ! For the glam/glissade dycore, the vertical temperature grid has an extra level.
+ ! In that case, the netCDF output file should include a variable
+ ! called tempstag(0:nz) instead of temp(1:nz). This subroutine is added for
+ ! convenience to allow the variable "temp" to be specified in the config
+ ! file in all cases and have it converted to "tempstag" when appropriate.
+ ! MJH
+
+ use glimmer_log
+ use glide_types
+
+ implicit none
+ integer, intent(in) :: whichdycore
+ type(glimmer_nc_stat) :: nc
+
+ ! Locals
+ integer :: i
+
+ ! Check if tempstag should be output
+
+ ! If both temp and tempstag are specified, temp will get converted to tempstag
+ ! and then there will be two tempstags in the list, but that is ok because
+ ! the parser ignores duplicate entries in the varlist.
+ ! (The check for the existence of variables looks like: pos = index(NCO%vars,' acab ') )
+
+ !print *, "Original varstring:", varstring
+
+ if (whichdycore/=DYCORE_GLIDE) then
+ ! We want temp to become tempstag
+ i = index(nc%vars, " temp ")
+ if (i > 0) then
+ ! temp was specified - change it to tempstag
+ ! If temp is listed more than once, this just changes the first instance
+ nc%vars = nc%vars(1:i-1) // " tempstag " // nc%vars(i+6:len(nc%vars))
+ call write_log('Temperature remapping option uses temperature on a staggered vertical grid.' // &
+ ' The netCDF output variable "temp" has been changed to "tempstag".' )
+ endif
+ ! Now check if flwa needs to be changed to flwastag
+ i = index(nc%vars, " flwa ") ! Look for flwa
+ if (i > 0) then
+ ! flwa was specified - change to flwastag
+ nc%vars = nc%vars(1:i-1) // " flwastag " // nc%vars(i+6:len(nc%vars))
+ call write_log('Temperature remapping option uses flwa on a staggered vertical grid.' // &
+ ' The netCDF output variable "flwa" has been changed to "flwastag".' )
+ endif
+ ! Now check if dissip needs to be changed to dissipstag
+ i = index(nc%vars, " dissip ") ! Look for dissip
+ if (i > 0) then
+ ! dissip was specified - change to dissipstag
+ nc%vars = nc%vars(1:i-1) // " dissipstag " // nc%vars(i+6:len(nc%vars))
+ call write_log('Temperature remapping option uses dissip on a staggered vertical grid.' // &
+ ' The netCDF output variable "dissip" has been changed to "dissipstag".' )
+ endif
+ else ! glide dycore
+ ! We want tempstag to become temp
+ i = index(nc%vars, " tempstag ")
+ if (i > 0) then
+ !Change tempstag to temp
+ nc%vars = nc%vars(1:i-1) // " temp " // nc%vars(i+10:len(nc%vars))
+ call write_log('The netCDF output variable "tempstag" should not be used with the Glide dycore.' // &
+ ' The netCDF output variable "tempstag" has been changed to "temp".' )
+ endif
+ ! We want flwastag to become flwa
+ i = index(nc%vars, " flwastag ")
+ if (i > 0) then
+ !Change flwastag to flwa
+ nc%vars = nc%vars(1:i-1) // " flwa " // nc%vars(i+10:len(nc%vars))
+ call write_log('The netCDF output variable "flwastag" should not be used with the Glide dycore.' // &
+ ' The netCDF output variable "flwastag" has been changed to "flwa".' )
+ endif
+ ! We want dissipstag to become dissip
+ i = index(nc%vars, " dissipstag ")
+ if (i > 0) then
+ !Change dissipstag to dissip
+ nc%vars = nc%vars(1:i-1) // " dissip " // nc%vars(i+10:len(nc%vars))
+ call write_log('The netCDF output variable "dissipstag" should not be used with the Glide dycore.' // &
+ ' The netCDF output variable "dissipstag" has been changed to "dissip".' )
+ endif
+ endif ! whichdycore
+
+ ! Copy any changes to vars_copy
+ nc%vars_copy = nc%vars
+
+ end subroutine check_for_tempstag
+
+!------------------------------------------------------------------------------
+
+
+end module glimmer_ncio
+
+!------------------------------------------------------------------------------
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_ncparams.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_ncparams.F90
new file mode 100644
index 0000000000..1552475e3b
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_ncparams.F90
@@ -0,0 +1,273 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_ncparams.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+#define NCO outfile%nc
+#define NCI infile%nc
+
+module glimmer_ncparams
+
+ ! read netCDF I/O related configuration files
+ ! written by Magnus Hagdorn, May 2004
+
+ use glimmer_ncdf, only: glimmer_nc_meta
+
+ implicit none
+
+ private
+ public :: glimmer_nc_readparams, default_metadata, handle_output, handle_input, configstring
+
+ type(glimmer_nc_meta),save :: default_metadata
+ character(10000) :: configstring
+
+
+contains
+ subroutine glimmer_nc_readparams(model,config)
+ ! read netCDF I/O related configuration file
+ use glide_types
+ use glimmer_config
+ implicit none
+ type(glide_global_type) :: model ! model instance
+ type(ConfigSection), pointer :: config ! structure holding sections of configuration file
+
+ ! local variables
+ type(ConfigSection), pointer :: section
+ type(glimmer_nc_output), pointer :: output => null()
+ type(glimmer_nc_input), pointer :: input => null()
+ type(glimmer_nc_input), pointer :: forcing => null()
+
+ ! get config string
+ call ConfigAsString(config,configstring)
+
+ ! get default meta data
+ call GetSection(config,section,'CF default')
+ if (associated(section)) then
+ call handle_metadata(section, default_metadata, .true.)
+ end if
+
+ ! setup outputs
+ call GetSection(config,section,'CF output')
+ do while(associated(section))
+ output => handle_output(section,output,model%numerics%tstart,configstring)
+ if (.not.associated(model%funits%out_first)) then
+ model%funits%out_first => output
+ end if
+ call GetSection(section%next,section,'CF output')
+ end do
+
+ ! setup inputs
+ call GetSection(config,section,'CF input')
+ do while(associated(section))
+ input => handle_input(section,input)
+ if (.not.associated(model%funits%in_first)) then
+ model%funits%in_first => input
+ end if
+ call GetSection(section%next,section,'CF input')
+ end do
+
+ ! setup forcings
+ call GetSection(config,section,'CF forcing')
+ do while(associated(section))
+ forcing => handle_forcing(section,forcing)
+ if (.not.associated(model%funits%frc_first)) then
+ model%funits%frc_first => forcing
+ end if
+ call GetSection(section%next,section,'CF forcing')
+ end do
+
+ output => null()
+ input => null()
+ forcing => null()
+
+ end subroutine glimmer_nc_readparams
+
+ !==================================================================================
+ ! private procedures
+ !==================================================================================
+
+ subroutine handle_metadata(section,metadata, default)
+ use glimmer_ncdf
+ use glimmer_config
+ !use glimmer_global, only: glimmer_version !EIB! glimmer_version not a module in gc2
+ implicit none
+ type(ConfigSection), pointer :: section
+ type(glimmer_nc_meta) ::metadata
+ logical :: default
+
+ !EIB! from gc2, may have been replaced by glimmer_version above, or vice versa??
+
+ character(len=100), external :: glimmer_version_char
+
+ ! local variables
+ character(len=8) :: date
+ character(len=10) :: time
+
+ if (.not.default) then
+ metadata%title = trim(default_metadata%title)
+ metadata%institution = trim(default_metadata%institution)
+ metadata%references = trim(default_metadata%references)
+ metadata%comment = trim(default_metadata%comment)
+ end if
+
+ call GetValue(section,'title',metadata%title)
+ call GetValue(section,'institution',metadata%institution)
+ call GetValue(section,'references',metadata%references)
+ call GetValue(section,'comment',metadata%comment)
+
+ if (default) then
+ call date_and_time(date,time)
+ !EIB!metadata%source = 'Generated by '//trim(glimmer_version)
+ metadata%source = 'Generated by '//trim(glimmer_version_char())
+ write(metadata%history,fmt="(a4,'-',a2,'-',a2,' ',a2,':',a2,':',a6,' : ',a)") date(1:4),date(5:6),date(7:8),&
+ !EIB!time(1:2),time(3:4),time(5:10),trim(glimmer_version)
+ time(1:2),time(3:4),time(5:10),trim(glimmer_version_char())
+ else
+ metadata%source = trim(default_metadata%source)
+ metadata%history = trim(default_metadata%history)
+ end if
+ end subroutine handle_metadata
+
+
+ function handle_output(section, output, start_yr, configstring)
+ use glimmer_ncdf
+ use glimmer_config
+ use glimmer_log
+ use glimmer_global, only: dp
+ implicit none
+
+ type(ConfigSection), pointer :: section
+ type(glimmer_nc_output), pointer :: output
+ type(glimmer_nc_output), pointer :: handle_output
+ real(dp), intent(in) :: start_yr
+ character(*),intent(in) :: configstring
+ character(10) :: mode_str,xtype_str
+
+ handle_output=>add(output)
+
+ handle_output%next_write = start_yr
+ mode_str=''
+ xtype_str = 'real'
+
+ ! get filename
+ call GetValue(section,'name',handle_output%nc%filename)
+ call GetValue(section,'start',handle_output%next_write)
+ call GetValue(section,'stop',handle_output%end_write)
+ call GetValue(section,'frequency',handle_output%freq)
+ call GetValue(section,'variables',handle_output%nc%vars)
+ call GetValue(section,'mode',mode_str)
+ call GetValue(section,'xtype',xtype_str)
+
+ ! handle mode field
+ if (trim(mode_str)=='append'.or.trim(mode_str)=='APPEND') then
+ handle_output%append = .true.
+ else
+ handle_output%append = .false.
+ end if
+
+ !EIB! from gc2
+ ! handle xtype field
+ if (trim(xtype_str)=='real'.or.trim(xtype_str)=='REAL') then
+ handle_output%default_xtype = NF90_REAL
+ else if (trim(xtype_str)=='double'.or.trim(xtype_str)=='DOUBLE') then
+ handle_output%default_xtype = NF90_DOUBLE
+ else
+ call write_log('Error, unknown xtype, must be real or double [netCDF output]',GM_FATAL)
+ end if
+ !EIB!
+
+ ! add config data
+ handle_output%metadata%config=trim(configstring)
+
+ ! Make copy of variables for future reference
+ handle_output%nc%vars_copy=handle_output%nc%vars
+
+ ! get metadata
+ call handle_metadata(section, handle_output%metadata,.false.)
+ if (handle_output%nc%filename(1:1)==' ') then
+ call write_log('Error, no file name specified [netCDF output]',GM_FATAL)
+ end if
+ end function handle_output
+
+
+ function handle_input(section, input)
+ use glimmer_ncdf
+ use glimmer_config
+ use glimmer_log
+ use glimmer_filenames, only : filenames_inputname !EIB! not in lanl, which is newer?
+ implicit none
+ type(ConfigSection), pointer :: section
+ type(glimmer_nc_input), pointer :: input
+ type(glimmer_nc_input), pointer :: handle_input
+
+ handle_input=>add(input)
+
+ ! get filename
+ call GetValue(section,'name',handle_input%nc%filename)
+ call GetValue(section,'time',handle_input%get_time_slice)
+
+ handle_input%current_time = handle_input%get_time_slice
+
+ if (handle_input%nc%filename(1:1)==' ') then
+ call write_log('Error, no file name specified [netCDF input]',GM_FATAL)
+ end if
+
+ !EIB! from gc2
+ handle_input%nc%filename = trim(filenames_inputname(handle_input%nc%filename))
+
+ end function handle_input
+
+
+ function handle_forcing(section, forcing)
+ use glimmer_ncdf
+ use glimmer_config
+ use glimmer_log
+ use glimmer_filenames, only : filenames_inputname
+ implicit none
+ type(ConfigSection), pointer :: section
+ type(glimmer_nc_input), pointer :: forcing
+ type(glimmer_nc_input), pointer :: handle_forcing
+
+ handle_forcing=>add(forcing)
+
+ ! get filename
+ call GetValue(section,'name',handle_forcing%nc%filename)
+ call GetValue(section,'time',handle_forcing%get_time_slice) ! MJH don't think we'll use 'time' keyword in the forcing config section
+
+ handle_forcing%current_time = handle_forcing%get_time_slice
+
+ if (handle_forcing%nc%filename(1:1)==' ') then
+ call write_log('Error, no file name specified [netCDF forcing]',GM_FATAL)
+ end if
+
+ handle_forcing%nc%filename = trim(filenames_inputname(handle_forcing%nc%filename))
+
+ end function handle_forcing
+
+
+end module glimmer_ncparams
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_paramets.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_paramets.F90
new file mode 100644
index 0000000000..ef03aac887
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_paramets.F90
@@ -0,0 +1,145 @@
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_paramets.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> model scaling constants
+module glimmer_paramets
+
+ use glimmer_global, only : dp
+ use glimmer_physcon, only : scyr, rhoi, grav, gn
+
+ implicit none
+ save
+
+!WHL - logical parameter for code testing
+! If oldglide = T, the glide dycore will reproduce
+! (within roundoff error) the results
+! of Glimmer 1.0.18 for the dome and EISMINT-2 test cases.
+
+ !TODO - Remove oldglide parameter when comparisons to old Glide are no longer desired
+ logical, parameter :: oldglide = .false.
+! logical, parameter :: oldglide = .true.
+
+!TODO - redundant output units (stdout and glimmer_unit)
+! It is redundant to define both stdout (which is public) and
+! glimmer_unit (which is private to glimmer_log.F90).
+! However, it is sometimes convenient to write to stdout in Glimmer
+! without calling write_log.
+! May want to delete this later (and declare stdout in glc_constants
+! for CESM runs).
+
+ integer :: stdout = 6
+
+! logical flag to turn on special DEBUG output (related to test points), false by default
+
+ logical :: GLC_DEBUG = .false.
+
+!TODO: Redefine scaling parameters to have SI or similar units?
+! Considered removing these parameters from the code, but may be too much work.
+!
+! Note: If tau0 is redefined in the code in terms of rhoi and grav,
+! then all the scaling parameters could be written in terms of scyr.
+!
+! See comments below for details.
+
+
+ ! unphysical value used for initializing certain variables (e.g., temperature) so we can tell
+ ! later if they were read from an input file or otherwise computed correctly
+ real(dp), parameter :: unphys_val = -999.d0
+
+! scaling parameters
+
+! The fundamental scaling parameters are thk0, len0, and vel0. The others are derived from these.
+
+!SCALING - DFM, 2, Oct 2012 - made scaled vs. unscaled values for thk0, len0,
+! and vel0 switchable by the reconstituted NO_RESCALE compilation flag.
+! (necessary to be compatible with alternate dycores)
+
+#ifndef NO_RESCALE
+! The following are the old Glimmer scaling parameters.
+ real(dp), parameter :: thk0 = 2000.0d0 ! m
+ real(dp), parameter :: len0 = 200.0d3 ! m
+ real(dp), parameter :: vel0 = 500.d0 / scyr ! m yr^{-1} converted to S.I. units
+!! real(dp), parameter :: vis0 = 5.70d-18 / scyr ! yr^{-1} Pa^{-3} converted to S.I. units
+#else
+! (no rescaling)
+ real(dp), parameter :: thk0 = 1.d0 ! no scaling of thickness
+ real(dp), parameter :: len0 = 1.d0 ! no scaling of length
+ real(dp), parameter :: vel0 = 1.d0 / scyr ! yr * s^{-1}
+!Note - With the new value of vel0, the serial JFNK solver barely converges
+! for the first time step of the dome test. The Picard solver does fine.
+! Safer to use old scaling for now.
+! end (no rescaling)
+#endif
+
+ !Note: Both the SIA and HO solvers fail unless tim0 = len0/vel0. Not sure if this can be changed.
+ ! With the revised scaling, tim0 = scyr.
+ real(dp), parameter :: tim0 = len0 / vel0 ! s
+ real(dp), parameter :: acc0 = thk0 * vel0 / len0 ! m s^{-1}
+
+!Note - With thk0 = 1, can replace tau0 by rhoi*grav in code and remove stress scaling.
+! Similarly can redefine vis0 and evs0
+
+ ! GLAM scaling parameters; units are correct if thk0 has units of meters
+ real(dp), parameter :: tau0 = rhoi*grav*thk0 ! stress scale in GLAM ( Pa )
+ real(dp), parameter :: evs0 = tau0 / (vel0/len0) ! eff. visc. scale in GLAM ( Pa s )
+ real(dp), parameter :: vis0 = tau0**(-gn) * (vel0/len0) ! rate factor scale in GLAM ( Pa^-3 s^-1 )
+
+!SCALING - This is the scaling we would use if we had velocity in m/yr and thk0 = len0 = 1.
+! real(dp), parameter :: thk0 = 1.d0
+! real(dp), parameter :: len0 = 1.d0
+! real(dp), parameter :: vel0 = 1.d0 / scyr
+! real(dp), parameter :: tim0 = scyr
+! real(dp), parameter :: acc0 = 1.d0 / scyr
+! real(dp), parameter :: tau0 = rhoi*grav
+! real(dp), parameter :: evs0 = tau0*scyr
+! real(dp), parameter :: vis0 = tau0**(-gn) / scyr
+
+!WHL - Here I am defining some new constants that have the same values as thk0, len0, etc. in old Glimmer.
+! I am giving the new constants new names to minimize confusion.
+! These are used in only a few places. For instance, we have this in glide_thck:
+!
+! residual = maxval(abs(model%geometry%thck-model%thckwk%oldthck2))
+!
+! In old Glimmer, thk0 = 2000 m and thck = O(1)
+! In new CISM, thk0 = 1 and thck = true thickness in meters
+! With thk0 = 1, we need to divide the rhs by 2000 m to reproduce the results of old Glimmer.
+! The following code satisfies either of the two conventions:
+!
+! residual = maxval( abs(model%geometry%thck-model%thckwk%oldthck2) * (thk0/thk_scale) )
+
+ real(dp), parameter :: thk_scale = 2000.0d0 ! m
+ real(dp), parameter :: len_scale = 200.0d3 ! m
+ real(dp), parameter :: vel_scale = 500.0 / scyr ! m yr^{-1} converted to S.I. units
+ real(dp), parameter :: tau_scale = rhoi*grav*thk_scale ! stress scale in GLAM ( Pa )
+ real(dp), parameter :: vis_scale = tau_scale**(-gn) * (vel_scale/len_scale) ! rate factor scale in GLAM ( Pa^-3 s^-1 )
+ real(dp), parameter :: evs_scale = tau_scale / (vel_scale/len_scale) ! eff. visc. scale in GLAM ( Pa s )
+
+end module glimmer_paramets
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_physcon.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_physcon.F90
new file mode 100644
index 0000000000..9484a6d3d8
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_physcon.F90
@@ -0,0 +1,87 @@
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_physcon.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> Contains physical constants required by the ice model.
+module glimmer_physcon
+
+ use glimmer_global, only : dp
+
+#ifdef CCSMCOUPLED
+
+ use shr_const_mod, only: pi=> SHR_CONST_PI,&
+ rhoi=> SHR_CONST_RHOICE,&
+ rhoo=> SHR_CONST_RHOSW,&
+ rhow=> SHR_CONST_RHOFW,&
+ rearth=> SHR_CONST_REARTH,&
+ grav=> SHR_CONST_G,&
+ shci=> SHR_CONST_CPICE,&
+ lhci=> SHR_CONST_LATICE,&
+ trpt=> SHR_CONST_TKTRIP
+ implicit none
+ save
+
+#else
+
+ implicit none
+ save
+
+ real(dp),parameter :: pi = 3.14159265358979d0 !< Value of \f$\pi\f$.
+ real(dp),parameter :: rhoi = 910.d0 !< The density of ice (kg m-3)
+ real(dp),parameter :: rhoo = 1028.0d0 !< The density of the ocean (kg m-3)
+ real(dp),parameter :: rhow = 1000.0d0 !< The density of fresh water (kg m-3)
+ real(dp),parameter :: rearth = 6.37122d6 ! radius of earth (m)
+ real(dp),parameter :: grav = 9.81d0 !< The acceleration due to gravity (m s-2)
+ real(dp),parameter :: shci = 2009.0d0 !< Specific heat capacity of ice (J kg-1 K-1)
+ real(dp),parameter :: lhci = 335.0d3 !< Latent heat of melting of ice (J kg-1)
+ real(dp),parameter :: trpt = 273.15d0 !< Triple point of water (K)
+#endif
+
+ real(dp),parameter :: scyr = 31536000.d0 !< Number of seconds in a year of exactly 365 days
+ real(dp),parameter :: rhom = 3300.0d0 !< The density of magma(?) (kg m-3)
+ real(dp),parameter :: rhos = 2600.0d0 !< The density of solid till (kg m$^{-3}$)
+ real(dp),parameter :: f = - rhoo / rhoi
+  integer, parameter :: gn = 3          !< The power dependency of Glen's flow law.
+  real(dp),parameter :: actenh = 139.0d3    !< Activation energy in Glen's flow law for \f$T^{*}\geq263\f$K. (J mol-1)
+  real(dp),parameter :: actenl = 60.0d3     !< Activation energy in Glen's flow law for \f$T^{*}<263\f$K. (J mol-1)
+ real(dp),parameter :: arrmlh = 1.733d3 !< Constant of proportionality in Arrhenius relation
+ !< in \texttt{patebudd}, for \f$T^{*}\geq263\f$K.
+ !< (Pa-3 s-1)
+ real(dp),parameter :: arrmll = 3.613d-13 !< Constant of proportionality in Arrhenius relation
+ !< in \texttt{patebudd}, for \f$T^{*}<263\f$K.
+ !< (Pa-3 s-1)
+ real(dp),parameter :: gascon = 8.314d0 !< The gas ideal constant \f$R\f$ (J mol-1 K-1)
+ real(dp),parameter :: coni = 2.1d0 !< Thermal conductivity of ice (W m-1 K-1)
+ real(dp),parameter :: pmlt = 9.7456d-8 !< Factor for dependence of melting point on pressure (K Pa-1)
+ real(dp),parameter :: tocnfrz_sfc = -1.92d0 !< Freezing temperature of seawater (deg C) at surface pressure, S = 35 PSU
+ real(dp),parameter :: dtocnfrz_dh = -7.53d-4 !< Rate of change of freezing temperature of seawater with depth (deg/m), given S = 35 PSU
+ !< These values are from the Ocean Water Freezing Point Calculator,
+ !< http://www.csgnetwork.com/h2ofreezecalc.html (25 Nov. 2014)
+end module glimmer_physcon
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_scales.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_scales.F90
new file mode 100644
index 0000000000..ff00da3f73
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_scales.F90
@@ -0,0 +1,100 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_scales.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+! This module holds scales for various fields
+
+module glimmer_scales
+
+ use glimmer_global, only : dp
+
+ implicit none
+
+ real(dp) :: scale_uvel, scale_uflx, scale_diffu, scale_acab, scale_wvel, scale_btrc
+ real(dp) :: scale_beta, scale_flwa, scale_tau, scale_efvs, scale_resid
+
+ !WHL - Added scale_blfx to flip the sign of bheatflx. Typically, this flux has
+ ! a sign convention of positive up in input data, but the CISM convention
+ ! is positive down.
+ ! Considered changing the sign convention to positive up, but this would require
+ ! changes in several modules.
+ real(dp) :: scale_bflx
+
+contains
+
+ subroutine glimmer_init_scales
+
+ ! set scale factors for I/O (can't have non-integer powers)
+
+ use glimmer_physcon, only : scyr, gn
+ use glimmer_paramets, only : thk0, tim0, vel0, vis0, len0, acc0, tau0, evs0
+ implicit none
+
+#ifndef NO_RESCALE
+ scale_uvel = scyr * vel0 ! uvel, vvel, ubas, vbas, etc.
+ scale_uflx = scyr * vel0 * thk0 ! uflx, vflx
+ scale_diffu = scyr * vel0 * len0 ! diffu
+ scale_acab = scyr * thk0 / tim0 ! acab, bmlt
+ scale_wvel = scyr * thk0 / tim0 ! wvel, wgrd
+ scale_btrc = scyr * vel0 * len0 / (thk0**2) ! btrc, soft
+
+ scale_beta = tau0 / vel0 / scyr ! units: Pa * sec/m * yr/sec = Pa * yr/m
+ ! NOTE: on i/o, beta has units of Pa yr/m. Since vel0 has units of m/s,
+ ! the first two terms on the RHS have units of Pa s/m. Thus, the final
+ ! division by scyr here converts s/m to yr/m. All together, the 3 terms
+ ! on the RHS scale on i/o by Pa yr/m (thus, making dimensionless on input,
+ ! assuming the units on input are Pa yr/m, and also converting to Pa yr/m on output)
+
+ scale_flwa = scyr * vis0 ! flwa
+ scale_tau = tau0 ! tauf, tauxz, btractx
+ scale_efvs = evs0 / scyr ! efvs
+ scale_resid= tau0 / len0 ! resid_u, resid_v
+ scale_bflx = -1.d0 ! bheatflx (CISM sign convention is positive down,
+ ! whereas input data usually assumes positive up)
+#else
+! (no rescaling)
+ scale_uvel = 1.0d0 ! uvel, vvel, ubas, vbas, etc.
+ scale_uflx = 1.0d0 ! uflx, vflx
+ scale_diffu = 1.0d0 ! diffu
+ scale_acab = 1.0d0 ! acab, bmlt
+ scale_wvel = 1.0d0 ! wvel, wgrd
+ scale_btrc = 1.0d0 ! btrc, soft
+ scale_beta = 1.0d0
+
+ scale_flwa = 1.0d0 ! flwa
+ scale_tau = 1.0d0 ! tauf, tauxz, btractx
+ scale_efvs = 1.0d0 ! efvs
+ scale_resid = 1.0d0 ! resid_u, resid_v
+ scale_bflx = -1.d0 ! bheatflx (keeping this one -- CISM sign convention is
+ ! positive down, whereas input data usually assumes positive up)
+#endif
+
+ end subroutine glimmer_init_scales
+
+end module glimmer_scales
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_searchcircle.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_searchcircle.F90
new file mode 100644
index 0000000000..652e41ebee
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_searchcircle.F90
@@ -0,0 +1,250 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_searchcircle.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> improved algorithm for integrating a 2 dimensional array over large circles
+!! this used for calculating continentality
+!!
+!! \author Magnus Hagdorn
+module glimmer_searchcircle
+
+ use glimmer_global, only: dp
+ implicit none
+
+ type searchdata
+ logical :: initialised = .false.
+ integer :: radius !< search radius
+     integer, pointer, dimension(:) :: ipos   !< positions on quarter circle which will be moved along
+     integer :: istart, jstart                !< starting position of grid to be processed, will default to usually 1
+ integer :: isize, jsize !< size of array to be processed
+ real(dp) :: total_area
+     real(dp), pointer, dimension(:,:) :: sarray !< array to be searched (expanded to include outside points)
+ real(dp), pointer, dimension(:,:) :: weight !< reciprocal weights
+ end type searchdata
+
+ !MAKE_RESTART
+
+contains
+
+ !> initialise search circle data structure
+ !!
+ !! \return initialised data type
+ function sc_initdata(radius,istart,jstart,isize,jsize,searchgrid)
+ implicit none
+    integer, intent(in) :: radius        !< radius of the search circle
+ integer, intent(in) :: istart,jstart !< starting position of grid to be processed
+ integer, intent(in) :: isize,jsize !< size of array to be processed
+ real(dp), dimension(:,:), optional :: searchgrid !< used for determining bounds of grid to be searched
+ !< if not present, the bounds are assumed to be the same as the resultgrid
+
+ type(searchdata) :: sc_initdata
+
+ ! local variables
+ real(dp), allocatable, dimension(:) :: area
+ real(dp) :: area_temp
+ integer i,j,intrad,ii,jj
+ integer si_start,si_end,sj_start,sj_end,si_size,sj_size
+
+ ! filling structure
+ sc_initdata%radius = radius
+ sc_initdata%istart = istart
+ sc_initdata%jstart = jstart
+ sc_initdata%isize = isize
+ sc_initdata%jsize = jsize
+ ! allocating data
+ allocate(sc_initdata%sarray(1-radius:isize+radius,1-radius:jsize+radius))
+ allocate(sc_initdata%weight(isize, jsize))
+ allocate(sc_initdata%ipos(radius+1))
+
+ if (present(searchgrid)) then
+ si_start = lbound(searchgrid,1)
+ sj_start = lbound(searchgrid,2)
+ si_end = ubound(searchgrid,1)
+ sj_end = ubound(searchgrid,2)
+ si_size = si_end - si_start + 1
+ sj_size = sj_end - sj_start + 1
+ else
+ si_start = 1
+ sj_start = 1
+ si_end = isize
+ sj_end = jsize
+ si_size = isize
+ sj_size = jsize
+ end if
+
+ ! initialising data
+ ! mask
+ sc_initdata%sarray = 0.d0
+ sc_initdata%ipos = 0
+ ! weights
+    ! calculate integral over quarter circle
+ allocate(area(0:radius))
+ area(0) = radius
+ do i=1,radius
+ sc_initdata%ipos(i) = int(sqrt(real(radius*radius-i*i)))
+ area(i) = area(i-1)+real(sc_initdata%ipos(i))
+ end do
+ sc_initdata%total_area = 1.d0 + 4.d0*area(radius)
+
+ ! complaining if search circle does not fit
+ if (si_size < 2.d0*radius+2 .and. sj_size < 2.d0*radius+2) then
+ ! internal sums
+ sc_initdata%weight(1+radius:isize-radius, 1+radius:jsize-radius) = sc_initdata%total_area
+ do j=jstart,jstart+jsize-1
+ !left
+ do i=istart,istart+radius-1
+ area_temp = 0.d0
+ do jj=max(sj_start,j-radius),min(sj_end,j+radius)
+ intrad = int(sqrt(real(radius*radius-(jj-j)*(jj-j))))
+ do ii=max(si_start,i-intrad),min(si_end,i+intrad)
+ area_temp = area_temp + 1.d0
+ end do
+ end do
+ sc_initdata%weight(i-istart+1,j-jstart+1) = area_temp
+ end do
+ !right
+ do i=istart+isize-1-radius,istart+isize-1
+ area_temp = 0.d0
+ do jj=max(sj_start,j-radius),min(sj_end,j+radius)
+ intrad = int(sqrt(real(radius*radius-(jj-j)*(jj-j))))
+ do ii=max(si_start,i-intrad),min(si_end,i+intrad)
+ area_temp = area_temp + 1.d0
+ end do
+ end do
+ sc_initdata%weight(i-istart+1,j-jstart+1) = area_temp
+ end do
+ end do
+ ! lower
+ do j=jstart,jstart+radius-1
+ do i=istart+radius,istart+isize-1-radius
+ area_temp = 0.d0
+ do jj=max(sj_start,j-radius),min(sj_end,j+radius)
+ intrad = int(sqrt(real(radius*radius-(jj-j)*(jj-j))))
+ do ii=max(si_start,i-intrad),min(si_end,i+intrad)
+ area_temp = area_temp + 1.d0
+ end do
+ end do
+ sc_initdata%weight(i-istart+1,j-jstart+1) = area_temp
+ end do
+ end do
+ ! upper
+ do j=jstart+jsize-1-radius,jstart+jsize-1
+ do i=istart+radius,istart+isize-1-radius
+ area_temp = 0.d0
+ do jj=max(sj_start,j-radius),min(sj_end,j+radius)
+ intrad = int(sqrt(real(radius*radius-(jj-j)*(jj-j))))
+ do ii=max(si_start,i-intrad),min(si_end,i+intrad)
+ area_temp = area_temp + 1.d0
+ end do
+ end do
+ sc_initdata%weight(i-istart+1,j-jstart+1) = area_temp
+ end do
+ end do
+ else
+ do j=jstart,jstart+jsize-1
+ do i=istart,istart+isize-1
+ area_temp = 0.d0
+ do jj=max(sj_start,j-radius),min(sj_end,j+radius)
+ intrad = int(sqrt(real(radius*radius-(jj-j)*(jj-j))))
+ do ii=max(si_start,i-intrad),min(si_end,i+intrad)
+ area_temp = area_temp + 1.d0
+ end do
+ end do
+ sc_initdata%weight(i-istart+1,j-jstart+1) = area_temp
+ end do
+ end do
+ end if
+
+ sc_initdata%weight = sc_initdata%total_area/sc_initdata%weight
+
+ sc_initdata%initialised = .true.
+ end function sc_initdata
+
+
+ !> do the search
+ !!
+ !! \bug cony does not match at boundary. no idea what is going on...
+
+ subroutine sc_search(sdata,searchgrid,resultgrid)
+ implicit none
+ type(searchdata) :: sdata !< the search circle type
+ real(dp), dimension(:,:), intent(in) :: searchgrid !< the input mesh
+ real(dp), dimension(:,:), intent(out) :: resultgrid !< the result mesh
+
+ ! local variables
+ integer i,j,ii,jj,intrad
+ integer :: istart,iend,jstart,jend
+
+ if (.not.sdata%initialised) then
+ write(*,*) 'Error (searchcircle), module is not initialised'
+ stop
+ end if
+
+ ! checking grid sizes
+ if (any(shape(resultgrid) /= (/sdata%isize,sdata%jsize/))) then
+ write(*,*) 'Error (searchcircle), size of result grid does not match: ',shape(resultgrid),(/sdata%isize,sdata%jsize/)
+ stop
+ end if
+ !filling search array
+ sdata%sarray = 0.d0
+ istart = max(1, sdata%istart-sdata%radius)
+ iend = min(size(searchgrid,1),sdata%istart+sdata%isize+sdata%radius-1)
+ jstart = max(1, sdata%jstart-sdata%radius)
+ jend = min(size(searchgrid,2),sdata%jstart+sdata%jsize+sdata%radius-1)
+
+ sdata%sarray(1+istart-sdata%istart:iend-sdata%istart+1, 1+jstart-sdata%jstart:jend-sdata%jstart+1) = &
+ searchgrid(istart:iend, jstart:jend)
+ resultgrid = 0.d0
+
+ ! loop over grid
+ do j=1,sdata%jsize
+ ! do the full circle
+ i=1
+ do jj=j-sdata%radius,j+sdata%radius
+ intrad = int(sqrt(real(sdata%radius*sdata%radius-(jj-j)*(jj-j))))
+ do ii=i-intrad,i+intrad
+ resultgrid(i,j) = resultgrid(i,j) + sdata%sarray(ii,jj)
+ end do
+ end do
+
+       ! loop over the remaining columns in the current row
+ do i=2,sdata%isize
+ resultgrid(i,j) = resultgrid(i-1,j) - sdata%sarray(i-sdata%radius,j) + sdata%sarray(i+sdata%radius,j)
+ do jj=1,sdata%radius
+ resultgrid(i,j) = resultgrid(i,j) - sdata%sarray(i-sdata%ipos(jj),j+jj) + sdata%sarray(i+sdata%ipos(jj),j+jj)
+ resultgrid(i,j) = resultgrid(i,j) - sdata%sarray(i-sdata%ipos(jj),j-jj) + sdata%sarray(i+sdata%ipos(jj),j-jj)
+ end do
+ end do
+ end do
+
+ ! applying weights
+ resultgrid = resultgrid * sdata%weight
+ end subroutine sc_search
+
+end module glimmer_searchcircle
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_ts.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_ts.F90
new file mode 100644
index 0000000000..f898bc9d9e
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_ts.F90
@@ -0,0 +1,270 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_ts.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> handling time series
+!!
+!! \author Magnus Hagdorn
+!! \date 2006
+!!
+!! this module provides support for reading in tabulated ASCII data such as
+!! time series. The data can then be accessed through functions which
+!! interpolated the data.
+module glimmer_ts
+
+ use glimmer_global, only: dp
+ implicit none
+
+ !> time series derived type
+ type glimmer_tseries
+ integer :: numt=0 !< number of times in time series
+ integer :: numv=1 !< number of values per time
+ integer :: current=1 !< current position in ts
+ real(dp), dimension(:), pointer :: times=>NULL() !< array holding times
+ real(dp), dimension(:,:), pointer :: values=>NULL()!< array holding values
+ end type glimmer_tseries
+
+ interface glimmer_ts_step
+ module procedure glimmer_ts_step_array, glimmer_ts_step_scalar
+ end interface
+
+ interface glimmer_ts_linear
+ module procedure glimmer_ts_linear_array,glimmer_ts_linear_scalar
+ end interface
+
+ private :: get_i
+
+contains
+
+ !> read tabulated ASCII file
+ subroutine glimmer_read_ts(ts,fname,numv)
+ use glimmer_log
+ use glimmer_global, only : msg_length
+ implicit none
+ type(glimmer_tseries) :: ts !< time series data
+ character(len=*), intent(in) :: fname !< read from this file
+ integer, intent(in),optional :: numv !< number of values per time
+
+ ! local variables
+ real(dp) :: d1,d2,fact=1.
+ integer i,j,ios
+ character(len=msg_length) :: message
+
+ if (present(numv)) then
+ ts%numv = numv
+ else
+ ts%numv = 1
+ end if
+
+ open(99,file=trim(fname),status='old',iostat=ios)
+
+ if (ios.ne.0) then
+ call write_log('Error opening file: '//trim(fname),type=GM_FATAL)
+ end if
+
+ ! find number of times and checking if ts is strictly monotonic
+ ios = 0
+ d1 = 1
+ do
+ d2 = d1
+ read(99,*,iostat=ios) d1
+ d1 = fact*d1
+ if (ios.ne.0) then
+ exit
+ end if
+ ts%numt = ts%numt + 1
+ if (ts%numt.eq.1) then
+ cycle
+ else if (ts%numt.eq.2) then
+ if (d1 > d2) then
+ fact = 1.
+ else if (d1 < d2) then
+ fact = -1.
+ d1 = -d1
+ else
+ write(message,*) 'Error, time series in file: '//trim(fname)//' is not monotonic line: ',ts%numt
+ call write_log(message,type=GM_FATAL)
+ end if
+ else
+ if (d1 <= d2) then
+ write(message,*) 'Error, time series in file: '//trim(fname)//' is not monotonic line: ',ts%numt
+ call write_log(message,type=GM_FATAL)
+ end if
+ end if
+ end do
+ rewind(99)
+
+ allocate(ts%times(ts%numt))
+ allocate(ts%values(ts%numv,ts%numt))
+ ! read data
+ do i=1,ts%numt
+ read(99,*) ts%times(i),(ts%values(j,i),j=1,ts%numv)
+ end do
+ close(99)
+ end subroutine glimmer_read_ts
+
+ !> interpolate time series by stepping
+ subroutine glimmer_ts_step_array(ts,time,value)
+ use glimmer_log
+ implicit none
+ type(glimmer_tseries) :: ts !< time series data
+ real(dp), intent(in) :: time !< time value to get
+ real(dp), dimension(:) :: value !< interpolated value
+
+ integer i
+
+ if (size(value).ne.ts%numv) then
+ call write_log('Error, wrong number of values',GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ i = get_i(ts,time)
+ if (i.eq.-1) then
+ i = 1
+ else if (i.eq.ts%numt+1) then
+ i = ts%numt
+ end if
+
+ value = ts%values(:,i)
+ end subroutine glimmer_ts_step_array
+
+ !> interpolate time series by stepping
+ subroutine glimmer_ts_step_scalar(ts,time,value)
+ use glimmer_log
+ implicit none
+ type(glimmer_tseries) :: ts !< time series data
+ real(dp), intent(in) :: time !< time value to get
+ real(dp) :: value !< interpolated value
+
+ integer i
+
+ i = get_i(ts,time)
+ if (i.eq.-1) then
+ i = 1
+ else if (i.eq.ts%numt+1) then
+ i = ts%numt
+ end if
+
+ value = ts%values(1,i)
+ end subroutine glimmer_ts_step_scalar
+
+ !> linear interpolate time series
+ subroutine glimmer_ts_linear_array(ts,time,value)
+ use glimmer_log
+ implicit none
+ type(glimmer_tseries) :: ts !< time series data
+ real(dp), intent(in) :: time !< time value to get
+ real(dp), dimension(:) :: value !< interpolated value
+
+ integer i
+ real(dp),dimension(size(value)) :: slope
+
+ if (size(value).ne.ts%numv) then
+ call write_log('Error, wrong number of values',GM_FATAL,__FILE__,__LINE__)
+ end if
+
+ i = get_i(ts,time)
+ if (i.eq.-1) then
+ value(:) = ts%values(:,1)
+ else if (i.eq.ts%numt+1) then
+ value(:) = ts%values(:,ts%numt)
+ else
+ slope(:) = (ts%values(:,i+1)-ts%values(:,i))/(ts%times(i+1)-ts%times(i))
+ value(:) = ts%values(:,i) + slope(:)*(time-ts%times(i))
+ end if
+ end subroutine glimmer_ts_linear_array
+
+ !> linear interpolate time series
+ subroutine glimmer_ts_linear_scalar(ts,time,value)
+ use glimmer_log
+ implicit none
+ type(glimmer_tseries) :: ts !< time series data
+ real(dp), intent(in) :: time !< time value to get
+ real(dp) :: value !< interpolated value
+
+ integer i
+ real(dp) :: slope
+
+ i = get_i(ts,time)
+ if (i.eq.-1) then
+ value = ts%values(1,1)
+ else if (i.eq.ts%numt+1) then
+ value = ts%values(1,ts%numt)
+ else
+ slope = (ts%values(1,i+1)-ts%values(1,i))/(ts%times(i+1)-ts%times(i))
+ value = ts%values(1,i) + slope*(time-ts%times(i))
+ end if
+ end subroutine glimmer_ts_linear_scalar
+
+ !> get find the index
+ function get_i(ts,time)
+ implicit none
+ type(glimmer_tseries) :: ts !< time series data
+ real(dp), intent(in) :: time !< time value to get
+ integer get_i
+ integer upper,lower
+
+ ! BC
+ if (time <= ts%times(1)) then
+ get_i = -1
+ return
+ end if
+ if (time >= ts%times(ts%numt)) then
+ get_i = ts%numt + 1
+ return
+ end if
+ ! first try if the interpolated value is around the last value
+ ts%current=min(ts%current,ts%numt-1)
+ if (time >= ts%times(ts%current) .and. time < ts%times(ts%current+1)) then
+ get_i = ts%current
+ return
+ end if
+ ! this didn't work, let's try the next interval
+ ts%current=ts%current+1
+ if (time >= ts%times(ts%current) .and. time < ts%times(ts%current+1)) then
+ get_i = ts%current
+ return
+ end if
+    ! nope, let's do a bisection search
+ lower = 1
+ upper = ts%numt
+ do
+ ts%current = lower+int((upper-lower)/2.)
+ if (time >= ts%times(ts%current) .and. time < ts%times(ts%current+1)) then
+ get_i = ts%current
+ return
+ end if
+ if (time > ts%times(ts%current)) then
+ lower = ts%current
+ else
+ upper = ts%current
+ end if
+ end do
+ end function get_i
+
+end module glimmer_ts
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_utils.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_utils.F90
new file mode 100644
index 0000000000..35b109c732
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_utils.F90
@@ -0,0 +1,315 @@
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_utils.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> Module containing utility code for GLIMMER.
+ !TODO - Move check_conformal and fix_bcs2d to Glint? Used by glint_interp only.
+
+module glimmer_utils
+
+ use glimmer_global, only: dp
+
+ implicit none
+
+ interface array_bcs
+ module procedure array_bcs1d,array_bcs2d
+ end interface
+
+ interface check_conformal
+ module procedure check_conformal_2d_real
+ end interface
+
+contains
+
+ !> Returns the value of a 1D array location,checking first for the boundaries.
+ !!
+ !! the location is wrapped around the array boundaries until it falls within the array
+  !! \return The value of the location in question.
+ real(dp) function array_bcs1d(array,i)
+
+ ! Arguments
+
+ real(dp),dimension(:),intent(in) :: array !< The array to be indexed.
+ integer,intent(in) :: i !< The location to be extracted.
+
+ ! Internal variables
+
+ integer :: n,ii
+
+ n=size(array)
+ ii=i
+
+ if ((i<=n).and.(i>=1)) then
+ array_bcs1d=array(i)
+ endif
+
+ do while (ii>n)
+ ii=ii-n
+ enddo
+
+ do while (ii<1)
+ ii=ii+n
+ enddo
+
+ array_bcs1d=array(ii)
+
+ end function array_bcs1d
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Returns the value of a 1D array location,checking first for the boundaries.
+ !!
+ !! the location is wrapped around the array boundaries until it falls within the array
+ !! as array_bcs1d but for polar boundary conditions
+ !! \author The value of the location in question.
+ real(dp) function array_bcs_lats(array,i)
+
+
+ ! Arguments
+
+ real(dp),dimension(:),intent(in) :: array !< The array to be indexed.
+ integer,intent(in) :: i !< The location to be extracted.
+
+ ! Internal variables
+
+ integer :: n,ii
+
+ n=size(array)
+ ii=i
+
+ if ((i<=n).and.(i>=1)) then
+ array_bcs_lats=array(i)
+ return
+ endif
+
+ if (ii>n) then
+ ii=2*n-ii
+ array_bcs_lats=-180.d0+array(ii)
+ endif
+
+ if (ii<1) then
+ ii=1-ii
+ array_bcs_lats=180.d0-array(ii)
+ endif
+
+ end function array_bcs_lats
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Returns the value of an array
+ !! location, checking first for the boundaries.
+ !! Over-the-pole boundary conditions are implemented here.
+ !! \return The value of the location specified.
+ real(dp) function array_bcs2d(array,i,j)
+
+ ! Arguments
+
+ real(dp),dimension(:,:),intent(in) :: array !< Array to be indexed
+ integer,intent(in) :: i !< The location to be extracted
+ integer,intent(in) :: j !< The location to be extracted
+
+ ! Internal variables
+
+ integer :: nx,ny,ii,jj
+
+ nx=size(array,1) ; ny=size(array,2)
+
+ if ((i>=1).and.(i<=nx).and.(j>=1).and.(j<=ny)) then
+ array_bcs2d=array(i,j)
+ return
+ endif
+
+ ii=i ; jj=j
+
+ if (jj>ny) then
+ jj=2*ny-jj
+ ii=ii+nx/2
+ endif
+
+ if (jj<1) then
+ jj=1-jj
+ ii=ii+nx/2
+ endif
+
+ do while (ii>nx)
+ ii=ii-nx
+ enddo
+
+ do while (ii<1)
+ ii=ii+nx
+ enddo
+
+ array_bcs2d=array(ii,jj)
+
+ end function array_bcs2d
+
+!++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine fix_bcs2d(i,j,nx,ny)
+
+ !> Adjusts array location indices
+ !! so that they fall within the domain.
+
+ integer,intent(inout) :: i !< The location of interest
+ integer,intent(inout) :: j !< The location of interest
+ integer,intent(in) :: nx !< The size of the domain (number of points in each direction)
+ integer,intent(in) :: ny !< The size of the domain (number of points in each direction)
+
+ if ((i>=1).and.(i<=nx).and.(j>=1).and.(j<=ny)) return
+
+ if (j>ny) then
+ j=2*ny-j
+ i=i+nx/2
+ endif
+
+ if (j<1) then
+ j=1-j
+ i=i+nx/2
+ endif
+
+ do while (i>nx)
+ i=i-nx
+ enddo
+
+ do while (i<1)
+ i=i+nx
+ enddo
+
+ end subroutine fix_bcs2d
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine check_conformal_2d_real(array1,array2,label)
+
+ !> Checks that two arrays are of the same size.
+
+ use glimmer_log
+
+ real(dp),dimension(:,:),intent(in) :: array1 !< The array 1 to be checked
+ real(dp),dimension(:,:),intent(in) :: array2 !< The array 2 to be checked
+ character(*),intent(in),optional :: label !< Optional label, to facilitate bug tracking if the check fails.
+
+ if ((size(array1,1)/=size(array2,1)).or.(size(array1,2)/=size(array2,2))) then
+ if (present(label)) then
+ call write_log('Non-conformal arrays. Label: '//label,GM_FATAL,__FILE__,__LINE__)
+ else
+ call write_log('ERROR: Non-conformal arrays. No label',GM_FATAL,__FILE__,__LINE__)
+ endif
+ endif
+
+ end subroutine check_conformal_2d_real
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> compute horizontal sum for each vertical level
+ !!
+ !! Calculates the sum of a given three-dimensional field at each
+ !! level. The vertical coordinate of the input is the first index of
+ !! the array.
+ !! \return
+ !! A one-dimensional array of the same size as the first dimension of
+ !! inp is returned, containing the sum of inp for
+ !! each level.
+ function hsum(inp)
+
+
+ implicit none
+
+    real(dp),dimension(:,:,:),intent(in) :: inp !< The input array. The first index is the vertical, the other two horizontal.
+ real(dp),dimension(size(inp,dim=1)) :: hsum
+
+ integer up
+
+ do up=1,size(inp,dim=1)
+ hsum(up) = sum(inp(up,:,:))
+ end do
+
+ end function hsum
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Calculates the sum of a given two-dimensional field along one axis.
+ !! Within GLIMMER, this function calculates the mean vertical profile
+ !! in a 2D vertical slice.
+ !! \return
+ !! A one-dimensional array of the same size as the first dimension of
+ !! inp is returned, containing the sum of inp for
+ !! each row.
+
+ function lsum(inp)
+
+
+ implicit none
+
+ real(dp),dimension(:,:), intent(in) :: inp !< Input array
+ real(dp),dimension(size(inp,dim=1)) :: lsum
+
+ lsum = sum(inp(:,:),dim=2)
+
+ end function lsum
+
+ !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ !> Tridiagonal solver. All input/output arrays should have the
+ !! same number of elements.
+
+ subroutine tridiag(a,b,c,x,y)
+
+
+ real(dp),dimension(:) :: a !< Lower diagonal; a(1) is ignored.
+ real(dp),dimension(:) :: b !< Centre diagonal
+ real(dp),dimension(:) :: c !< Upper diagonal; c(n) is ignored.
+ real(dp),dimension(:) :: x !< Unknown vector
+ real(dp),dimension(:) :: y !< Right-hand side
+
+ real(dp),dimension(size(a)) :: aa
+ real(dp),dimension(size(a)) :: bb
+
+ integer :: n,i
+
+ n=size(a)
+
+ aa(1) = c(1)/b(1)
+ bb(1) = y(1)/b(1)
+
+ do i=2,n
+ aa(i) = c(i)/(b(i)-a(i)*aa(i-1))
+ bb(i) = (y(i)-a(i)*bb(i-1))/(b(i)-a(i)*aa(i-1))
+ end do
+
+ x(n) = bb(n)
+
+ do i=n-1,1,-1
+ x(i) = bb(i)-aa(i)*x(i+1)
+ end do
+
+ end subroutine tridiag
+
+end module glimmer_utils
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_vers.F90.cmake.in b/components/cism/glimmer-cism/libglimmer/glimmer_vers.F90.cmake.in
new file mode 100644
index 0000000000..4432e75dba
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_vers.F90.cmake.in
@@ -0,0 +1,44 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_vers.F90.cmake.in - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+!> the glimmer version as a string
+function glimmer_version_char()
+ implicit none
+ character(len=100) :: glimmer_version_char !< fixed-length result; blank-padded on assignment
+
+ !glimmer_version_char = 'GLIMMER v. ??: (CMake build does not get version)'
+ glimmer_version_char = 'CISM 2.0' ! hard-wired: the CMake build does not substitute version numbers into this file
+end function glimmer_version_char
+
+!> the glimmer version as an integer
+function glimmer_version_int()
+ implicit none
+ integer :: glimmer_version_int !< encoded as major*10000 + minor*100 + micro
+ glimmer_version_int = 10000*1 + 100*7 + 1 ! hard-wired to 1.7.1 — NOTE(review): differs from the 'CISM 2.0' character version in this file; confirm intended
+end function glimmer_version_int
+
+
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_vers.F90.in.in b/components/cism/glimmer-cism/libglimmer/glimmer_vers.F90.in.in
new file mode 100644
index 0000000000..70ddca3423
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_vers.F90.in.in
@@ -0,0 +1,42 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_vers.F90.in.in - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+!> the glimmer version as a string
+function glimmer_version_char()
+ implicit none
+ character(len=100) :: glimmer_version_char
+ glimmer_version_char = 'GLIMMER v. @GLIMMER_MAJOR_VERSION@.@GLIMMER_MINOR_VERSION@.@GLIMMER_MICRO_VERSION@@GLIMMER_SVN_VERS@'
+end function glimmer_version_char
+
+!> the glimmer version as an integer
+function glimmer_version_int()
+ implicit none
+ integer :: glimmer_version_int
+ glimmer_version_int = 10000*@GLIMMER_MAJOR_VERSION@ + 100*@GLIMMER_MINOR_VERSION@ + @GLIMMER_MICRO_VERSION@
+end function glimmer_version_int
+
+
diff --git a/components/cism/glimmer-cism/libglimmer/glimmer_writestats.F90 b/components/cism/glimmer-cism/libglimmer/glimmer_writestats.F90
new file mode 100644
index 0000000000..833a2624d4
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/glimmer_writestats.F90
@@ -0,0 +1,48 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! glimmer_writestats.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+module glimmer_writestats
+ !> F90 wrapper to gc_writestats
+ !!
+ !! \author Magnus Hagdorn
+ !! \date April 2009
+
+ implicit none
+
+contains
+
+ subroutine glimmer_write_stats(resname, cfgname,wallTime) !< Append run statistics for this run to the stats file
+ use glimmer_global, only : dp
+ use parallel, only: main_task
+ implicit none
+ character(len=*), intent(in) :: resname !< name of the output result file
+ character(len=*), intent(in) :: cfgname !< name of configuration file
+ real(kind=dp), intent(in) :: wallTime!< elapsed wall clock time in seconds
+
+ ! Only the main task writes, so parallel runs produce a single stats entry.
+ ! NOTE(review): the module header says gc_writestats but the external routine called
+ ! here is gf_writestats (not visible in this file) — confirm the symbol name.
+ if (main_task) call gf_writestats(resname,cfgname,wallTime)
+ end subroutine glimmer_write_stats
+
+end module glimmer_writestats
diff --git a/components/cism/glimmer-cism/libglimmer/mpi_mod.F90 b/components/cism/glimmer-cism/libglimmer/mpi_mod.F90
new file mode 100644
index 0000000000..9927e87125
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/mpi_mod.F90
@@ -0,0 +1,42 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! mpi_mod.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+module mpi_mod
+! This module wraps the external mpi module
+
+#ifndef NO_MPIMOD
+ use mpi
+#endif
+
+ implicit none
+
+#ifdef NO_MPIMOD
+#include <mpif.h>
+#endif
+
+ public
+
+end module mpi_mod
diff --git a/components/cism/glimmer-cism/libglimmer/nan_mod.F90 b/components/cism/glimmer-cism/libglimmer/nan_mod.F90
new file mode 100644
index 0000000000..8a884729db
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/nan_mod.F90
@@ -0,0 +1,46 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! nan_mod.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+module nan_mod
+
+! Set parameter for the floating point flag "nan" not-a-number
+!
+! Based on the similar module in CESM's CLM & CAM
+!
+ use glimmer_global, only : dp
+
+ implicit none
+ save ! NOTE(review): ieee_value(x, ieee_quiet_nan) from the intrinsic ieee_arithmetic module would be the portable modern alternative to these bit patterns
+
+#ifdef __PGI
+! quiet nan for portland group compilers
+ real(dp), parameter :: NaN = O'0777700000000000000000' ! octal (BOZ) bit pattern of an IEEE double-precision quiet NaN
+#else
+! signaling nan otherwise
+ real(dp), parameter :: NaN = O'0777610000000000000000' ! octal (BOZ) bit pattern of an IEEE double-precision signaling NaN
+#endif
+
+end module nan_mod
diff --git a/components/cism/glimmer-cism/libglimmer/ncdf_template.F90.in b/components/cism/glimmer-cism/libglimmer/ncdf_template.F90.in
new file mode 100644
index 0000000000..9910db6ed4
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/ncdf_template.F90.in
@@ -0,0 +1,564 @@
+! WJS (1-30-12): The following (turning optimization off) is needed as a workaround for an
+! xlf compiler bug, at least in IBM XL Fortran for AIX, V12.1 on bluefire
+#ifdef CPRIBM
+@PROCESS OPT(0)
+#endif
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! ncdf_template.F90.in - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#define NCO outfile%nc
+#define NCI infile%nc
+
+!GENVAR_HAVE_AVG!
+
+module NAME_io
+ ! template for creating subsystem specific I/O routines
+ ! written by Magnus Hagdorn, 2004
+
+ use DATAMOD
+
+ implicit none
+
+ private :: get_xtype, is_enabled, is_enabled_0dint, is_enabled_1dint, is_enabled_2dint, is_enabled_0dreal, is_enabled_1dreal, is_enabled_2dreal, is_enabled_3dreal
+
+ character(310), save :: restart_variable_list='' ! list of variables needed for a restart
+!TODO change 310 to a variable - see glimmer_ncdf.F90 in the definition for type glimmer_nc_stat for other instances of this value.
+
+ interface is_enabled ! MJH 10/21/13: Interface needed for determining if arrays have been enabled. See notes below in NAME_io_create.
+ module procedure is_enabled_0dint
+ module procedure is_enabled_1dint
+ module procedure is_enabled_2dint
+ module procedure is_enabled_0dreal
+ module procedure is_enabled_1dreal
+ module procedure is_enabled_2dreal
+ module procedure is_enabled_3dreal
+ end interface is_enabled
+
+contains
+
+ !*****************************************************************************
+ ! netCDF output
+ !*****************************************************************************
+ subroutine NAME_io_createall(model,data,outfiles)
+ ! open all netCDF files for output
+ use DATAMOD
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(glide_global_type) :: model
+ type(DATATYPE) :: data ! MJH 10/21/13: Making 'data' mandatory. See notes below in NAME_io_create
+ type(glimmer_nc_output),optional,pointer :: outfiles
+
+ ! local variables
+ type(glimmer_nc_output), pointer :: oc
+
+ if (present(outfiles)) then
+ oc => outfiles
+ else
+ oc=>model%funits%out_first
+ end if
+
+ do while(associated(oc))
+ call NAME_io_create(oc,model,data)
+ oc=>oc%next
+ end do
+ end subroutine NAME_io_createall
+
+ subroutine NAME_io_writeall(data,model,atend,outfiles,time)
+ ! if necessary write to netCDF files
+ use DATAMOD
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(DATATYPE) :: data
+ type(glide_global_type) :: model
+ logical, optional :: atend
+ type(glimmer_nc_output),optional,pointer :: outfiles
+ real(dp),optional :: time
+
+ ! local variables
+ type(glimmer_nc_output), pointer :: oc
+ logical :: forcewrite=.false.
+
+ if (present(outfiles)) then
+ oc => outfiles
+ else
+ oc=>model%funits%out_first
+ end if
+
+ if (present(atend)) then
+ forcewrite = atend
+ end if
+
+ do while(associated(oc))
+#ifdef HAVE_AVG
+ if (oc%do_averages) then
+ call NAME_avg_accumulate(oc,data,model)
+ end if
+#endif
+ call glimmer_nc_checkwrite(oc,model,forcewrite,time)
+ if (oc%nc%just_processed) then
+ ! write standard variables
+ call NAME_io_write(oc,data)
+#ifdef HAVE_AVG
+ if (oc%do_averages) then
+ call NAME_avg_reset(oc,data)
+ end if
+#endif
+ end if
+ oc=>oc%next
+ end do
+ end subroutine NAME_io_writeall
+
+ subroutine NAME_io_create(outfile,model,data)
+ use parallel
+ use glide_types
+ use DATAMOD
+ use glimmer_ncdf
+ use glimmer_ncio
+ use glimmer_map_types
+ use glimmer_log
+ use glimmer_paramets
+ use glimmer_scales
+ use glimmer_log
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ type(glide_global_type) :: model
+ type(DATATYPE) :: data ! MJH 10/21/13: Making 'data' mandatory. See note below
+
+ integer status,varid,pos
+
+ ! MJH 10/21/13: Local variables needed for checking if a variable is enabled.
+ real(dp) :: tavgf
+ integer :: up
+
+ !GENVAR_DIMS!
+
+ ! Expanding restart variables: if 'restart' or 'hot' is present, we remove that
+ ! word from the variable list, and flip the restartfile flag.
+ ! In CISM 2.0, 'restart' is the preferred name to represent restart variables,
+ ! but 'hot' is supported for backward compatibility. Thus, we check for both.
+ NCO%vars = ' '//trim(adjustl(NCO%vars))//' ' ! Need to maintain a space at beginning and end of list
+ ! expanding restart variables
+ pos = index(NCO%vars,' restart ')
+ if (pos.ne.0) then
+ NCO%vars = NCO%vars(:pos)//NCO%vars(pos+8:)
+ NCO%restartfile = .true.
+ end if
+ pos = index(NCO%vars,' hot ')
+ if (pos.ne.0) then
+ NCO%vars = NCO%vars(:pos)//NCO%vars(pos+4:)
+ NCO%restartfile = .true.
+ end if
+ ! Now apply necessary changes if the file is a restart file.
+ if (NCO%restartfile) then
+ if ((len_trim(NCO%vars) + len_trim(restart_variable_list) + 2) >= len(NCO%vars) ) then
+ call write_log('Adding restart variables has made the list of output variables too long for file ' // NCO%filename, GM_FATAL)
+ else
+ ! Expand the restart variable list
+ ! Need to maintain a space at beginning and end of list
+ NCO%vars = trim(NCO%vars) // ' ' // trim(restart_variable_list) // ' ' ! (a module variable)
+ ! Set the xtype to be double (required for an exact restart)
+ outfile%default_xtype = NF90_DOUBLE
+ endif
+ end if
+
+ ! Convert temp and flwa to versions on stag grid, if needed
+ ! Note: this check must occur after restart variables are expanded which happens in glimmer_nc_readparams
+ call check_for_tempstag(model%options%whichdycore,NCO)
+
+ ! checking if we need to handle time averages
+ pos = index(NCO%vars,AVG_SUFF)
+ if (pos.ne.0) then
+ outfile%do_averages = .True.
+ end if
+
+ ! Now that the output variable list is finalized, make sure we aren't truncating what the user intends to be output.
+ ! Note: this only checks that the text in the variable list does not extend to within one character of the end of the variable.
+ ! It does not handle the case where the user exactly fills the allowable length with variables or has a too-long list with more than one space between variable names.
+ if ((len_trim(NCO%vars) + 1 ) >= len(NCO%vars)) then
+ call write_log('The list of output variables is too long for file ' // NCO%filename, GM_FATAL)
+ endif
+
+
+ ! MJH, 10/21/13: In the auto-generated code below, the creation of each output variable is wrapped by a check if the data for that
+ ! variable has a size greater than 0. This is because of recently added checks in glide_types.F90 that don't fully allocate
+ ! some variables if certain model options are disabled. This is to lower memory requirements while running the model.
+ ! The reason they have to be allocated with size zero rather than left unallocated is because the data for
+ ! some netCDF output variables is defined with math, which causes an error if the operands are unallocated.
+ ! Note that if a variable is not created, then it will not be subsequently written to.
+ ! Also note that this change requires that data be a mandatory argument to this subroutine.
+
+ ! Some output variables will need tavgf. The value does not matter, but it must exist.
+ ! Nonetheless, for completeness give it the proper value that it has in NAME_io_write.
+ tavgf = outfile%total_time
+ if (tavgf.ne.0.d0) then
+ tavgf = 1.d0/tavgf
+ end if
+ ! Similarly, some output variables use the variable up. Give it value of 0 here.
+ up = 0
+
+ !GENVAR_VARDEF!
+ end subroutine NAME_io_create
+
+ subroutine NAME_io_write(outfile,data)
+ use parallel
+ use DATAMOD
+ use glimmer_ncdf
+ use glimmer_paramets
+ use glimmer_scales
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ ! structure containing output netCDF descriptor
+ type(DATATYPE) :: data
+ ! the model instance
+
+ ! local variables
+ real(dp) :: tavgf
+ integer status, varid
+ integer up
+
+ tavgf = outfile%total_time
+ if (tavgf.ne.0.d0) then
+ tavgf = 1.d0/tavgf
+ end if
+
+ ! write variables
+ !GENVAR_WRITE!
+ end subroutine NAME_io_write
+
+
+ subroutine NAME_add_to_restart_variable_list(vars_to_add)
+ ! This subroutine adds variables to the list of variables needed for a restart.
+ ! It is a public subroutine that allows other parts of the model to modify the list,
+ ! which is a module level variable. MJH 1/17/2013
+
+ use glimmer_log
+ implicit none
+
+ !------------------------------------------------------------------------------------
+ ! Subroutine arguments
+ !------------------------------------------------------------------------------------
+ character(len=*), intent (in) :: vars_to_add ! list of variable(s) to be added to the list of restart variables
+ !character(*), intent (inout) :: restart_variable_list ! list of variables needed to perform an exact restart - module variable
+
+ !------------------------------------------------------------------------------------
+ ! Internal variables
+ !------------------------------------------------------------------------------------
+
+ !------------------------------------------------------------------------------------
+
+ ! Add the variables to the list so long as they don't make the list too long.
+ if ( (len_trim(restart_variable_list) + 1 + len_trim(vars_to_add)) > len(restart_variable_list)) then
+ call write_log('Adding restart variables has made the restart variable list too long.',GM_FATAL)
+ else
+ restart_variable_list = trim(adjustl(restart_variable_list)) // ' ' // trim(vars_to_add)
+ !call write_log('Adding to NAME restart variable list: ' // trim(vars_to_add) )
+ endif
+
+ end subroutine NAME_add_to_restart_variable_list
+
+
+ ! Functions for the interface 'is_enabled'. These are needed by the auto-generated code in NAME_io_create
+ ! to determine if a variable is 'turned on', and should be written.
+
+ function is_enabled_0dint(var)
+ integer, intent(in) :: var
+ logical :: is_enabled_0dint
+ is_enabled_0dint = .true. ! scalars are always enabled
+ return
+ end function is_enabled_0dint
+
+ function is_enabled_1dint(var)
+ integer, dimension(:), pointer, intent(in) :: var
+ logical :: is_enabled_1dint
+ if (associated(var)) then
+ is_enabled_1dint = .true.
+ else
+ is_enabled_1dint = .false.
+ endif
+ return
+ end function is_enabled_1dint
+
+ function is_enabled_2dint(var)
+ integer, dimension(:,:), pointer, intent(in) :: var
+ logical :: is_enabled_2dint
+ if (associated(var)) then
+ is_enabled_2dint = .true.
+ else
+ is_enabled_2dint = .false.
+ endif
+ return
+ end function is_enabled_2dint
+
+ function is_enabled_0dreal(var)
+ real(dp), intent(in) :: var
+ logical :: is_enabled_0dreal
+ is_enabled_0dreal = .true. ! scalars are always enabled
+ return
+ end function is_enabled_0dreal
+
+ function is_enabled_1dreal(var)
+ real(dp), dimension(:), pointer, intent(in) :: var
+ logical :: is_enabled_1dreal
+ if (associated(var)) then
+ is_enabled_1dreal = .true.
+ else
+ is_enabled_1dreal = .false.
+ endif
+ return
+ end function is_enabled_1dreal
+
+ function is_enabled_2dreal(var)
+ real(dp), dimension(:,:), pointer, intent(in) :: var
+ logical :: is_enabled_2dreal
+ if (associated(var)) then
+ is_enabled_2dreal = .true.
+ else
+ is_enabled_2dreal = .false.
+ endif
+ return
+ end function is_enabled_2dreal
+
+ function is_enabled_3dreal(var)
+ real(dp), dimension(:,:,:), pointer, intent(in) :: var
+ logical :: is_enabled_3dreal
+ if (associated(var)) then
+ is_enabled_3dreal = .true.
+ else
+ is_enabled_3dreal = .false.
+ endif
+ return
+ end function is_enabled_3dreal
+
+
+ !*****************************************************************************
+ ! netCDF input
+ !*****************************************************************************
+ subroutine NAME_io_readall(data, model, filetype)
+ ! read from netCDF file
+ use DATAMOD
+ use glide_types
+ use glimmer_ncdf
+ use glimmer_ncio
+ implicit none
+ type(DATATYPE) :: data
+ type(glide_global_type) :: model
+ integer, intent(in), optional :: filetype ! 0 for input, 1 for forcing; defaults to input
+
+ ! local variables
+ type(glimmer_nc_input), pointer :: ic
+ integer :: filetype_local
+
+ if (present(filetype)) then
+ filetype_local = filetype
+ else
+ filetype_local = 0 ! default to input type
+ end if
+
+ if (filetype_local == 0) then
+ ic=>model%funits%in_first
+ else
+ ic=>model%funits%frc_first
+ endif
+ do while(associated(ic))
+ call glimmer_nc_checkread(ic,model)
+ if (ic%nc%just_processed) then
+ call NAME_io_read(ic,data)
+ end if
+ ic=>ic%next
+ end do
+ end subroutine NAME_io_readall
+
+
+ subroutine NAME_read_forcing(data, model)
+ ! Read data from forcing files
+ use glimmer_log
+ use glide_types
+ use glimmer_ncdf
+
+ implicit none
+ type(DATATYPE) :: data
+ type(glide_global_type), intent(inout) :: model
+
+ ! Locals
+ type(glimmer_nc_input), pointer :: ic
+ integer :: t
+ real(dp) :: eps ! a tolerance to use for stepwise constant forcing
+
+ ! Make eps a fraction of the time step.
+ eps = model%numerics%tinc * 1.0d-4
+
+ ! read forcing files
+ ic=>model%funits%frc_first
+ do while(associated(ic))
+
+ !print *, 'possible forcing times', ic%times
+
+ ! Find the current time in the file
+ do t = ic%nt, 1, -1 ! look through the time array backwards
+ if ( ic%times(t) <= model%numerics%time + eps) then
+ ! use the largest time that is smaller or equal to the current time (stepwise forcing)
+
+ ! Set the desired time to be read
+ ic%current_time = t
+ !print *, 'time, forcing index, forcing time', model%numerics%time, ic%current_time, ic%times(ic%current_time)
+ exit ! once we find the time, exit the loop
+ endif
+ end do
+
+ ! read all forcing fields present in this file for the time specified above
+ ic%nc%just_processed = .false. ! set this to false so it will be re-processed every time through - this ensures info gets written to the log, and that time levels don't get skipped.
+ call NAME_io_readall(data, model, filetype=1)
+
+ ! move on to the next forcing file
+ ic=>ic%next
+ end do
+
+ end subroutine NAME_read_forcing
+
+
+!------------------------------------------------------------------------------
+
+
+ subroutine NAME_io_read(infile,data)
+ ! read variables from a netCDF file
+ use parallel
+ use glimmer_log
+ use glimmer_ncdf
+ use DATAMOD
+ use glimmer_paramets
+ use glimmer_scales
+ implicit none
+ type(glimmer_nc_input), pointer :: infile
+ ! structure containing output netCDF descriptor
+ type(DATATYPE) :: data
+ ! the model instance
+
+ ! local variables
+ integer status,varid
+ integer up
+ real(dp) :: scaling_factor
+
+ ! read variables
+ !GENVAR_READ!
+ end subroutine NAME_io_read
+
+ subroutine NAME_io_checkdim(infile,model,data)
+ ! check if dimension sizes in file match dims of model
+ use parallel
+ use glimmer_log
+ use glimmer_ncdf
+ use glide_types
+ use DATAMOD
+ implicit none
+ type(glimmer_nc_input), pointer :: infile
+ ! structure containing output netCDF descriptor
+ type(glide_global_type) :: model
+ type(DATATYPE), optional :: data
+
+ integer status,dimid,dimsize
+ character(len=150) message
+
+ ! check dimensions
+ !GENVAR_CHECKDIM!
+ end subroutine NAME_io_checkdim
+
+ !*****************************************************************************
+ ! calculating time averages
+ !*****************************************************************************
+#ifdef HAVE_AVG
+ subroutine NAME_avg_accumulate(outfile,data,model)
+ use parallel
+ use glide_types
+ use DATAMOD
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ ! structure containing output netCDF descriptor
+ type(glide_global_type) :: model
+ type(DATATYPE) :: data
+
+ ! local variables
+ real(dp) :: factor
+ integer status, varid
+
+ ! increase total time
+ outfile%total_time = outfile%total_time + model%numerics%tinc
+ factor = model%numerics%tinc
+
+ !GENVAR_CALCAVG!
+ end subroutine NAME_avg_accumulate
+
+ subroutine NAME_avg_reset(outfile,data)
+ use parallel
+ use DATAMOD
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile
+ ! structure containing output netCDF descriptor
+ type(DATATYPE) :: data
+
+ ! local variables
+ integer status, varid
+
+ ! reset total time
+ outfile%total_time = 0.d0
+
+ !GENVAR_RESETAVG!
+ end subroutine NAME_avg_reset
+#endif
+
+ !*********************************************************************
+ ! some private procedures
+ !*********************************************************************
+
+ !> apply default type to be used in netCDF file
+ integer function get_xtype(outfile,xtype)
+ use glimmer_ncdf
+ implicit none
+ type(glimmer_nc_output), pointer :: outfile !< derived type holding information about output file
+ integer, intent(in) :: xtype !< the external netCDF type
+
+ get_xtype = xtype
+
+ if (xtype.eq.NF90_REAL .and. outfile%default_xtype.eq.NF90_DOUBLE) then
+ get_xtype = NF90_DOUBLE
+ end if
+ if (xtype.eq.NF90_DOUBLE .and. outfile%default_xtype.eq.NF90_REAL) then
+ get_xtype = NF90_REAL
+ end if
+ end function get_xtype
+
+ !*********************************************************************
+ ! lots of accessor subroutines follow
+ !*********************************************************************
+ !GENVAR_ACCESSORS!
+
+end module NAME_io
diff --git a/components/cism/glimmer-cism/libglimmer/ncdf_utils.F90 b/components/cism/glimmer-cism/libglimmer/ncdf_utils.F90
new file mode 100644
index 0000000000..3c97eb4b3c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/ncdf_utils.F90
@@ -0,0 +1,168 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! ncdf_utils.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!TODO - Move this module to utils directory? No longer used.
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> This code provides a simple interface to create and then add
+!! to a netcdf file containing time-slices of a single 2D field,
+!! for use in debugging.
+module ncdf_utils
+
+ use netcdf
+ use glimmer_global, only: sp, dp
+
+ implicit none
+
+ type ncdf_utils_type
+ integer :: id,varid,dimid1,dimid2,dimid3,d3id ! netCDF file id, data variable id, dimension ids, time-variable id
+ integer :: next=1 ! next time-slice index to write (initialization gives this component a default value)
+ character(100) :: fname ! file name recorded at creation
+ end type ncdf_utils_type
+
+contains
+
+ ! Note: This subroutine currently is not called, as far as I can tell
+
+ subroutine ncdf_utils_create(handle,fname,varname,d1name,d2name,d1,d2)
+
+ type(ncdf_utils_type),intent(out) :: handle !< Netcdf file handles
+ character(*), intent(in) :: fname !< File name
+ character(*), intent(in) :: varname !< Variable name
+ character(*), intent(in) :: d1name !< Name of first dimension
+ character(*), intent(in) :: d2name !< Name of second dimension
+ real(sp),dimension(:),intent(in) :: d1 !< Dimension 1
+ real(sp),dimension(:),intent(in) :: d2 !< Dimension 2
+
+ integer :: ncerr,d1id,d2id
+
+ ! Create file
+
+ ncerr=nf90_create(fname,0,handle%id) ! cmode=0 — presumably NF90_CLOBBER (overwrite existing file); confirm against netCDF constants
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ handle%fname=fname
+
+ ! Define dimensions
+
+ ncerr=nf90_def_dim(handle%id,d1name,size(d1),handle%dimid1)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_def_dim(handle%id,d2name,size(d2),handle%dimid2)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_def_dim(handle%id,'time',NF90_UNLIMITED,handle%dimid3) ! unlimited record dimension for time slices
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+
+ ! Define dimension variables
+
+ ncerr=nf90_def_var(handle%id,d1name,NF90_FLOAT,(/handle%dimid1/),d1id)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_def_var(handle%id,d2name,NF90_FLOAT,(/handle%dimid2/),d2id)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_def_var(handle%id,'time',NF90_FLOAT,(/handle%dimid3/),handle%d3id)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+
+ ! Define 2D variable
+
+ ncerr=nf90_def_var(handle%id,varname,NF90_DOUBLE, &
+ (/handle%dimid1,handle%dimid2,handle%dimid3/),handle%varid)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+
+ ! Exit define mode and save dimension variables
+
+ ncerr=nf90_enddef(handle%id)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_put_var(handle%id,d1id,d1)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_put_var(handle%id,d2id,d2)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+
+ end subroutine ncdf_utils_create
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine ncdf_utils_write(handle,var,time)
+
+ type(ncdf_utils_type), intent(inout) :: handle !< open file handle; %next is advanced on return
+ real(dp),dimension(:,:),intent(in) :: var !< 2D field written as time slice handle%next
+ real(dp), intent(in) :: time !< model time stamp for this slice
+
+ integer :: ncerr
+
+ ncerr=nf90_put_var(handle%id,handle%varid,real(var,dp),(/1,1,handle%next/)) ! real(var,dp) is a no-op conversion since var is already real(dp)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_put_var(handle%id,handle%d3id,time,(/handle%next/))
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_sync(handle%id) ! flush to disk each slice, so the file is readable mid-run (debugging aid)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+
+ handle%next=handle%next+1
+
+ end subroutine ncdf_utils_write
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine ncdf_utils_close(handle)
+
+ type(ncdf_utils_type), intent(in) :: handle
+
+ integer :: ncerr
+
+ ncerr=nf90_close(handle%id) ! NOTE(review): close status is stored but never checked
+
+ end subroutine ncdf_utils_close
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine ncdf_utils_read_slice(filename,varname,slice,array)
+
+ character(*), intent(in) :: filename !< file to read from
+ character(*), intent(in) :: varname !< variable to read
+ integer, intent(in) :: slice !< time-slice index (third dimension start)
+ real(dp),dimension(:,:),intent(out) :: array !< receives one 2D slice; caller supplies the shape
+
+ integer :: ncerr,fileid,varid
+
+ ncerr=nf90_open(filename,0,fileid) ! mode=0 — presumably NF90_NOWRITE (read-only); confirm against netCDF constants
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_inq_varid(fileid,varname,varid)
+ if (ncerr/=NF90_NOERR) call ncerr_handle(ncerr)
+ ncerr=nf90_get_var(fileid,varid,array,(/1,1,slice/)) ! NOTE(review): this status is not checked and fileid is never closed here
+
+ end subroutine ncdf_utils_read_slice
+
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ subroutine ncerr_handle(ncerr)
+
+ integer,intent(in) :: ncerr !< netCDF status code to report
+
+ ! Print the netCDF error text and abort the whole program.
+ print*,nf90_strerror(ncerr)
+ stop
+
+ end subroutine ncerr_handle
+
+end module ncdf_utils
diff --git a/components/cism/glimmer-cism/libglimmer/parallel_mpi.F90 b/components/cism/glimmer-cism/libglimmer/parallel_mpi.F90
new file mode 100644
index 0000000000..16bc5f048c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/parallel_mpi.F90
@@ -0,0 +1,5947 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! parallel_mpi.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+module parallel
+
+ use netcdf
+ implicit none
+
+ ! Information on the local & global bounds of an array
+ ! This is used to distinguish between arrays on the staggered vs. unstaggered grids
+ type, private :: bounds_info_type
+ ! Global number of points in each dimension
+ integer :: global_ewn
+ integer :: global_nsn
+
+ ! Range of indices that this proc is responsible for (excludes halo cells)
+ ! These are the indices in global index space
+ integer :: mybounds_ew_lb
+ integer :: mybounds_ew_ub
+ integer :: mybounds_ns_lb
+ integer :: mybounds_ns_ub
+
+ ! Local indices that this proc is responsible for (excludes halo cells)
+ ! These are the indices in local index space
+ integer :: ilo
+ integer :: ihi
+ integer :: jlo
+ integer :: jhi
+ end type bounds_info_type
+
+
+!PW - Repeat from glimmer_horiz_bcs_parallel.F90
+ integer, parameter, private :: HORIZ_BCS_WALL_SLIP = 0
+ integer, parameter, private :: HORIZ_BCS_CYCLIC = 1
+
+ integer, parameter, private :: horiz_bcs_type_north = HORIZ_BCS_CYCLIC
+ integer, parameter, private :: horiz_bcs_type_south = HORIZ_BCS_CYCLIC
+ integer, parameter, private :: horiz_bcs_type_east = HORIZ_BCS_CYCLIC
+ integer, parameter, private :: horiz_bcs_type_west = HORIZ_BCS_CYCLIC
+!PW - End of repeat
+
+ ! Debug and Verification Level
+ integer,parameter :: DEBUG_LEVEL = 1
+ ! If > 0, then debug code executed. Added for parallel_halo_verify()
+
+ !NOTE: The glam/glissade dycore currently requires nhalo = 2,
+ ! whereas the glide dycore requires nhalo = 0.
+ ! For glide simulations, we set nhalo = 0 by calling distributed_grid
+ ! with optional argument nhalo = 0.
+
+ integer, save :: nhalo = 2
+
+ !TODO - Define lhalo and uhalo in terms of nhalo.
+
+ integer, save :: lhalo = 2
+ integer, save :: uhalo = 2
+
+ ! halo widths for staggered grid
+! integer,parameter :: staggered_lhalo = lhalo
+! integer,parameter :: staggered_uhalo = uhalo-1
+ integer, save :: staggered_lhalo = 2
+ integer, save :: staggered_uhalo = 1
+
+!TODO - Remove staggered_whalo/shalo/ehalo/nhalo here and in other parts of the code
+! integer,parameter :: staggered_whalo = lhalo
+! integer,parameter :: staggered_shalo = lhalo
+! integer,parameter :: staggered_ehalo = uhalo-1
+! integer,parameter :: staggered_nhalo = uhalo-1
+ integer, save :: staggered_whalo = 2
+ integer, save :: staggered_shalo = 2
+ integer, save :: staggered_ehalo = 1
+ integer, save :: staggered_nhalo = 1
+
+ integer,save :: main_rank
+ logical,save :: main_task
+ integer,save :: comm, tasks, this_rank
+
+ ! distributed grid
+ integer,save :: global_ewn,global_nsn,local_ewn,local_nsn,own_ewn,own_nsn
+ integer,save :: global_col_offset, global_row_offset
+
+ integer,save :: ewlb,ewub,nslb,nsub
+ integer,save :: east,north,south,west
+
+ !WHL - added global boundary conditions
+ ! global boundary conditions
+ logical,save :: periodic_bc ! doubly periodic
+ logical,save :: outflow_bc ! if true, set scalars in global halo to zero
+ ! does not apply to staggered variables (e.g., uvel, vvel)
+
+ ! common work space
+ integer,dimension(4),save :: d_gs_mybounds
+ integer,dimension(:,:),allocatable,save :: d_gs_bounds
+
+ ! distributed gather flow control parameter
+ integer,parameter :: max_gather_block_size = 64 ! max and default
+
+ ! global IDs
+ integer,save :: ProcsEW
+
+ !TODO - Remove these gathered_* declarations. No longer used.
+
+ ! JEFF Declarations for undistributed variables on main_task.
+ ! Later move to separate module? These are only temporary until code is completely distributed.
+ real(8),dimension(:,:,:),allocatable :: gathered_efvs ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:,:),allocatable :: gathered_efvs2 ! Variable for testing that scatter/gather are inverses
+ real(8),dimension(:,:,:),allocatable :: gathered_uvel ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:,:),allocatable :: gathered_vvel ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:),allocatable :: gathered_uflx ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:),allocatable :: gathered_vflx ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:,:),allocatable :: gathered_velnorm ! Variable calculated in run_ho_diagnostic(), is this used?
+ real(8),dimension(:,:),allocatable :: gathered_thck ! Used in horizontal_remap_in()
+ real(8),dimension(:,:),allocatable :: gathered_stagthck ! Used in horizontal_remap_in()
+ real(4),dimension(:,:),allocatable :: gathered_acab ! Used in horizontal_remap_in()
+ real(8),dimension(:,:,:),allocatable :: gathered_temp ! Used in horizontal_remap_in()
+ real(8),dimension(:,:),allocatable :: gathered_dusrfdew ! Used in glide_stress()
+ real(8),dimension(:,:),allocatable :: gathered_dusrfdns ! Used in glide_stress()
+ real(8),dimension(:,:),allocatable :: gathered_dthckdew ! Used in glide_stress()
+ real(8),dimension(:,:),allocatable :: gathered_dthckdns ! Used in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauxx ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauyy ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauxy ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauscalar ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauxz ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauyz ! Calculated in glide_stress()
+ real(8),dimension(:,:),allocatable :: gathered_topg ! Bedrock topology, Used in glide_set_mask()
+ integer,dimension(:,:),allocatable :: gathered_thkmask ! Calculated in glide_set_mask()
+ real(8),dimension(:,:),allocatable :: gathered_marine_bc_normal ! Calculated in glide_marine_margin_normal()
+ real(8),dimension(:,:,:),allocatable :: gathered_surfvel ! Used in calc_gline_flux()
+ real(8),dimension(:,:),allocatable :: gathered_gline_flux ! Calculated in calc_gline_flux()
+ real(8),dimension(:,:),allocatable :: gathered_ubas ! Used in calc_gline_flux()
+ real(8),dimension(:,:),allocatable :: gathered_vbas ! Used in calc_gline_flux()
+ real(8),dimension(:,:),allocatable :: gathered_relx ! Used in glide_marinlim()
+ real(8),dimension(:,:,:),allocatable :: gathered_flwa ! Used in glide_marinlim()
+ real(4),dimension(:,:),allocatable :: gathered_calving ! Used in glide_marinlim()
+ real(4),dimension(:,:),allocatable :: gathered_backstress ! Used in glide_marinlim()
+ real(8),dimension(:,:),allocatable :: gathered_usrf ! Used in glide_marinlim()
+ logical,dimension(:,:),allocatable :: gathered_backstressmap ! Used in glide_marinlim()
+ real(8),dimension(:,:),allocatable :: gathered_tau_x ! Calculated in calc_basal_shear()
+ real(8),dimension(:,:),allocatable :: gathered_tau_y ! Calculated in calc_basal_shear()
+ real(8),dimension(:,:),allocatable :: gathered_lsrf ! Used in glide_marinlim()
+
+ interface broadcast
+ module procedure broadcast_character
+ module procedure broadcast_integer
+ module procedure broadcast_integer_1d
+ module procedure broadcast_logical
+ module procedure broadcast_real4
+ module procedure broadcast_real4_1d
+ module procedure broadcast_real8
+ module procedure broadcast_real8_1d
+ end interface
+
+ interface distributed_gather_var
+ module procedure distributed_gather_var_integer_2d
+ module procedure distributed_gather_var_logical_2d
+ module procedure distributed_gather_var_real4_2d
+ module procedure distributed_gather_var_real4_3d
+ module procedure distributed_gather_var_real8_2d
+ module procedure distributed_gather_var_real8_3d
+ end interface
+
+ interface distributed_get_var
+ module procedure distributed_get_var_integer_2d
+ module procedure distributed_get_var_real4_1d
+ module procedure distributed_get_var_real4_2d
+ module procedure distributed_get_var_real8_1d
+ module procedure distributed_get_var_real8_2d
+ module procedure distributed_get_var_real8_3d
+ end interface
+
+ interface distributed_print
+ ! Gathers a distributed variable and writes to file
+ module procedure distributed_print_integer_2d
+ module procedure distributed_print_real8_2d
+ module procedure distributed_print_real8_3d
+ end interface
+
+ interface distributed_put_var
+ module procedure distributed_put_var_integer_2d
+ module procedure distributed_put_var_real4_1d
+ module procedure distributed_put_var_real4_2d
+ module procedure distributed_put_var_real8_1d
+ module procedure distributed_put_var_real8_2d
+ module procedure distributed_put_var_real8_3d
+
+ !TODO - Should the parallel_put_var routines be part of this interface?
+ module procedure parallel_put_var_real4
+ module procedure parallel_put_var_real8
+ end interface
+
+ interface distributed_scatter_var
+ module procedure distributed_scatter_var_integer_2d
+ module procedure distributed_scatter_var_logical_2d
+ module procedure distributed_scatter_var_real4_2d
+ module procedure distributed_scatter_var_real4_3d
+ module procedure distributed_scatter_var_real8_2d
+ module procedure distributed_scatter_var_real8_3d
+ end interface
+
+ interface global_sum
+ module procedure global_sum_real8_scalar
+ module procedure global_sum_real8_1d
+ end interface
+
+ interface parallel_convert_haloed_to_nonhaloed
+ module procedure parallel_convert_haloed_to_nonhaloed_real4_2d
+ module procedure parallel_convert_haloed_to_nonhaloed_real8_2d
+ end interface parallel_convert_haloed_to_nonhaloed
+
+ interface parallel_convert_nonhaloed_to_haloed
+ module procedure parallel_convert_nonhaloed_to_haloed_real4_2d
+ module procedure parallel_convert_nonhaloed_to_haloed_real8_2d
+ end interface parallel_convert_nonhaloed_to_haloed
+
+ interface parallel_def_var
+ module procedure parallel_def_var_dimids
+ module procedure parallel_def_var_nodimids
+ end interface
+
+ interface parallel_get_att
+ module procedure parallel_get_att_character
+ module procedure parallel_get_att_real4
+ module procedure parallel_get_att_real4_1d
+ module procedure parallel_get_att_real8
+ module procedure parallel_get_att_real8_1d
+ end interface
+
+ interface parallel_get_var
+ module procedure parallel_get_var_integer_1d
+ module procedure parallel_get_var_real4_1d
+ module procedure parallel_get_var_real8_1d
+ end interface
+
+ interface parallel_halo
+ module procedure parallel_halo_integer_2d
+ module procedure parallel_halo_logical_2d
+ module procedure parallel_halo_real4_2d
+ module procedure parallel_halo_real8_2d
+ module procedure parallel_halo_real8_3d
+ end interface
+
+ interface parallel_halo_verify
+ module procedure parallel_halo_verify_integer_2d
+ module procedure parallel_halo_verify_real8_2d
+ module procedure parallel_halo_verify_real8_3d
+ end interface
+
+ interface staggered_parallel_halo
+ module procedure staggered_parallel_halo_integer_2d
+ module procedure staggered_parallel_halo_integer_3d
+ module procedure staggered_parallel_halo_real8_2d
+ module procedure staggered_parallel_halo_real8_3d
+ module procedure staggered_parallel_halo_real8_4d
+ end interface
+
+ interface staggered_parallel_halo_extrapolate
+ module procedure staggered_parallel_halo_extrapolate_integer_2d
+ module procedure staggered_parallel_halo_extrapolate_real8_2d
+ end interface
+
+ interface parallel_print
+ ! Writes a parallel (same on all processors) variable to file by just writing from main_task
+ module procedure parallel_print_integer_2d
+ module procedure parallel_print_real8_2d
+ module procedure parallel_print_real8_3d
+ end interface
+
+ interface parallel_put_att
+ module procedure parallel_put_att_character
+ module procedure parallel_put_att_real4
+ module procedure parallel_put_att_real4_1d
+ module procedure parallel_put_att_real8
+ module procedure parallel_put_att_real8_1d
+ end interface
+
+ interface parallel_put_var
+ module procedure parallel_put_var_real4
+ module procedure parallel_put_var_real8
+ module procedure parallel_put_var_real8_1d
+ end interface
+
+ interface parallel_reduce_max
+ module procedure parallel_reduce_max_integer
+ module procedure parallel_reduce_max_real4
+ module procedure parallel_reduce_max_real8
+ end interface
+
+ interface parallel_reduce_min
+ module procedure parallel_reduce_min_integer
+ module procedure parallel_reduce_min_real4
+ module procedure parallel_reduce_min_real8
+ end interface
+
+ interface parallel_reduce_sum
+ module procedure parallel_reduce_sum_integer
+ module procedure parallel_reduce_sum_real4
+ module procedure parallel_reduce_sum_real8
+ module procedure parallel_reduce_sum_real8_nvar
+ end interface
+
+ ! This reduce interface determines the global max value and the processor on which it occurs
+ interface parallel_reduce_maxloc
+ module procedure parallel_reduce_maxloc_integer
+ module procedure parallel_reduce_maxloc_real4
+ module procedure parallel_reduce_maxloc_real8
+ end interface
+
+ ! This reduce interface determines the global min value and the processor on which it occurs
+ interface parallel_reduce_minloc
+ module procedure parallel_reduce_minloc_integer
+ module procedure parallel_reduce_minloc_real4
+ module procedure parallel_reduce_minloc_real8
+ end interface
+
+contains
+
+ subroutine broadcast_character(c, proc)
+ ! Broadcast a character string to every task in comm.
+ use mpi_mod
+ implicit none
+ character(len=*) :: c ! string to broadcast; overwritten on non-source tasks
+ integer, intent(in), optional :: proc ! broadcast root; defaults to main_rank
+ integer :: source, ierror, n
+
+ ! Default the root to main_rank, overriding when a rank was supplied.
+ source = main_rank
+ if (present(proc)) source = proc
+ n = len(c)
+ call mpi_bcast(c, n, mpi_character, source, comm, ierror)
+ end subroutine broadcast_character
+
+ subroutine broadcast_integer(i, proc)
+ ! Broadcast a scalar integer to every task in comm.
+ use mpi_mod
+ implicit none
+ integer :: i ! value to broadcast; overwritten on non-source tasks
+ integer, intent(in), optional :: proc ! broadcast root; defaults to main_rank
+ integer :: source, ierror
+
+ ! Default the root to main_rank, overriding when a rank was supplied.
+ source = main_rank
+ if (present(proc)) source = proc
+ call mpi_bcast(i, 1, mpi_integer, source, comm, ierror)
+ end subroutine broadcast_integer
+
+ subroutine broadcast_integer_1d(a, proc)
+ ! Broadcast a 1D integer array to every task in comm.
+ use mpi_mod
+ implicit none
+ integer,dimension(:) :: a ! array to broadcast; overwritten on non-source tasks
+ integer, intent(in), optional :: proc ! broadcast root; defaults to main_rank
+ integer :: source, ierror
+
+ ! Default the root to main_rank, overriding when a rank was supplied.
+ source = main_rank
+ if (present(proc)) source = proc
+ call mpi_bcast(a, size(a), mpi_integer, source, comm, ierror)
+ end subroutine broadcast_integer_1d
+
+ subroutine broadcast_logical(l, proc)
+ ! Broadcast a scalar logical to every task in comm.
+ use mpi_mod
+ implicit none
+ logical :: l ! value to broadcast; overwritten on non-source tasks
+ integer, intent(in), optional :: proc ! broadcast root; defaults to main_rank
+ integer :: source, ierror
+
+ ! Default the root to main_rank, overriding when a rank was supplied.
+ source = main_rank
+ if (present(proc)) source = proc
+ call mpi_bcast(l, 1, mpi_logical, source, comm, ierror)
+ end subroutine broadcast_logical
+
+ subroutine broadcast_real4(r, proc)
+ ! Broadcast a scalar real(4) to every task in comm.
+ use mpi_mod
+ implicit none
+ real(4) :: r ! value to broadcast; overwritten on non-source tasks
+ integer, intent(in), optional :: proc ! broadcast root; defaults to main_rank
+ integer :: source, ierror
+
+ ! Default the root to main_rank, overriding when a rank was supplied.
+ source = main_rank
+ if (present(proc)) source = proc
+ call mpi_bcast(r, 1, mpi_real4, source, comm, ierror)
+ end subroutine broadcast_real4
+
+ subroutine broadcast_real4_1d(a, proc)
+ ! Broadcast a 1D real(4) array to every task in comm.
+ use mpi_mod
+ implicit none
+ real(4),dimension(:) :: a ! array to broadcast; overwritten on non-source tasks
+ integer, intent(in), optional :: proc ! broadcast root; defaults to main_rank
+ integer :: source, ierror
+
+ ! Default the root to main_rank, overriding when a rank was supplied.
+ source = main_rank
+ if (present(proc)) source = proc
+ call mpi_bcast(a, size(a), mpi_real4, source, comm, ierror)
+ end subroutine broadcast_real4_1d
+
+ subroutine broadcast_real8(r, proc)
+ ! Broadcast a scalar real(8) to every task in comm.
+ use mpi_mod
+ implicit none
+ real(8) :: r ! value to broadcast; overwritten on non-source tasks
+ integer, intent(in), optional :: proc ! broadcast root; defaults to main_rank
+ integer :: source, ierror
+
+ ! Default the root to main_rank, overriding when a rank was supplied.
+ source = main_rank
+ if (present(proc)) source = proc
+ call mpi_bcast(r, 1, mpi_real8, source, comm, ierror)
+ end subroutine broadcast_real8
+
+ subroutine broadcast_real8_1d(a, proc)
+ ! Broadcast a 1D real(8) array to every task in comm.
+ use mpi_mod
+ implicit none
+ real(8),dimension(:) :: a ! array to broadcast; overwritten on non-source tasks
+ integer, intent(in), optional :: proc ! broadcast root; defaults to main_rank
+ integer :: source, ierror
+
+ ! Default the root to main_rank, overriding when a rank was supplied.
+ source = main_rank
+ if (present(proc)) source = proc
+ call mpi_bcast(a, size(a), mpi_real8, source, comm, ierror)
+ end subroutine broadcast_real8_1d
+
+ function distributed_execution()
+ ! Report whether this build runs distributed; always true for the MPI build.
+ logical :: distributed_execution
+
+ distributed_execution = .true.
+ end function distributed_execution
+
+ subroutine distributed_gather_var_integer_2d(values, global_values)
+
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocatable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Collective: every task in comm must call this together, since it performs
+ ! MPI gathers rooted at main_rank.
+
+ use mpi_mod
+ implicit none
+ integer,dimension(:,:),intent(in) :: values
+ integer,dimension(:,:),allocatable,intent(inout) :: global_values
+
+ integer :: i,ierror,j,k
+ integer,dimension(:),allocatable :: displs,recvcounts
+ integer,dimension(:),allocatable :: recvbuf
+ integer,dimension(:,:),allocatable :: sendbuf
+
+ ! Staggered-grid arrays are one point smaller in each dimension; with
+ ! uhalo==0 there is no halo layer to strip, so this case is unsupported.
+ if (uhalo==0 .and. size(values,1)==local_ewn-1) then
+ ! Fixing this would require some generalization as is done for distributed_put_var
+ write(*,*) "distributed_gather does not currently work for"
+ write(*,*) "variables on the staggered grid when uhalo=0"
+ call parallel_stop(__FILE__, __LINE__)
+ end if
+
+ ! first time
+ ! Cache every task's owned (halo-stripped) global index bounds in the
+ ! module-level d_gs_bounds so later gathers skip this exchange.
+ ! NOTE(review): assumes the decomposition and halo widths never change
+ ! after the first call -- confirm.
+ if (.not. allocated(d_gs_bounds)) then
+ if (main_task) then
+ allocate(d_gs_bounds(4,tasks))
+ else
+ allocate(d_gs_bounds(1,1))
+ endif
+
+ d_gs_mybounds(1) = ewlb+lhalo
+ d_gs_mybounds(2) = ewub-uhalo
+ d_gs_mybounds(3) = nslb+lhalo
+ d_gs_mybounds(4) = nsub-uhalo
+ call fc_gather_int(d_gs_mybounds,4,mpi_integer,d_gs_bounds,4,&
+ mpi_integer,main_rank,comm)
+ endif
+
+ if (main_task) then
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ ! Global array spans the union of all tasks' owned bounds (global indices).
+ allocate(global_values(&
+ minval(d_gs_bounds(1,:)):maxval(d_gs_bounds(2,:)),&
+ minval(d_gs_bounds(3,:)):maxval(d_gs_bounds(4,:))))
+ global_values(:,:) = 0
+ allocate(displs(tasks+1))
+ allocate(recvcounts(tasks))
+ recvcounts(:) = (d_gs_bounds(2,:)-d_gs_bounds(1,:)+1) &
+ *(d_gs_bounds(4,:)-d_gs_bounds(3,:)+1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+recvcounts(i)
+ end do
+ allocate(recvbuf(displs(tasks+1)))
+ else
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ allocate(global_values(1,1)) ! This prevents a problem with NULL pointers later.
+ allocate(displs(1))
+ allocate(recvcounts(1))
+ allocate(recvbuf(1))
+ end if
+ ! Strip halos: send only this task's owned interior points.
+ allocate(sendbuf(d_gs_mybounds(1):d_gs_mybounds(2),&
+ d_gs_mybounds(3):d_gs_mybounds(4)))
+ sendbuf(:,:) = values(1+lhalo:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call fc_gatherv_int(sendbuf,size(sendbuf),mpi_integer,&
+ recvbuf,recvcounts,displs,mpi_integer,main_rank,comm)
+ if (main_task) then
+ ! Unpack each task's contiguous contribution into its owned sub-block.
+ do i = 1,tasks
+ global_values(d_gs_bounds(1,i):d_gs_bounds(2,i),&
+ d_gs_bounds(3,i):d_gs_bounds(4,i)) = &
+ reshape(recvbuf(displs(i)+1:displs(i+1)), &
+ (/d_gs_bounds(2,i)-d_gs_bounds(1,i)+1,&
+ d_gs_bounds(4,i)-d_gs_bounds(3,i)+1/))
+ end do
+ end if
+ ! automatic deallocation
+ end subroutine distributed_gather_var_integer_2d
+
+ subroutine distributed_gather_var_logical_2d(values, global_values)
+
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocatable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Collective: every task in comm must call this together. Logical variant
+ ! of distributed_gather_var_integer_2d; same structure, mpi_logical payload.
+
+ use mpi_mod
+ implicit none
+ logical,dimension(:,:),intent(in) :: values
+ logical,dimension(:,:),allocatable,intent(inout) :: global_values
+
+ integer :: i,ierror,j,k
+ integer,dimension(:),allocatable :: displs,recvcounts
+ logical,dimension(:),allocatable :: recvbuf
+ logical,dimension(:,:),allocatable :: sendbuf
+
+ ! Staggered-grid arrays with uhalo==0 cannot be halo-stripped; unsupported.
+ if (uhalo==0 .and. size(values,1)==local_ewn-1) then
+ ! Fixing this would require some generalization as is done for distributed_put_var
+ write(*,*) "distributed_gather does not currently work for"
+ write(*,*) "variables on the staggered grid when uhalo=0"
+ call parallel_stop(__FILE__, __LINE__)
+ end if
+
+ ! first time
+ ! Cache every task's owned global index bounds (see integer_2d variant).
+ if (.not. allocated(d_gs_bounds)) then
+ if (main_task) then
+ allocate(d_gs_bounds(4,tasks))
+ else
+ allocate(d_gs_bounds(1,1))
+ endif
+
+ d_gs_mybounds(1) = ewlb+lhalo
+ d_gs_mybounds(2) = ewub-uhalo
+ d_gs_mybounds(3) = nslb+lhalo
+ d_gs_mybounds(4) = nsub-uhalo
+ call fc_gather_int(d_gs_mybounds,4,mpi_integer,d_gs_bounds,4,&
+ mpi_integer,main_rank,comm)
+ endif
+
+ if (main_task) then
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ ! Global array spans the union of all tasks' owned bounds (global indices).
+ allocate(global_values(&
+ minval(d_gs_bounds(1,:)):maxval(d_gs_bounds(2,:)),&
+ minval(d_gs_bounds(3,:)):maxval(d_gs_bounds(4,:))))
+ global_values(:,:) = .false.
+ allocate(displs(tasks+1))
+ allocate(recvcounts(tasks))
+ recvcounts(:) = (d_gs_bounds(2,:)-d_gs_bounds(1,:)+1)&
+ *(d_gs_bounds(4,:)-d_gs_bounds(3,:)+1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+recvcounts(i)
+ end do
+ allocate(recvbuf(displs(tasks+1)))
+ else
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ allocate(global_values(1,1)) ! This prevents a problem with NULL pointers later.
+ allocate(displs(1))
+ allocate(recvcounts(1))
+ allocate(recvbuf(1))
+ end if
+ ! Strip halos: send only this task's owned interior points.
+ allocate(sendbuf(d_gs_mybounds(1):d_gs_mybounds(2),&
+ d_gs_mybounds(3):d_gs_mybounds(4)))
+ sendbuf(:,:) = values(1+lhalo:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call fc_gatherv_log(sendbuf,size(sendbuf),mpi_logical,&
+ recvbuf,recvcounts,displs,mpi_logical,main_rank,comm)
+ if (main_task) then
+ ! Unpack each task's contiguous contribution into its owned sub-block.
+ do i = 1,tasks
+ global_values(d_gs_bounds(1,i):d_gs_bounds(2,i),&
+ d_gs_bounds(3,i):d_gs_bounds(4,i)) = &
+ reshape(recvbuf(displs(i)+1:displs(i+1)), &
+ (/d_gs_bounds(2,i)-d_gs_bounds(1,i)+1,&
+ d_gs_bounds(4,i)-d_gs_bounds(3,i)+1/))
+ end do
+ end if
+ ! automatic deallocation
+ end subroutine distributed_gather_var_logical_2d
+
+ subroutine distributed_gather_var_real4_2d(values, global_values)
+
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocatable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Collective: every task in comm must call this together. Single-precision
+ ! variant of distributed_gather_var_integer_2d; same structure.
+
+ use mpi_mod
+ implicit none
+ real(4),dimension(:,:),intent(in) :: values
+ real(4),dimension(:,:),allocatable,intent(inout) :: global_values
+
+ integer :: i,ierror,j,k
+ integer,dimension(:),allocatable :: displs,recvcounts
+ real(4),dimension(:),allocatable :: recvbuf
+ real(4),dimension(:,:),allocatable :: sendbuf
+
+ ! Staggered-grid arrays with uhalo==0 cannot be halo-stripped; unsupported.
+ if (uhalo==0 .and. size(values,1)==local_ewn-1) then
+ ! Fixing this would require some generalization as is done for distributed_put_var
+ write(*,*) "distributed_gather does not currently work for"
+ write(*,*) "variables on the staggered grid when uhalo=0"
+ call parallel_stop(__FILE__, __LINE__)
+ end if
+
+ ! first time
+ ! Cache every task's owned global index bounds (see integer_2d variant).
+ if (.not. allocated(d_gs_bounds)) then
+ if (main_task) then
+ allocate(d_gs_bounds(4,tasks))
+ else
+ allocate(d_gs_bounds(1,1))
+ endif
+
+ d_gs_mybounds(1) = ewlb+lhalo
+ d_gs_mybounds(2) = ewub-uhalo
+ d_gs_mybounds(3) = nslb+lhalo
+ d_gs_mybounds(4) = nsub-uhalo
+ call fc_gather_int(d_gs_mybounds,4,mpi_integer,d_gs_bounds,4,&
+ mpi_integer,main_rank,comm)
+ endif
+
+ if (main_task) then
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ ! Global array spans the union of all tasks' owned bounds (global indices).
+ allocate(global_values(&
+ minval(d_gs_bounds(1,:)):maxval(d_gs_bounds(2,:)),&
+ minval(d_gs_bounds(3,:)):maxval(d_gs_bounds(4,:))))
+ global_values(:,:) = 0
+ allocate(displs(tasks+1))
+ allocate(recvcounts(tasks))
+ recvcounts(:) = (d_gs_bounds(2,:)-d_gs_bounds(1,:)+1) &
+ *(d_gs_bounds(4,:)-d_gs_bounds(3,:)+1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+recvcounts(i)
+ end do
+ allocate(recvbuf(displs(tasks+1)))
+ else
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ allocate(global_values(1,1)) ! This prevents a problem with NULL pointers later.
+ allocate(displs(1))
+ allocate(recvcounts(1))
+ allocate(recvbuf(1))
+ end if
+ ! Strip halos: send only this task's owned interior points.
+ allocate(sendbuf(d_gs_mybounds(1):d_gs_mybounds(2),&
+ d_gs_mybounds(3):d_gs_mybounds(4)))
+ sendbuf(:,:) = values(1+lhalo:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call fc_gatherv_real4(sendbuf,size(sendbuf),mpi_real4,&
+ recvbuf,recvcounts,displs,mpi_real4,main_rank,comm)
+ if (main_task) then
+ ! Unpack each task's contiguous contribution into its owned sub-block.
+ do i = 1,tasks
+ global_values(d_gs_bounds(1,i):d_gs_bounds(2,i),&
+ d_gs_bounds(3,i):d_gs_bounds(4,i)) = &
+ reshape(recvbuf(displs(i)+1:displs(i+1)), &
+ (/d_gs_bounds(2,i)-d_gs_bounds(1,i)+1,&
+ d_gs_bounds(4,i)-d_gs_bounds(3,i)+1/))
+ end do
+ end if
+ ! automatic deallocation
+ end subroutine distributed_gather_var_real4_2d
+
+ subroutine distributed_gather_var_real4_3d(values, global_values, ld1, ud1)
+
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable (dim 1 = vertical,
+ ! dims 2/3 = ew/ns, as shown by the sendbuf slice below)
+ ! global_values = reference to allocatable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! ld1/ud1 = optional lower/upper bounds for dimension 1 of global_values.
+ ! Collective: every task in comm must call this together.
+ ! Fix: the staggered-grid guard previously tested size(values,1) (the
+ ! vertical dimension for 3d fields); the east-west extent is dimension 2.
+
+ use mpi_mod
+ implicit none
+ real(4),dimension(:,:,:),intent(in) :: values
+ real(4),dimension(:,:,:),allocatable,intent(inout) :: global_values
+ integer,optional,intent(in) :: ld1, ud1
+
+ integer :: i,ierror,j,k,d1l,d1u
+ integer,dimension(:),allocatable :: displs,recvcounts
+ real(4),dimension(:),allocatable :: recvbuf
+ real(4),dimension(:,:,:),allocatable :: sendbuf
+
+ ! Staggered-grid arrays with uhalo==0 cannot be halo-stripped; unsupported.
+ ! Dimension 2 is east-west for 3d fields (cf. the sendbuf assignment).
+ if (uhalo==0 .and. size(values,2)==local_ewn-1) then
+ ! Fixing this would require some generalization as is done for distributed_put_var
+ write(*,*) "distributed_gather does not currently work for"
+ write(*,*) "variables on the staggered grid when uhalo=0"
+ call parallel_stop(__FILE__, __LINE__)
+ end if
+
+ ! first time
+ ! Cache every task's owned global index bounds (see integer_2d variant).
+ if (.not. allocated(d_gs_bounds)) then
+ if (main_task) then
+ allocate(d_gs_bounds(4,tasks))
+ else
+ allocate(d_gs_bounds(1,1))
+ endif
+
+ d_gs_mybounds(1) = ewlb+lhalo
+ d_gs_mybounds(2) = ewub-uhalo
+ d_gs_mybounds(3) = nslb+lhalo
+ d_gs_mybounds(4) = nsub-uhalo
+ call fc_gather_int(d_gs_mybounds,4,mpi_integer,d_gs_bounds,4,&
+ mpi_integer,main_rank,comm)
+ endif
+
+ if (main_task) then
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ ! Resolve requested bounds for dimension 1; defaults give 1:size(values,1).
+ if (present(ld1)) then
+ d1l = ld1
+ else
+ d1l = 1
+ endif
+ if (present(ud1)) then
+ d1u = ud1
+ else
+ d1u = size(values,1)-(d1l-1)
+ endif
+ if (size(values,1) /= d1u-d1l+1) then
+ write(*,*) "size(values,1) .ne. d1u-d1l+1 in gather call"
+ call parallel_stop(__FILE__, __LINE__)
+ endif
+ ! Global array spans the union of all tasks' owned bounds (global indices).
+ allocate(global_values(d1l:d1u,&
+ minval(d_gs_bounds(1,:)):maxval(d_gs_bounds(2,:)),&
+ minval(d_gs_bounds(3,:)):maxval(d_gs_bounds(4,:))))
+ global_values(:,:,:) = 0
+ allocate(displs(tasks+1))
+ allocate(recvcounts(tasks))
+ recvcounts(:) = (d_gs_bounds(2,:)-d_gs_bounds(1,:)+1)&
+ *(d_gs_bounds(4,:)-d_gs_bounds(3,:)+1)&
+ *size(values,1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+recvcounts(i)
+ end do
+ allocate(recvbuf(displs(tasks+1)))
+ else
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ allocate(global_values(1,1,1)) ! This prevents a problem with NULL pointers later.
+ allocate(displs(1))
+ allocate(recvcounts(1))
+ allocate(recvbuf(1))
+ end if
+ ! Strip horizontal halos: send only this task's owned interior columns.
+ allocate(sendbuf(size(values,1),&
+ d_gs_mybounds(1):d_gs_mybounds(2),&
+ d_gs_mybounds(3):d_gs_mybounds(4)))
+ sendbuf(:,:,:) = values(:,1+lhalo:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call fc_gatherv_real4(sendbuf,size(sendbuf),mpi_real4,&
+ recvbuf,recvcounts,displs,mpi_real4,main_rank,comm)
+ if (main_task) then
+ ! Unpack each task's contiguous contribution into its owned sub-block.
+ do i = 1,tasks
+ global_values(:,&
+ d_gs_bounds(1,i):d_gs_bounds(2,i),&
+ d_gs_bounds(3,i):d_gs_bounds(4,i)) = &
+ reshape(recvbuf(displs(i)+1:displs(i+1)), &
+ (/size(values,1),&
+ d_gs_bounds(2,i)-d_gs_bounds(1,i)+1,&
+ d_gs_bounds(4,i)-d_gs_bounds(3,i)+1/))
+ end do
+ end if
+ ! automatic deallocation
+ end subroutine distributed_gather_var_real4_3d
+
+ subroutine distributed_gather_var_real8_2d(values, global_values)
+
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocatable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Collective: every task in comm must call this together. Double-precision
+ ! variant of distributed_gather_var_integer_2d; same structure.
+
+ use mpi_mod
+ implicit none
+ real(8),dimension(:,:),intent(in) :: values
+ real(8),dimension(:,:),allocatable,intent(inout) :: global_values
+
+ integer :: i,ierror,j,k
+ integer,dimension(:),allocatable :: displs,recvcounts
+ real(8),dimension(:),allocatable :: recvbuf
+ real(8),dimension(:,:),allocatable :: sendbuf
+
+ ! Staggered-grid arrays with uhalo==0 cannot be halo-stripped; unsupported.
+ if (uhalo==0 .and. size(values,1)==local_ewn-1) then
+ ! Fixing this would require some generalization as is done for distributed_put_var
+ write(*,*) "distributed_gather does not currently work for"
+ write(*,*) "variables on the staggered grid when uhalo=0"
+ call parallel_stop(__FILE__, __LINE__)
+ end if
+
+ ! first time
+ ! Cache every task's owned global index bounds (see integer_2d variant).
+ if (.not. allocated(d_gs_bounds)) then
+ if (main_task) then
+ allocate(d_gs_bounds(4,tasks))
+ else
+ allocate(d_gs_bounds(1,1))
+ endif
+
+ d_gs_mybounds(1) = ewlb+lhalo
+ d_gs_mybounds(2) = ewub-uhalo
+ d_gs_mybounds(3) = nslb+lhalo
+ d_gs_mybounds(4) = nsub-uhalo
+ call fc_gather_int(d_gs_mybounds,4,mpi_integer,d_gs_bounds,4,&
+ mpi_integer,main_rank,comm)
+ endif
+
+ if (main_task) then
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ ! Global array spans the union of all tasks' owned bounds (global indices).
+ allocate(global_values(&
+ minval(d_gs_bounds(1,:)):maxval(d_gs_bounds(2,:)),&
+ minval(d_gs_bounds(3,:)):maxval(d_gs_bounds(4,:))))
+ global_values(:,:) = 0
+ allocate(displs(tasks+1))
+ allocate(recvcounts(tasks))
+ recvcounts(:) = (d_gs_bounds(2,:)-d_gs_bounds(1,:)+1)&
+ *(d_gs_bounds(4,:)-d_gs_bounds(3,:)+1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+recvcounts(i)
+ end do
+ allocate(recvbuf(displs(tasks+1)))
+ else
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ allocate(global_values(1,1)) ! This prevents a problem with NULL pointers later.
+ allocate(displs(1))
+ allocate(recvcounts(1))
+ allocate(recvbuf(1))
+ end if
+ ! Strip halos: send only this task's owned interior points.
+ allocate(sendbuf(d_gs_mybounds(1):d_gs_mybounds(2),&
+ d_gs_mybounds(3):d_gs_mybounds(4)))
+ sendbuf(:,:) = values(1+lhalo:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call fc_gatherv_real8(sendbuf,size(sendbuf),mpi_real8,&
+ recvbuf,recvcounts,displs,mpi_real8,main_rank,comm)
+ if (main_task) then
+ ! Unpack each task's contiguous contribution into its owned sub-block.
+ do i = 1,tasks
+ global_values(d_gs_bounds(1,i):d_gs_bounds(2,i),&
+ d_gs_bounds(3,i):d_gs_bounds(4,i)) = &
+ reshape(recvbuf(displs(i)+1:displs(i+1)), &
+ (/d_gs_bounds(2,i)-d_gs_bounds(1,i)+1,&
+ d_gs_bounds(4,i)-d_gs_bounds(3,i)+1/))
+ end do
+ end if
+ ! automatic deallocation
+ end subroutine distributed_gather_var_real8_2d
+
+ subroutine distributed_gather_var_real8_3d(values, global_values, ld1, ud1)
+
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocateable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ !
+ ! 3d variant of distributed_gather_var_real8_2d: dimension 1 is the
+ ! undistributed (e.g. vertical) axis, dimensions 2-3 are the horizontal
+ ! grid. ld1/ud1 optionally set the lower/upper bounds used for dimension 1
+ ! of the gathered global array.
+
+ use mpi_mod
+ implicit none
+ real(8),dimension(:,:,:),intent(in) :: values
+ real(8),dimension(:,:,:),allocatable,intent(inout) :: global_values
+ integer,optional,intent(in) :: ld1, ud1
+
+ ! NOTE(review): ierror, j and k are unused locals.
+ integer :: i,ierror,j,k,d1l,d1u
+ integer,dimension(:),allocatable :: displs,recvcounts
+ real(8),dimension(:),allocatable :: recvbuf
+ real(8),dimension(:,:,:),allocatable :: sendbuf
+
+ if (uhalo==0 .and. size(values,1)==local_ewn-1) then
+ ! Fixing this would require some generalization as is done for distributed_put_var
+ write(*,*) "distributed_gather does not currently work for"
+ write(*,*) "variables on the staggered grid when uhalo=0"
+ call parallel_stop(__FILE__, __LINE__)
+ end if
+
+ ! first time
+ ! Cache each task's owned-cell index bounds (shared module state).
+ if (.not. allocated(d_gs_bounds)) then
+ if (main_task) then
+ allocate(d_gs_bounds(4,tasks))
+ else
+ allocate(d_gs_bounds(1,1))
+ endif
+
+ d_gs_mybounds(1) = ewlb+lhalo
+ d_gs_mybounds(2) = ewub-uhalo
+ d_gs_mybounds(3) = nslb+lhalo
+ d_gs_mybounds(4) = nsub-uhalo
+ call fc_gather_int(d_gs_mybounds,4,mpi_integer,d_gs_bounds,4,&
+ mpi_integer,main_rank,comm)
+ endif
+
+ if (main_task) then
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ ! Resolve the dimension-1 bounds; default is 1:size(values,1).
+ if (present(ld1)) then
+ d1l = ld1
+ else
+ d1l = 1
+ endif
+ if (present(ud1)) then
+ d1u = ud1
+ else
+ ! NOTE(review): this default only yields d1u-d1l+1 == size(values,1)
+ ! when d1l == 1; for any other ld1 without ud1 the consistency check
+ ! below aborts. Presumably intended d1u = d1l + size(values,1) - 1 --
+ ! TODO confirm against callers.
+ d1u = size(values,1)-(d1l-1)
+ endif
+ if (size(values,1) /= d1u-d1l+1) then
+ write(*,*) "size(values,1) .ne. d1u-d1l+1 in gather call"
+ call parallel_stop(__FILE__, __LINE__)
+ endif
+ allocate(global_values(d1l:d1u,&
+ minval(d_gs_bounds(1,:)):maxval(d_gs_bounds(2,:)),&
+ minval(d_gs_bounds(3,:)):maxval(d_gs_bounds(4,:))))
+ global_values(:,:,:) = 0
+ ! Per-task element counts include the full extent of dimension 1.
+ allocate(displs(tasks+1))
+ allocate(recvcounts(tasks))
+ recvcounts(:) = (d_gs_bounds(2,:)-d_gs_bounds(1,:)+1)&
+ *(d_gs_bounds(4,:)-d_gs_bounds(3,:)+1)&
+ *size(values,1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+recvcounts(i)
+ end do
+ allocate(recvbuf(displs(tasks+1)))
+ else
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ allocate(global_values(1,1,1)) ! This prevents a problem with NULL pointers later.
+ allocate(displs(1))
+ allocate(recvcounts(1))
+ allocate(recvbuf(1))
+ end if
+ ! Pack this task's interior cells (halos stripped) for the gatherv.
+ allocate(sendbuf(size(values,1),&
+ d_gs_mybounds(1):d_gs_mybounds(2),&
+ d_gs_mybounds(3):d_gs_mybounds(4)))
+ sendbuf(:,:,:) = values(:,1+lhalo:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call fc_gatherv_real8(sendbuf,size(sendbuf),mpi_real8,&
+ recvbuf,recvcounts,displs,mpi_real8,main_rank,comm)
+ if (main_task) then
+ ! Unpack each task's flattened patch into the global array.
+ do i = 1,tasks
+ global_values(:,&
+ d_gs_bounds(1,i):d_gs_bounds(2,i),&
+ d_gs_bounds(3,i):d_gs_bounds(4,i)) = &
+ reshape(recvbuf(displs(i)+1:displs(i+1)), &
+ (/size(values,1),&
+ d_gs_bounds(2,i)-d_gs_bounds(1,i)+1,&
+ d_gs_bounds(4,i)-d_gs_bounds(3,i)+1/))
+ end do
+ end if
+ ! automatic deallocation
+ end subroutine distributed_gather_var_real8_3d
+
+ function distributed_get_var_integer_2d(ncid,varid,values,start)
+ ! Collective read of a distributed 2d integer netCDF variable: the main
+ ! task reads the whole variable with nf90_get_var, slices it into one
+ ! rectangle per task (bounds include halo extent, ewlb:ewub x nslb:nsub)
+ ! and scatters the slices with mpi_scatterv. All ranks return the same
+ ! netCDF status code (broadcast from the main task).
+ use mpi_mod
+ implicit none
+ integer :: distributed_get_var_integer_2d,ncid,varid
+ integer,dimension(:) :: start
+ integer,dimension(:,:) :: values
+
+ integer :: ew,i,ierror,ns
+ integer,dimension(4) :: mybounds
+ integer,dimension(:),allocatable :: displs,sendcounts
+ integer,dimension(:,:),allocatable :: bounds
+ integer,dimension(:),allocatable :: sendbuf
+ integer,dimension(:,:),allocatable :: global_values,recvbuf
+
+ ! begin
+
+ ! Unstaggered variables span the full grid; a first extent one smaller
+ ! than local_ewn marks the staggered (velocity) grid. Anything else aborts.
+ if (size(values,1)==local_ewn) then
+ ew = global_ewn
+ ns = global_nsn
+ else if (size(values,1)==local_ewn-1) then
+ ew = global_ewn-1
+ ns = global_nsn-1
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+ mybounds(1) = ewlb
+ mybounds(2) = ewub
+ mybounds(3) = nslb
+ mybounds(4) = nsub
+ if (main_task) then
+ allocate(bounds(4,tasks))
+ else
+ allocate(bounds(1,1))
+ end if
+ call fc_gather_int(mybounds,4,mpi_integer,bounds,4,&
+ mpi_integer,main_rank,comm)
+ if (main_task) then
+ ! Global buffer covers the union of all task bounds; cells outside the
+ ! file's 1:ew x 1:ns region stay zero.
+ allocate(global_values(minval(bounds(1,:)):maxval(bounds(2,:)),&
+ minval(bounds(3,:)):maxval(bounds(4,:))))
+ global_values(:,:) = 0
+ distributed_get_var_integer_2d = nf90_get_var(ncid,varid,&
+ global_values(1:ew,1:ns),start)
+ ! Flatten each task's rectangle into the contiguous scatter buffer.
+ allocate(displs(tasks+1))
+ allocate(sendcounts(tasks))
+ sendcounts(:) = (bounds(2,:)-bounds(1,:)+1)*(bounds(4,:)-bounds(3,:)+1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+sendcounts(i)
+ end do
+ allocate(sendbuf(displs(tasks+1)))
+ do i = 1,tasks
+ sendbuf(displs(i)+1:displs(i+1)) = reshape(&
+ global_values(bounds(1,i):bounds(2,i),bounds(3,i):bounds(4,i)),&
+ (/displs(i+1)-displs(i)/))
+ end do
+ else
+ allocate(displs(1))
+ allocate(sendcounts(1))
+ allocate(sendbuf(1))
+ end if
+ call broadcast(distributed_get_var_integer_2d)
+ ! Receive into a full local-grid buffer, then copy the (possibly smaller,
+ ! staggered) leading section into the caller's array.
+ allocate(recvbuf(local_ewn,local_nsn))
+ call mpi_scatterv(sendbuf,sendcounts,displs,mpi_integer,&
+ recvbuf,size(recvbuf),mpi_integer,main_rank,comm,ierror)
+ values(:,:) = recvbuf(:size(values,1),:size(values,2))
+ !automatic deallocation
+ end function distributed_get_var_integer_2d
+
+ function distributed_get_var_real4_1d(ncid,varid,values,start)
+ ! Collective read of a distributed 1d real(4) coordinate variable. Only
+ ! the "x1" and "y1" coordinate variables are supported: the main task
+ ! looks up their ids, reads the full coordinate array, and scatters each
+ ! task's ewlb:ewub (or nslb:nsub) section. The netCDF status is broadcast
+ ! so all ranks return the same value.
+ use mpi_mod
+ use netcdf
+ implicit none
+ integer :: distributed_get_var_real4_1d,ncid,varid
+ integer,dimension(:) :: start
+ real(4),dimension(:) :: values
+
+ integer :: i,ierror,myn,status,x1id,y1id
+ integer,dimension(2) :: mybounds
+ integer,dimension(:),allocatable :: displs,sendcounts
+ integer,dimension(:,:),allocatable :: bounds
+ real(4),dimension(:),allocatable :: global_values,sendbuf
+
+ ! begin
+
+ if (main_task) then
+ allocate(bounds(2,tasks))
+ ! inquiry status codes are ignored; unsupported varid aborts below
+ status = nf90_inq_varid(ncid,"x1",x1id)
+ status = nf90_inq_varid(ncid,"y1",y1id)
+ else
+ allocate(bounds(1,1))
+ end if
+ call broadcast(x1id)
+ call broadcast(y1id)
+ ! Choose the EW or NS decomposition depending on which axis is requested.
+ if (varid==x1id) then
+ mybounds(1) = ewlb
+ mybounds(2) = ewub
+ myn = global_ewn
+ else if (varid==y1id) then
+ mybounds(1) = nslb
+ mybounds(2) = nsub
+ myn = global_nsn
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+ call fc_gather_int(mybounds,2,mpi_integer,bounds,2,&
+ mpi_integer,main_rank,comm)
+ if (main_task) then
+ allocate(global_values(minval(bounds(1,:)):maxval(bounds(2,:))))
+ global_values(:) = 0
+ distributed_get_var_real4_1d = &
+ nf90_get_var(ncid,varid,global_values(1:myn),start)
+ ! Build the contiguous scatter buffer, one section per task.
+ allocate(displs(tasks+1))
+ allocate(sendcounts(tasks))
+ sendcounts(:) = bounds(2,:)-bounds(1,:)+1
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+sendcounts(i)
+ end do
+ allocate(sendbuf(displs(tasks+1)))
+ do i = 1,tasks
+ sendbuf(displs(i)+1:displs(i+1)) = &
+ global_values(bounds(1,i):bounds(2,i))
+ end do
+ else
+ allocate(displs(1))
+ allocate(sendcounts(1))
+ allocate(sendbuf(1))
+ end if
+ call broadcast(distributed_get_var_real4_1d)
+ call mpi_scatterv(sendbuf,sendcounts,displs,mpi_real4,&
+ values,size(values),mpi_real4,main_rank,comm,ierror)
+ !automatic deallocation
+ end function distributed_get_var_real4_1d
+
+ function distributed_get_var_real4_2d(ncid,varid,values,start)
+ ! Collective read of a distributed 2d real(4) netCDF variable; real(4)
+ ! twin of distributed_get_var_integer_2d (main task reads globally,
+ ! slices per task, mpi_scatterv distributes; status broadcast).
+ use mpi_mod
+ implicit none
+ integer :: distributed_get_var_real4_2d,ncid,varid
+ integer,dimension(:) :: start
+ real(4),dimension(:,:) :: values
+
+ integer :: ew,i,ierror,ns
+ integer,dimension(4) :: mybounds
+ integer,dimension(:),allocatable :: displs,sendcounts
+ integer,dimension(:,:),allocatable :: bounds
+ real(4),dimension(:),allocatable :: sendbuf
+ real(4),dimension(:,:),allocatable :: global_values,recvbuf
+
+ ! begin
+
+ ! Full grid vs. staggered grid detection from the first extent.
+ if (size(values,1)==local_ewn) then
+ ew = global_ewn
+ ns = global_nsn
+ else if (size(values,1)==local_ewn-1) then
+ ew = global_ewn-1
+ ns = global_nsn-1
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+ mybounds(1) = ewlb
+ mybounds(2) = ewub
+ mybounds(3) = nslb
+ mybounds(4) = nsub
+ if (main_task) then
+ allocate(bounds(4,tasks))
+ else
+ allocate(bounds(1,1))
+ end if
+ call fc_gather_int(mybounds,4,mpi_integer,bounds,4,&
+ mpi_integer,main_rank,comm)
+ if (main_task) then
+ allocate(global_values(minval(bounds(1,:)):maxval(bounds(2,:)),&
+ minval(bounds(3,:)):maxval(bounds(4,:))))
+ global_values(:,:) = 0
+ distributed_get_var_real4_2d = nf90_get_var(ncid,varid,&
+ global_values(1:ew,1:ns),start)
+ ! Flatten each task's rectangle into the scatter buffer.
+ allocate(displs(tasks+1))
+ allocate(sendcounts(tasks))
+ sendcounts(:) = (bounds(2,:)-bounds(1,:)+1)*(bounds(4,:)-bounds(3,:)+1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+sendcounts(i)
+ end do
+ allocate(sendbuf(displs(tasks+1)))
+ do i = 1,tasks
+ sendbuf(displs(i)+1:displs(i+1)) = reshape(&
+ global_values(bounds(1,i):bounds(2,i),bounds(3,i):bounds(4,i)),&
+ (/displs(i+1)-displs(i)/))
+ end do
+ else
+ allocate(displs(1))
+ allocate(sendcounts(1))
+ allocate(sendbuf(1))
+ end if
+ call broadcast(distributed_get_var_real4_2d)
+ allocate(recvbuf(local_ewn,local_nsn))
+ call mpi_scatterv(sendbuf,sendcounts,displs,mpi_real4,&
+ recvbuf,size(recvbuf),mpi_real4,main_rank,comm,ierror)
+ values(:,:) = recvbuf(:size(values,1),:size(values,2))
+ !automatic deallocation
+ end function distributed_get_var_real4_2d
+
+ !WHL - added this function
+ function distributed_get_var_real8_1d(ncid,varid,values,start)
+ ! Collective read of a distributed 1d real(8) coordinate variable;
+ ! real(8) twin of distributed_get_var_real4_1d (only the "x1"/"y1"
+ ! coordinate variables are supported).
+ use mpi_mod
+ use netcdf
+ implicit none
+ integer :: distributed_get_var_real8_1d,ncid,varid
+ integer,dimension(:) :: start
+ real(8),dimension(:) :: values
+
+ integer :: i,ierror,myn,status,x1id,y1id
+ integer,dimension(2) :: mybounds
+ integer,dimension(:),allocatable :: displs,sendcounts
+ integer,dimension(:,:),allocatable :: bounds
+ real(8),dimension(:),allocatable :: global_values,sendbuf
+
+ ! begin
+
+ if (main_task) then
+ allocate(bounds(2,tasks))
+ ! inquiry status codes are ignored; unsupported varid aborts below
+ status = nf90_inq_varid(ncid,"x1",x1id)
+ status = nf90_inq_varid(ncid,"y1",y1id)
+ else
+ allocate(bounds(1,1))
+ end if
+ call broadcast(x1id)
+ call broadcast(y1id)
+ ! Choose the EW or NS decomposition depending on which axis is requested.
+ if (varid==x1id) then
+ mybounds(1) = ewlb
+ mybounds(2) = ewub
+ myn = global_ewn
+ else if (varid==y1id) then
+ mybounds(1) = nslb
+ mybounds(2) = nsub
+ myn = global_nsn
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+ call fc_gather_int(mybounds,2,mpi_integer,bounds,2,&
+ mpi_integer,main_rank,comm)
+ if (main_task) then
+ allocate(global_values(minval(bounds(1,:)):maxval(bounds(2,:))))
+ global_values(:) = 0
+ distributed_get_var_real8_1d = &
+ nf90_get_var(ncid,varid,global_values(1:myn),start)
+ ! Build the contiguous scatter buffer, one section per task.
+ allocate(displs(tasks+1))
+ allocate(sendcounts(tasks))
+ sendcounts(:) = bounds(2,:)-bounds(1,:)+1
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+sendcounts(i)
+ end do
+ allocate(sendbuf(displs(tasks+1)))
+ do i = 1,tasks
+ sendbuf(displs(i)+1:displs(i+1)) = &
+ global_values(bounds(1,i):bounds(2,i))
+ end do
+ else
+ allocate(displs(1))
+ allocate(sendcounts(1))
+ allocate(sendbuf(1))
+ end if
+ call broadcast(distributed_get_var_real8_1d)
+ call mpi_scatterv(sendbuf,sendcounts,displs,mpi_real8,&
+ values,size(values),mpi_real8,main_rank,comm,ierror)
+ !automatic deallocation
+ end function distributed_get_var_real8_1d
+
+ function distributed_get_var_real8_2d(ncid,varid,values,start)
+ ! Collective read of a distributed 2d real(8) netCDF variable; real(8)
+ ! twin of distributed_get_var_integer_2d (main task reads globally,
+ ! slices per task, mpi_scatterv distributes; status broadcast).
+ use mpi_mod
+ implicit none
+ integer :: distributed_get_var_real8_2d,ncid,varid
+ integer,dimension(:) :: start
+ real(8),dimension(:,:) :: values
+
+ integer :: ew,i,ierror,ns
+ integer,dimension(4) :: mybounds
+ integer,dimension(:),allocatable :: displs,sendcounts
+ integer,dimension(:,:),allocatable :: bounds
+ real(8),dimension(:),allocatable :: sendbuf
+ real(8),dimension(:,:),allocatable :: global_values,recvbuf
+
+ ! begin
+
+ ! Full grid vs. staggered grid detection from the first extent.
+ if (size(values,1)==local_ewn) then
+ ew = global_ewn
+ ns = global_nsn
+ else if (size(values,1)==local_ewn-1) then
+ ew = global_ewn-1
+ ns = global_nsn-1
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+ mybounds(1) = ewlb
+ mybounds(2) = ewub
+ mybounds(3) = nslb
+ mybounds(4) = nsub
+ if (main_task) then
+ allocate(bounds(4,tasks))
+ else
+ allocate(bounds(1,1))
+ end if
+ call fc_gather_int(mybounds,4,mpi_integer,bounds,4,&
+ mpi_integer,main_rank,comm)
+ if (main_task) then
+ allocate(global_values(minval(bounds(1,:)):maxval(bounds(2,:)),&
+ minval(bounds(3,:)):maxval(bounds(4,:))))
+ global_values(:,:) = 0
+ distributed_get_var_real8_2d = nf90_get_var(ncid,varid,&
+ global_values(1:ew,1:ns),start)
+ ! Flatten each task's rectangle into the scatter buffer.
+ allocate(displs(tasks+1))
+ allocate(sendcounts(tasks))
+ sendcounts(:) = (bounds(2,:)-bounds(1,:)+1)*(bounds(4,:)-bounds(3,:)+1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+sendcounts(i)
+ end do
+ allocate(sendbuf(displs(tasks+1)))
+ do i = 1,tasks
+ sendbuf(displs(i)+1:displs(i+1)) = reshape(&
+ global_values(bounds(1,i):bounds(2,i),bounds(3,i):bounds(4,i)),&
+ (/displs(i+1)-displs(i)/))
+ end do
+ else
+ allocate(displs(1))
+ allocate(sendcounts(1))
+ allocate(sendbuf(1))
+ end if
+ call broadcast(distributed_get_var_real8_2d)
+ allocate(recvbuf(local_ewn,local_nsn))
+ call mpi_scatterv(sendbuf,sendcounts,displs,mpi_real8,&
+ recvbuf,size(recvbuf),mpi_real8,main_rank,comm,ierror)
+ values(:,:) = recvbuf(:size(values,1),:size(values,2))
+ !automatic deallocation
+
+ end function distributed_get_var_real8_2d
+
+ function distributed_get_var_real8_3d(ncid,varid,values,start)
+ ! Collective read of a distributed 3d real(8) netCDF variable. Dimensions
+ ! 1-2 are the horizontal grid, dimension 3 is undistributed and is read
+ ! and scattered in full. Same pattern as the 2d variants: main task reads
+ ! globally, slices per task, mpi_scatterv distributes, status broadcast.
+ use mpi_mod
+ implicit none
+ integer :: distributed_get_var_real8_3d,ncid,varid
+ integer,dimension(:) :: start
+ real(8),dimension(:,:,:) :: values
+
+ integer :: ew,i,ierror,ns
+ integer,dimension(4) :: mybounds
+ integer,dimension(:),allocatable :: displs,sendcounts
+ integer,dimension(:,:),allocatable :: bounds
+ real(8),dimension(:),allocatable :: sendbuf
+ real(8),dimension(:,:,:),allocatable :: global_values,recvbuf
+
+ ! begin
+
+ ! Full grid vs. staggered grid detection from the first extent.
+ if (size(values,1)==local_ewn) then
+ ew = global_ewn
+ ns = global_nsn
+ else if (size(values,1)==local_ewn-1) then
+ ew = global_ewn-1
+ ns = global_nsn-1
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+ mybounds(1) = ewlb
+ mybounds(2) = ewub
+ mybounds(3) = nslb
+ mybounds(4) = nsub
+ if (main_task) then
+ allocate(bounds(4,tasks))
+ else
+ allocate(bounds(1,1))
+ end if
+ call fc_gather_int(mybounds,4,mpi_integer,bounds,4,&
+ mpi_integer,main_rank,comm)
+ if (main_task) then
+ allocate(global_values(minval(bounds(1,:)):maxval(bounds(2,:)),&
+ minval(bounds(3,:)):maxval(bounds(4,:)),size(values,3)))
+ global_values(:,:,:) = 0
+ distributed_get_var_real8_3d = nf90_get_var(ncid,varid,&
+ global_values(1:ew,1:ns,:),start)
+ ! Counts include the full third dimension for every task.
+ allocate(displs(tasks+1))
+ allocate(sendcounts(tasks))
+ sendcounts(:) = (bounds(2,:)-bounds(1,:)+1)*&
+ (bounds(4,:)-bounds(3,:)+1)*size(values,3)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+sendcounts(i)
+ end do
+ allocate(sendbuf(displs(tasks+1)))
+ do i = 1,tasks
+ sendbuf(displs(i)+1:displs(i+1)) = reshape(global_values(&
+ bounds(1,i):bounds(2,i),bounds(3,i):bounds(4,i),:),&
+ (/displs(i+1)-displs(i)/))
+ end do
+ else
+ allocate(displs(1))
+ allocate(sendcounts(1))
+ allocate(sendbuf(1))
+ end if
+ call broadcast(distributed_get_var_real8_3d)
+ allocate(recvbuf(local_ewn,local_nsn,size(values,3)))
+ call mpi_scatterv(sendbuf,sendcounts,displs,mpi_real8,&
+ recvbuf,size(recvbuf),mpi_real8,main_rank,comm,ierror)
+ values(:,:,:) = recvbuf(:size(values,1),:size(values,2),:)
+ !automatic deallocation
+ end function distributed_get_var_real8_3d
+
+ function distributed_isparallel()
+    ! This is the MPI (distributed) build of the parallel layer, so the
+    ! answer is unconditionally .true.
+    implicit none
+    logical :: distributed_isparallel
+    distributed_isparallel = .true.
+ end function distributed_isparallel
+
+ !WHL - added global boundary conditions
+ subroutine distributed_grid(ewn, nsn, nhalo_in, periodic_bc_in, outflow_bc_in)
+
+ ! Set up the 2d domain decomposition for a global grid of ewn x nsn cells:
+ ! optionally resets the halo widths, chooses an EW x NS task factorization,
+ ! checks that halos do not overlap, and records the global boundary
+ ! condition flags (periodic by default).
+
+ implicit none
+ integer, intent(inout) :: ewn, nsn ! global grid dimensions
+ integer, intent(in), optional :: nhalo_in ! number of rows of halo cells
+ logical, intent(in), optional :: periodic_bc_in ! true for periodic global BCs
+ logical, intent(in), optional :: outflow_bc_in ! true for outflow global BCs
+ ! (scalars in global halo set to zero)
+
+ integer :: best,i,j,metric
+ integer :: ewrank,ewtasks,nsrank,nstasks
+ real(8) :: rewtasks,rnstasks
+
+ ! begin
+
+ ! Optionally, change the halo values
+ ! Note: The higher-order dycores (glam, glissade) currently require nhalo = 2.
+ !       The Glide SIA dycore requires nhalo = 0.
+ !       The default halo values at the top of the module are appropriate for
+ !       the higher-order dycores.  Here they can be reset to zero for Glide.
+
+ if (present(nhalo_in)) then
+ if (main_task) then
+ write(*,*) 'Setting halo values: nhalo =', nhalo_in
+ if (nhalo_in < 0) then
+ write(*,*) 'ERROR: nhalo must be >= 0'
+ call parallel_stop(__FILE__, __LINE__)
+ elseif (nhalo_in /= 2) then
+ write(*,*) 'WARNING: parallel dycores tested only with nhalo = 2'
+ endif
+ endif
+ nhalo = nhalo_in
+ lhalo = nhalo
+ uhalo = nhalo
+ staggered_lhalo = lhalo
+ staggered_uhalo = max(uhalo-1, 0)
+ !TODO - Remove the following variables
+ staggered_whalo = lhalo
+ staggered_shalo = lhalo
+ staggered_ehalo = max(uhalo-1, 0)
+ staggered_nhalo = max(uhalo-1, 0)
+ endif
+
+ global_ewn = ewn
+ global_nsn = nsn
+
+ ! Search the factorizations tasks = i*j for the one whose aspect ratio
+ ! best matches the global grid (metric closest to zero).
+ ewtasks = 0
+ nstasks = 0
+ best = huge(best)
+ do i = 1,min(tasks,global_ewn)
+ j = tasks/i
+ if (j<=global_nsn.and.i*j==tasks) then ! try to use all tasks
+ metric = abs(i*global_nsn-j*global_ewn) ! zero if ewn/nsn == i/j
+ ! NOTE(review): the source from here to the neighbor setup appears
+ ! garbled/truncated in this patch -- the next line fuses the end of the
+ ! factorization search with the east/west/north/south neighbor
+ ! computation, and several intervening statements (best/ewtasks/nstasks
+ ! selection, rank/bounds setup) are missing. Compare against the
+ ! upstream parallel_mpi.F90 before trusting this region.
+ if (metricthis_rank/ewtasks) east = east-ewtasks
+ south = this_rank-ewtasks
+ if (south<0) south = south+tasks
+ north = this_rank+ewtasks
+ if (north>=tasks) north = north-tasks
+
+ ! Check that haven't split up the problem too much.  Idea is that do not want halos overlapping in either dimension.
+ ! local_* - lhalo - uhalo is the actual number of non-halo cells on a processor.
+ if ((local_nsn - lhalo - uhalo) .lt. (lhalo + uhalo + 1)) then
+ write(*,*) "NS halos overlap on processor ", this_rank
+ call parallel_stop(__FILE__, __LINE__)
+ endif
+
+ if ((local_ewn - lhalo - uhalo) .lt. (lhalo + uhalo + 1)) then
+ write(*,*) "EW halos overlap on processor ", this_rank
+ call parallel_stop(__FILE__, __LINE__)
+ endif
+
+ !WHL - added global boundary conditions
+
+ ! Outflow and periodic BCs are mutually exclusive; the flag supplied last
+ ! (periodic_bc_in) wins if both optionals are present.
+ periodic_bc = .true.   ! this is the default
+ outflow_bc = .false.
+
+ if (present(outflow_bc_in)) then
+ outflow_bc = outflow_bc_in
+ if (outflow_bc) periodic_bc = .false.
+ endif
+
+ if (present(periodic_bc_in)) then
+ periodic_bc = periodic_bc_in
+ if (periodic_bc) outflow_bc = .false.
+ endif
+
+ ! Print grid geometry
+! write(*,*) "Process ", this_rank, " Total = ", tasks, " ewtasks = ", ewtasks, " nstasks = ", nstasks
+! write(*,*) "Process ", this_rank, " ewrank = ", ewrank, " nsrank = ", nsrank
+! write(*,*) "Process ", this_rank, " l_ewn = ", local_ewn, " o_ewn = ", own_ewn
+! write(*,*) "Process ", this_rank, " l_nsn = ", local_nsn, " o_nsn = ", own_nsn
+! write(*,*) "Process ", this_rank, " ewlb = ", ewlb, " ewub = ", ewub
+! write(*,*) "Process ", this_rank, " nslb = ", nslb, " nsub = ", nsub
+! write(*,*) "Process ", this_rank, " east = ", east, " west = ", west
+! write(*,*) "Process ", this_rank, " north = ", north, " south = ", south
+! write(*,*) "Process ", this_rank, " ew_vars = ", own_ewn, " ns_vars = ", own_nsn
+ call distributed_print_grid(own_ewn, own_nsn)
+
+ end subroutine distributed_grid
+
+ function distributed_owner(ew,ewn,ns,nsn)
+    ! True when local cell (ew,ns) lies in this task's owned interior,
+    ! i.e. strictly inside the halo frame. The ewn and nsn arguments are
+    ! unused here but kept for interface compatibility with callers.
+    implicit none
+    logical :: distributed_owner
+    integer :: ew,ewn,ns,nsn
+    logical :: owns_ew, owns_ns
+    owns_ew = (ew > lhalo) .and. (ew <= local_ewn - uhalo)
+    owns_ns = (ns > lhalo) .and. (ns <= local_nsn - uhalo)
+    distributed_owner = owns_ew .and. owns_ns
+ end function distributed_owner
+
+ subroutine distributed_print_grid(l_ewn,l_nsn)
+    ! Gather every task's local grid size onto the main task and print one
+    ! summary line per distinct (EW,NS) layout together with the number of
+    ! tasks sharing it.
+    use mpi_mod
+    implicit none
+
+    integer :: l_ewn, l_nsn
+    integer :: p,q,nmatch
+    integer,dimension(2) :: mysize
+    integer,dimension(:,:),allocatable :: sizes
+
+    mysize(1) = l_ewn
+    mysize(2) = l_nsn
+
+    if (main_task) then
+       allocate(sizes(2,tasks))
+    else
+       allocate(sizes(1,1))   ! dummy; only the root receives gathered data
+    end if
+    call fc_gather_int(mysize,2,mpi_integer,sizes,2,mpi_integer,main_rank,comm)
+
+    if (main_task) then
+       do p = 1,tasks
+          if (sizes(1,p) .ne. -1) then
+             ! Count later tasks with an identical layout and mark them
+             ! with -1 so they are not reported twice.
+             nmatch = 1
+             do q = p+1,tasks
+                if ((sizes(1,p) .eq. sizes(1,q)) .and. (sizes(2,p) .eq. sizes(2,q))) then
+                   nmatch = nmatch + 1
+                   sizes(1,q) = -1
+                   sizes(2,q) = -1
+                end if
+             end do
+             write(*,*) "Layout(EW,NS) = ", sizes(1,p), sizes(2,p), " total procs = ", nmatch
+          end if
+       end do
+    end if
+    ! sizes is deallocated automatically on return
+
+ end subroutine distributed_print_grid
+
+ subroutine distributed_print_integer_2d(name,values)
+ ! Debug aid: gather a distributed 2d integer field (halos stripped) onto
+ ! the main task and write it to a text file named <name><tasks>.txt.
+ use mpi_mod
+ implicit none
+ character(*) :: name
+ integer,dimension(:,:) :: values
+
+ ! NOTE(review): hard-coded unit number; acceptable for a debug routine.
+ integer,parameter :: u = 33
+ character(3) :: ts
+ integer :: i,ierror,j,k
+ integer,dimension(4) :: mybounds
+ integer,dimension(:),allocatable :: displs,recvcounts
+ integer,dimension(:,:),allocatable :: bounds
+ integer,dimension(:),allocatable :: recvbuf
+ integer,dimension(:,:),allocatable :: global_values,sendbuf
+
+ ! begin
+
+ if (uhalo==0 .and. size(values,1)==local_ewn-1) then
+ ! Fixing this would require some generalization as is done for distributed_put_var
+ write(*,*) "distributed_print does not currently work for"
+ write(*,*) "variables on the staggered grid when uhalo=0"
+ call parallel_stop(__FILE__, __LINE__)
+ end if
+
+ ! Owned-cell bounds for this task (halos excluded).
+ mybounds(1) = ewlb+lhalo
+ mybounds(2) = ewub-uhalo
+ mybounds(3) = nslb+lhalo
+ mybounds(4) = nsub-uhalo
+ if (main_task) then
+ allocate(bounds(4,tasks))
+ else
+ allocate(bounds(1,1))
+ end if
+ call fc_gather_int(mybounds,4,mpi_integer,bounds,4,&
+ mpi_integer,main_rank,comm)
+ if (main_task) then
+ allocate(global_values(minval(bounds(1,:)):maxval(bounds(2,:)),&
+ minval(bounds(3,:)):maxval(bounds(4,:))))
+ global_values(:,:) = 0
+ ! recvcounts/displs describe each task's patch in the gather buffer.
+ allocate(displs(tasks+1))
+ allocate(recvcounts(tasks))
+ recvcounts(:) = (bounds(2,:)-bounds(1,:)+1)*(bounds(4,:)-bounds(3,:)+1)
+ displs(1) = 0
+ do i = 1,tasks
+ displs(i+1) = displs(i)+recvcounts(i)
+ end do
+ allocate(recvbuf(displs(tasks+1)))
+ else
+ allocate(displs(1))
+ allocate(recvcounts(1))
+ allocate(recvbuf(1))
+ end if
+ ! Pack and gather this task's interior cells.
+ allocate(sendbuf(mybounds(1):mybounds(2),mybounds(3):mybounds(4)))
+ sendbuf(:,:) = values(1+lhalo:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call fc_gatherv_int(sendbuf,size(sendbuf),mpi_integer,&
+ recvbuf,recvcounts,displs,mpi_integer,main_rank,comm)
+ if (main_task) then
+ do i = 1,tasks
+ global_values(bounds(1,i):bounds(2,i),bounds(3,i):bounds(4,i)) = &
+ reshape(recvbuf(displs(i)+1:displs(i+1)), &
+ (/bounds(2,i)-bounds(1,i)+1,bounds(4,i)-bounds(3,i)+1/))
+ end do
+ write(ts,'(i3.3)') tasks
+ open(unit=u,file=name//ts//".txt",form="formatted",status="replace")
+ ! NOTE(review): the source is garbled/truncated here -- the next line
+ ! fuses the tail of distributed_print_integer_2d with the interior of
+ ! parallel_boundary: the write loop and "end subroutine" for this
+ ! routine, and the header of function parallel_boundary, are missing.
+ ! Compare against the upstream parallel_mpi.F90 before trusting this.
+ if (size(values,1)global_ewn.and.ew==ewn-uhalo).or.&
+ (nslb<1.and.ns==1+lhalo).or.&
+ (nsub>global_nsn.and.ns==nsn-uhalo)
+ end function parallel_boundary
+
+ function parallel_close(ncid)
+    ! Close a netCDF dataset: only the main task touches the file; the
+    ! resulting status code is broadcast so every rank returns the same value.
+    implicit none
+    integer :: ncid,parallel_close
+    if (main_task) then
+       parallel_close = nf90_close(ncid)
+    end if
+    call broadcast(parallel_close)
+ end function parallel_close
+
+ subroutine parallel_convert_haloed_to_nonhaloed_real4_2d(input_with_halo, output_no_halo)
+    ! Strip the halo from a local field: copy the interior (owned) cells of
+    ! input_with_halo into output_no_halo. Aborts if either array does not
+    ! have the expected shape.
+    real(4),dimension(:,:), intent(in) :: input_with_halo
+    real(4),dimension(:,:), intent(out) :: output_no_halo
+
+    ! The input must span the full haloed local grid...
+    if (size(input_with_halo,1) /= local_ewn .or. size(input_with_halo,2) /= local_nsn) then
+       write(*,*) "Unexpected size for input_with_halo: ", &
+            size(input_with_halo,1), size(input_with_halo,2)
+       write(*,*) "Expected size is: ", local_ewn, local_nsn
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+
+    ! ...and the output exactly the owned (halo-free) cells.
+    if (size(output_no_halo,1) /= own_ewn .or. size(output_no_halo,2) /= own_nsn) then
+       write(*,*) "Unexpected size for output_no_halo: ", &
+            size(output_no_halo,1), size(output_no_halo,2)
+       write(*,*) "Expected size is: ", own_ewn, own_nsn
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+
+    output_no_halo(1:own_ewn, 1:own_nsn) = &
+         input_with_halo(1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo)
+
+ end subroutine parallel_convert_haloed_to_nonhaloed_real4_2d
+
+ subroutine parallel_convert_haloed_to_nonhaloed_real8_2d(input_with_halo, output_no_halo)
+    ! real(8) twin of the real(4) version: strip the halo from a local
+    ! field, copying only the owned interior cells. Aborts on shape mismatch.
+    real(8),dimension(:,:), intent(in) :: input_with_halo
+    real(8),dimension(:,:), intent(out) :: output_no_halo
+
+    ! The input must span the full haloed local grid...
+    if (size(input_with_halo,1) /= local_ewn .or. size(input_with_halo,2) /= local_nsn) then
+       write(*,*) "Unexpected size for input_with_halo: ", &
+            size(input_with_halo,1), size(input_with_halo,2)
+       write(*,*) "Expected size is: ", local_ewn, local_nsn
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+
+    ! ...and the output exactly the owned (halo-free) cells.
+    if (size(output_no_halo,1) /= own_ewn .or. size(output_no_halo,2) /= own_nsn) then
+       write(*,*) "Unexpected size for output_no_halo: ", &
+            size(output_no_halo,1), size(output_no_halo,2)
+       write(*,*) "Expected size is: ", own_ewn, own_nsn
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+
+    output_no_halo(1:own_ewn, 1:own_nsn) = &
+         input_with_halo(1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo)
+
+ end subroutine parallel_convert_haloed_to_nonhaloed_real8_2d
+
+ subroutine parallel_convert_nonhaloed_to_haloed_real4_2d(input_no_halo, output_with_halo)
+    ! Inverse of the haloed->nonhaloed conversion: copy the owned cells into
+    ! the interior of output_with_halo, then fill the halo via a halo
+    ! exchange. Aborts on shape mismatch.
+    real(4),dimension(:,:), intent(in) :: input_no_halo
+    real(4),dimension(:,:), intent(out) :: output_with_halo
+
+    ! The input holds exactly the owned cells...
+    if (size(input_no_halo,1) /= own_ewn .or. size(input_no_halo,2) /= own_nsn) then
+       write(*,*) "Unexpected size for input_no_halo: ", &
+            size(input_no_halo,1), size(input_no_halo,2)
+       write(*,*) "Expected size is: ", own_ewn, own_nsn
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+
+    ! ...and the output spans the full haloed local grid.
+    if (size(output_with_halo,1) /= local_ewn .or. size(output_with_halo,2) /= local_nsn) then
+       write(*,*) "Unexpected size for output_with_halo: ", &
+            size(output_with_halo,1), size(output_with_halo,2)
+       write(*,*) "Expected size is: ", local_ewn, local_nsn
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+
+    output_with_halo(1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo) = &
+         input_no_halo(1:own_ewn, 1:own_nsn)
+
+    ! Populate the halo cells from neighboring tasks.
+    call parallel_halo(output_with_halo)
+
+ end subroutine parallel_convert_nonhaloed_to_haloed_real4_2d
+
+ subroutine parallel_convert_nonhaloed_to_haloed_real8_2d(input_no_halo, output_with_halo)
+    ! real(8) twin of the real(4) version: copy the owned cells into the
+    ! interior of output_with_halo and fill the halo via a halo exchange.
+    real(8),dimension(:,:), intent(in) :: input_no_halo
+    real(8),dimension(:,:), intent(out) :: output_with_halo
+
+    ! The input holds exactly the owned cells...
+    if (size(input_no_halo,1) /= own_ewn .or. size(input_no_halo,2) /= own_nsn) then
+       write(*,*) "Unexpected size for input_no_halo: ", &
+            size(input_no_halo,1), size(input_no_halo,2)
+       write(*,*) "Expected size is: ", own_ewn, own_nsn
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+
+    ! ...and the output spans the full haloed local grid.
+    if (size(output_with_halo,1) /= local_ewn .or. size(output_with_halo,2) /= local_nsn) then
+       write(*,*) "Unexpected size for output_with_halo: ", &
+            size(output_with_halo,1), size(output_with_halo,2)
+       write(*,*) "Expected size is: ", local_ewn, local_nsn
+       call parallel_stop(__FILE__, __LINE__)
+    end if
+
+    output_with_halo(1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo) = &
+         input_no_halo(1:own_ewn, 1:own_nsn)
+
+    ! Populate the halo cells from neighboring tasks.
+    call parallel_halo(output_with_halo)
+
+ end subroutine parallel_convert_nonhaloed_to_haloed_real8_2d
+
+ function parallel_create(path,cmode,ncid)
+    ! Create a netCDF dataset on the main task only, then broadcast both
+    ! the status code and the new file id so every rank holds them.
+    implicit none
+    integer :: cmode,ncid,parallel_create
+    character(len=*) :: path
+    if (main_task) then
+       parallel_create = nf90_create(path,cmode,ncid)
+    end if
+    call broadcast(parallel_create)
+    call broadcast(ncid)
+ end function parallel_create
+
+ function parallel_def_dim(ncid,name,len,dimid)
+    ! Define a netCDF dimension on the main task; broadcast the status and
+    ! the new dimension id. (The dummy 'len' shadows the intrinsic of that
+    ! name; kept for interface compatibility.)
+    use netcdf
+    implicit none
+    integer :: dimid,len,ncid,parallel_def_dim
+    character(len=*) :: name
+    if (main_task) then
+       parallel_def_dim = nf90_def_dim(ncid,name,len,dimid)
+    end if
+    call broadcast(parallel_def_dim)
+    call broadcast(dimid)
+ end function parallel_def_dim
+
+ function parallel_def_var_dimids(ncid,name,xtype,dimids,varid)
+    ! Define a netCDF variable with dimension ids on the main task;
+    ! broadcast the status and the new variable id to all ranks.
+    implicit none
+    integer :: ncid,parallel_def_var_dimids,varid,xtype
+    integer,dimension(:) :: dimids
+    character(len=*) :: name
+    if (main_task) then
+       parallel_def_var_dimids = nf90_def_var(ncid,name,xtype,dimids,varid)
+    end if
+    call broadcast(parallel_def_var_dimids)
+    call broadcast(varid)
+ end function parallel_def_var_dimids
+
+ function parallel_def_var_nodimids(ncid,name,xtype,varid)
+    ! Define a scalar (dimensionless) netCDF variable on the main task;
+    ! broadcast the status and the new variable id to all ranks.
+    implicit none
+    integer :: ncid,parallel_def_var_nodimids,varid,xtype
+    character(len=*) :: name
+    if (main_task) then
+       parallel_def_var_nodimids = nf90_def_var(ncid,name,xtype,varid)
+    end if
+    call broadcast(parallel_def_var_nodimids)
+    call broadcast(varid)
+ end function parallel_def_var_nodimids
+
+ function parallel_enddef(ncid)
+    ! Leave netCDF define mode on the main task; broadcast the status code.
+    implicit none
+    integer :: ncid,parallel_enddef
+    if (main_task) then
+       parallel_enddef = nf90_enddef(ncid)
+    end if
+    call broadcast(parallel_enddef)
+ end function parallel_enddef
+
+ subroutine parallel_finalise
+    ! Shut down MPI. The return code from mpi_finalize is ignored.
+    use mpi_mod
+    implicit none
+    integer :: ierr
+    call mpi_finalize(ierr)
+ end subroutine parallel_finalise
+
+ function parallel_get_att_character(ncid,varid,name,values)
+    ! Read a character attribute on the main task; broadcast the netCDF
+    ! status and the attribute value to every rank.
+    implicit none
+    integer :: ncid,parallel_get_att_character,varid
+    character(len=*) :: name,values
+    if (main_task) then
+       parallel_get_att_character = nf90_get_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_get_att_character)
+    call broadcast(values)
+ end function parallel_get_att_character
+
+ function parallel_get_att_real4(ncid,varid,name,values)
+    ! Read a scalar real(4) attribute on the main task; broadcast the
+    ! netCDF status and the attribute value to every rank.
+    implicit none
+    integer :: ncid,parallel_get_att_real4,varid
+    character(len=*) :: name
+    real(4) :: values
+    if (main_task) then
+       parallel_get_att_real4 = nf90_get_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_get_att_real4)
+    call broadcast(values)
+ end function parallel_get_att_real4
+
+ function parallel_get_att_real4_1d(ncid,varid,name,values)
+    ! Read a 1d real(4) attribute on the main task; broadcast the netCDF
+    ! status and the attribute values to every rank.
+    implicit none
+    integer :: ncid,parallel_get_att_real4_1d,varid
+    character(len=*) :: name
+    real(4),dimension(:) :: values
+    if (main_task) then
+       parallel_get_att_real4_1d = nf90_get_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_get_att_real4_1d)
+    call broadcast(values)
+ end function parallel_get_att_real4_1d
+
+ function parallel_get_att_real8(ncid,varid,name,values)
+    ! Read a scalar real(8) attribute on the main task; broadcast the
+    ! netCDF status and the attribute value to every rank.
+    implicit none
+    integer :: ncid,parallel_get_att_real8,varid
+    character(len=*) :: name
+    real(8) :: values
+    if (main_task) then
+       parallel_get_att_real8 = nf90_get_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_get_att_real8)
+    call broadcast(values)
+ end function parallel_get_att_real8
+
+ function parallel_get_att_real8_1d(ncid,varid,name,values)
+    ! Read a 1d real(8) attribute on the main task; broadcast the netCDF
+    ! status and the attribute values to every rank.
+    implicit none
+    integer :: ncid,parallel_get_att_real8_1d,varid
+    character(len=*) :: name
+    real(8),dimension(:) :: values
+    if (main_task) then
+       parallel_get_att_real8_1d = nf90_get_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_get_att_real8_1d)
+    call broadcast(values)
+ end function parallel_get_att_real8_1d
+
+ function parallel_get_var_integer_1d(ncid,varid,values)
+ ! Read an integer 1-d variable on the main task via nf90_get_var,
+ ! then broadcast the status and the data to all tasks. Collective call.
+ ! Note: the full (non-distributed) variable is read and replicated.
+ implicit none
+ integer :: ncid,parallel_get_var_integer_1d,varid
+ integer,dimension(:) :: values
+ ! begin
+ if (main_task) parallel_get_var_integer_1d = &
+ nf90_get_var(ncid,varid,values)
+ call broadcast(parallel_get_var_integer_1d)
+ call broadcast(values)
+ end function parallel_get_var_integer_1d
+
+ function parallel_get_var_real4_1d(ncid,varid,values)
+ ! Read a real(4) 1-d variable on the main task via nf90_get_var,
+ ! then broadcast the status and the data to all tasks. Collective call.
+ implicit none
+ integer :: ncid,parallel_get_var_real4_1d,varid
+ real(4),dimension(:) :: values
+ ! begin
+ if (main_task) parallel_get_var_real4_1d = &
+ nf90_get_var(ncid,varid,values)
+ call broadcast(parallel_get_var_real4_1d)
+ call broadcast(values)
+ end function parallel_get_var_real4_1d
+
+ function parallel_get_var_real8_1d(ncid,varid,values)
+ ! Read a real(8) 1-d variable on the main task via nf90_get_var,
+ ! then broadcast the status and the data to all tasks. Collective call.
+ implicit none
+ integer :: ncid,parallel_get_var_real8_1d,varid
+ real(8),dimension(:) :: values
+ ! begin
+ if (main_task) parallel_get_var_real8_1d = &
+ nf90_get_var(ncid,varid,values)
+ call broadcast(parallel_get_var_real8_1d)
+ call broadcast(values)
+ end function parallel_get_var_real8_1d
+
+ !TODO - Is function parallel_globalID still needed? No longer called except from glissade_test_halo.
+
+ function parallel_globalID(locns, locew, upstride)
+ ! Returns a unique ID for a given row and column reference that is identical across all processors.
+ ! For instance if Proc 0: (17,16) is the same global cell as Proc 3: (17,1), then the globalID will be the same for both.
+ ! These IDs are spaced upstride apart. upstride = number of vertical layers. Typically (upn) + number of ghost layers (2 = top and bottom)
+ integer,intent(IN) :: locns, locew, upstride
+ integer :: parallel_globalID
+ ! locns is local NS (row) grid index
+ ! locew is local EW (col) grid index
+ integer :: global_row, global_col, global_ID
+ ! local_coord is used only by the commented-out debug writes below
+ character(len=40) :: local_coord
+
+ ! including global domain halo adds lhalo to offsets
+ global_row = (locns - lhalo) + (global_row_offset + lhalo)
+ global_col = (locew - lhalo) + (global_col_offset + lhalo)
+
+ ! if halo cell and if using periodic boundary conditions,
+ ! define global ID to be associated non-halo cell
+ ! (i.e. wrap the index by the global extent so both sides of a
+ ! periodic boundary map to the same owned cell)
+ if (global_row .le. lhalo) then
+ if (horiz_bcs_type_south .eq. HORIZ_BCS_CYCLIC) then
+ global_row = global_row + global_nsn
+ endif
+ endif
+
+ if (global_row > (global_nsn+lhalo)) then
+ if (horiz_bcs_type_north .eq. HORIZ_BCS_CYCLIC) then
+ global_row = global_row - global_nsn
+ endif
+ endif
+
+ if (global_col .le. lhalo) then
+ if (horiz_bcs_type_west .eq. HORIZ_BCS_CYCLIC) then
+ global_col = global_col + global_ewn
+ endif
+ endif
+
+ if (global_col > (global_ewn+lhalo)) then
+ if (horiz_bcs_type_east .eq. HORIZ_BCS_CYCLIC) then
+ global_col = global_col - global_ewn
+ endif
+ endif
+
+ ! including global domain halo adds (lhalo + uhalo) to global_ewn
+ ! row-major numbering over the halo-extended global grid, strided by upstride
+ global_ID = ((global_row - 1) * (global_ewn + lhalo + uhalo) + (global_col - 1)) * upstride + 1
+
+ ! JEFF Testing Code
+ ! write(local_coord, "A13,I10.1,A2,I10.1,A1") " (NS, EW) = (", locns, ", ", locew, ")"
+ ! write(*,*) "Processor reference ", this_rank, local_coord, " globalID = ", global_ID
+
+ !return value
+ parallel_globalID = global_ID
+
+ end function parallel_globalID
+
+
+ function parallel_globalID_scalar(locew, locns, upstride)
+
+ !WHL - This function is similar to parallel_globalID, but assigns 0's to cells outside the global domain
+
+ ! Return a globally unique cell ID for local indices (locew, locns):
+ ! identical on every processor that references the same global cell.
+ ! IDs are spaced upstride apart (upstride = number of vertical layers).
+ ! Unlike parallel_globalID, no periodic/halo wrapping is applied here.
+ integer,intent(IN) :: locns, locew, upstride
+ integer :: parallel_globalID_scalar
+ ! locns is local NS (row) grid index
+ ! locew is local EW (col) grid index
+ integer :: grow, gcol   ! global row (NS) and column (EW) indices
+
+ ! Shift local (halo-inclusive) indices onto the global grid.
+ grow = locns - lhalo + global_row_offset
+ gcol = locew - lhalo + global_col_offset
+
+ ! Row-major numbering over the global (halo-free) grid, strided by upstride.
+ parallel_globalID_scalar = ((grow - 1)*(global_ewn) + (gcol - 1))*upstride + 1
+
+ end function parallel_globalID_scalar
+
+
+ subroutine parallel_globalindex(ilocal, jlocal, iglobal, jglobal)
+ ! Convert local (halo-inclusive) indices to global (halo-free) indices.
+ ! A halo location is still mapped to its position on the global grid,
+ ! even though that global location is owned by a different processor.
+ ! No check is made for falling inside the global (periodic) halo.
+ integer,intent(IN) :: ilocal, jlocal ! These include the halos
+ integer,intent(OUT) :: iglobal, jglobal ! These do NOT include halos
+
+ iglobal = global_col_offset + (ilocal - lhalo)
+ jglobal = global_row_offset + (jlocal - lhalo)
+ end subroutine parallel_globalindex
+
+ subroutine parallel_localindex(iglobal, jglobal, ilocal, jlocal, rlocal)
+ ! Calculates the local i,j indices and rank from the global i,j indices
+ ! Collective: every task computes candidate local indices; the task that
+ ! actually owns the cell flips a flag, parallel_reduce_maxloc identifies
+ ! that task's rank, and the owner broadcasts its local indices to all.
+ integer,intent(IN) :: iglobal, jglobal
+ integer,intent(OUT) :: ilocal, jlocal, rlocal
+ integer :: flag
+
+ flag = 0 ! This flag will be flipped on exactly one processor if the global point is valid
+ ilocal = iglobal + lhalo - global_col_offset
+ jlocal = jglobal + lhalo - global_row_offset
+
+ ! Check whether these are valid values of ilocal and jlocal
+ ! If so, then flip the flag and broadcast these values
+ if ( (ilocal > lhalo .and. ilocal <= lhalo + own_ewn) &
+ .and. &
+ (jlocal > lhalo .and. jlocal <= lhalo + own_nsn) ) then
+ flag = 1
+ endif
+
+ ! maxloc reduction returns (in rlocal) the rank of the owning task
+ call parallel_reduce_maxloc(flag, flag, rlocal)
+
+ if (flag==1) then
+ call broadcast(ilocal, rlocal)
+ call broadcast(jlocal, rlocal)
+ else ! global indices are invalid
+ ! NOTE(review): only the main task calls parallel_stop here; this
+ ! presumably aborts the whole job — confirm parallel_stop semantics,
+ ! otherwise the remaining tasks would continue with undefined output.
+ if (main_task) then
+ write(*,*) 'Invalid global indices: iglobal, jglobal =', iglobal, jglobal
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+ endif
+ end subroutine parallel_localindex
+
+ subroutine parallel_halo_integer_2d(a)
+ ! Update the halo cells of a distributed integer 2-d field from the
+ ! owned cells of the four neighbor tasks. Pattern: post nonblocking
+ ! receives (tagged with the neighbor's rank), do blocking sends (tagged
+ ! with this task's rank), then wait and unpack. East/west are exchanged
+ ! before north/south so corner halo cells are filled correctly.
+ ! Collective: every task must call with a consistently-shaped array.
+ use mpi_mod
+ implicit none
+ integer,dimension(:,:) :: a
+
+ integer :: erequest,ierror,nrequest,srequest,wrequest
+ ! esend feeds the east neighbor's west halo (lhalo wide);
+ ! erecv fills our east halo (uhalo wide) — hence the differing extents.
+ integer,dimension(lhalo,local_nsn-lhalo-uhalo) :: esend,wrecv
+ integer,dimension(uhalo,local_nsn-lhalo-uhalo) :: erecv,wsend
+ integer,dimension(local_ewn,lhalo) :: nsend,srecv
+ integer,dimension(local_ewn,uhalo) :: nrecv,ssend
+
+ ! begin
+
+ ! staggered grid
+ if (size(a,1)==local_ewn-1.and.size(a,2)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,1)/=local_ewn.or.size(a,2)/=local_nsn) then
+ write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! unstaggered grid
+ call mpi_irecv(wrecv,size(wrecv),mpi_integer,west,west,&
+ comm,wrequest,ierror)
+ call mpi_irecv(erecv,size(erecv),mpi_integer,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(srecv,size(srecv),mpi_integer,south,south,&
+ comm,srequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_integer,north,north,&
+ comm,nrequest,ierror)
+
+ ! east/west exchange of owned columns (rows restricted to owned range)
+ esend(:,:) = &
+ a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call mpi_send(esend,size(esend),mpi_integer,east,this_rank,comm,ierror)
+ wsend(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ call mpi_send(wsend,size(wsend),mpi_integer,west,this_rank,comm,ierror)
+
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ a(:lhalo,1+lhalo:local_nsn-uhalo) = wrecv(:,:)
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = erecv(:,:)
+
+ ! north/south exchange of full rows (E/W halos now filled -> corners OK)
+ nsend(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ call mpi_send(nsend,size(nsend),mpi_integer,north,this_rank,comm,ierror)
+ ssend(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_integer,south,this_rank,comm,ierror)
+
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ a(:,:lhalo) = srecv(:,:)
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ a(:,local_nsn-uhalo+1:) = nrecv(:,:)
+
+ if (outflow_bc) then ! set values in global halo to zero
+ ! interior halo cells should not be affected
+ ! (rank comparisons detect the edge of the global domain, where the
+ ! neighbor rank wraps around)
+
+ if (this_rank >= east) then ! at east edge of global domain
+ a(local_ewn-uhalo+1:,:) = 0
+ endif
+
+ if (this_rank <= west) then ! at west edge of global domain
+ a(:lhalo,:) = 0
+ endif
+
+ if (this_rank >= north) then ! at north edge of global domain
+ a(:,local_nsn-uhalo+1:) = 0
+ endif
+
+ if (this_rank <= south) then ! at south edge of global domain
+ a(:,:lhalo) = 0
+ endif
+
+ endif ! open BC
+
+ end subroutine parallel_halo_integer_2d
+
+ subroutine parallel_halo_logical_2d(a)
+ ! Update the halo cells of a distributed logical 2-d field from the
+ ! owned cells of the four neighbor tasks (same exchange pattern as
+ ! parallel_halo_integer_2d, using mpi_logical). Under outflow_bc the
+ ! global-domain halo is reset to .false. instead of zero.
+ ! Collective: every task must call with a consistently-shaped array.
+ use mpi_mod
+ implicit none
+ logical,dimension(:,:) :: a
+
+ integer :: erequest,ierror,nrequest,srequest,wrequest
+ logical,dimension(lhalo,local_nsn-lhalo-uhalo) :: esend,wrecv
+ logical,dimension(uhalo,local_nsn-lhalo-uhalo) :: erecv,wsend
+ logical,dimension(local_ewn,lhalo) :: nsend,srecv
+ logical,dimension(local_ewn,uhalo) :: nrecv,ssend
+
+ ! begin
+
+ ! staggered grid
+ if (size(a,1)==local_ewn-1.and.size(a,2)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,1)/=local_ewn.or.size(a,2)/=local_nsn) then
+ write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! unstaggered grid
+ call mpi_irecv(wrecv,size(wrecv),mpi_logical,west,west,&
+ comm,wrequest,ierror)
+ call mpi_irecv(erecv,size(erecv),mpi_logical,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(srecv,size(srecv),mpi_logical,south,south,&
+ comm,srequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_logical,north,north,&
+ comm,nrequest,ierror)
+
+ ! east/west exchange first, then north/south (fills corners correctly)
+ esend(:,:) = &
+ a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call mpi_send(esend,size(esend),mpi_logical,east,this_rank,comm,ierror)
+ wsend(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ call mpi_send(wsend,size(wsend),mpi_logical,west,this_rank,comm,ierror)
+
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ a(:lhalo,1+lhalo:local_nsn-uhalo) = wrecv(:,:)
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = erecv(:,:)
+
+ nsend(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ call mpi_send(nsend,size(nsend),mpi_logical,north,this_rank,comm,ierror)
+ ssend(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_logical,south,this_rank,comm,ierror)
+
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ a(:,:lhalo) = srecv(:,:)
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ a(:,local_nsn-uhalo+1:) = nrecv(:,:)
+
+ if (outflow_bc) then ! set values in global halo to zero
+ ! interior halo cells should not be affected
+
+ if (this_rank >= east) then ! at east edge of global domain
+ a(local_ewn-uhalo+1:,:) = .false.
+ endif
+
+ if (this_rank <= west) then ! at west edge of global domain
+ a(:lhalo,:) = .false.
+ endif
+
+ if (this_rank >= north) then ! at north edge of global domain
+ a(:,local_nsn-uhalo+1:) = .false.
+ endif
+
+ if (this_rank <= south) then ! at south edge of global domain
+ a(:,:lhalo) = .false.
+ endif
+
+ endif ! open BC
+
+ end subroutine parallel_halo_logical_2d
+
+ subroutine parallel_halo_real4_2d(a)
+ ! Update the halo cells of a distributed real(4) 2-d field from the
+ ! owned cells of the four neighbor tasks (same pattern as
+ ! parallel_halo_integer_2d, using mpi_real4). Collective call.
+ use mpi_mod
+ implicit none
+ real(4),dimension(:,:) :: a
+
+ integer :: erequest,ierror,nrequest,srequest,wrequest
+ real(4),dimension(lhalo,local_nsn-lhalo-uhalo) :: esend,wrecv
+ real(4),dimension(uhalo,local_nsn-lhalo-uhalo) :: erecv,wsend
+ real(4),dimension(local_ewn,lhalo) :: nsend,srecv
+ real(4),dimension(local_ewn,uhalo) :: nrecv,ssend
+
+ ! begin
+
+ ! staggered grid
+ if (size(a,1)==local_ewn-1.and.size(a,2)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,1)/=local_ewn.or.size(a,2)/=local_nsn) then
+ write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! unstaggered grid
+ call mpi_irecv(wrecv,size(wrecv),mpi_real4,west,west,&
+ comm,wrequest,ierror)
+ call mpi_irecv(erecv,size(erecv),mpi_real4,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(srecv,size(srecv),mpi_real4,south,south,&
+ comm,srequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_real4,north,north,&
+ comm,nrequest,ierror)
+
+ ! east/west exchange first, then north/south (fills corners correctly)
+ esend(:,:) = &
+ a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call mpi_send(esend,size(esend),mpi_real4,east,this_rank,comm,ierror)
+ wsend(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ call mpi_send(wsend,size(wsend),mpi_real4,west,this_rank,comm,ierror)
+
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ a(:lhalo,1+lhalo:local_nsn-uhalo) = wrecv(:,:)
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = erecv(:,:)
+
+ nsend(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ call mpi_send(nsend,size(nsend),mpi_real4,north,this_rank,comm,ierror)
+ ssend(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_real4,south,this_rank,comm,ierror)
+
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ a(:,:lhalo) = srecv(:,:)
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ a(:,local_nsn-uhalo+1:) = nrecv(:,:)
+
+ if (outflow_bc) then ! set values in global halo to zero
+ ! interior halo cells should not be affected
+
+ if (this_rank >= east) then ! at east edge of global domain
+ a(local_ewn-uhalo+1:,:) = 0.
+ endif
+
+ if (this_rank <= west) then ! at west edge of global domain
+ a(:lhalo,:) = 0.
+ endif
+
+ if (this_rank >= north) then ! at north edge of global domain
+ a(:,local_nsn-uhalo+1:) = 0.
+ endif
+
+ if (this_rank <= south) then ! at south edge of global domain
+ a(:,:lhalo) = 0.
+ endif
+
+ endif ! open BC
+
+ end subroutine parallel_halo_real4_2d
+
+
+ subroutine parallel_halo_real8_2d(a, periodic_offset_ew, periodic_offset_ns)
+
+ !WHL - added optional arguments for periodic offsets, to support ismip-hom test cases
+
+ ! Update the halo cells of a distributed real(8) 2-d field from the
+ ! owned cells of the four neighbor tasks. Optionally adds a constant
+ ! offset to halo values received across the periodic global boundary
+ ! (+ on the low side, - on the high side), for doubly periodic tests.
+ ! Collective: every task must call with a consistently-shaped array.
+ use mpi_mod
+ implicit none
+ real(8),dimension(:,:) :: a
+ real(8), intent(in), optional :: &
+ periodic_offset_ew, &! offset halo values by this amount
+ ! if positive, the offset is positive for W halo, negative for E halo
+ periodic_offset_ns ! offset halo values by this amount
+ ! if positive, the offset is positive for S halo, negative for N halo
+
+ integer :: erequest,ierror,nrequest,srequest,wrequest
+ real(8),dimension(lhalo,local_nsn-lhalo-uhalo) :: esend,wrecv
+ real(8),dimension(uhalo,local_nsn-lhalo-uhalo) :: erecv,wsend
+ real(8),dimension(local_ewn,lhalo) :: nsend,srecv
+ real(8),dimension(local_ewn,uhalo) :: nrecv,ssend
+
+ ! begin
+
+ ! staggered grid
+ if (size(a,1)==local_ewn-1.and.size(a,2)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,1)/=local_ewn.or.size(a,2)/=local_nsn) then
+ write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! unstaggered grid
+ call mpi_irecv(wrecv,size(wrecv),mpi_real8,west,west,&
+ comm,wrequest,ierror)
+ call mpi_irecv(erecv,size(erecv),mpi_real8,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(srecv,size(srecv),mpi_real8,south,south,&
+ comm,srequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_real8,north,north,&
+ comm,nrequest,ierror)
+
+ ! east/west exchange first, then north/south (fills corners correctly)
+ esend(:,:) = &
+ a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call mpi_send(esend,size(esend),mpi_real8,east,this_rank,comm,ierror)
+ wsend(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ call mpi_send(wsend,size(wsend),mpi_real8,west,this_rank,comm,ierror)
+
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ a(:lhalo,1+lhalo:local_nsn-uhalo) = wrecv(:,:)
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = erecv(:,:)
+
+ ! apply the EW periodic offset on tasks at the global EW edges
+ if (present(periodic_offset_ew)) then
+ if (periodic_offset_ew /= 0.d0) then
+ if (this_rank <= west) then ! this proc lies at the west edge of the global domain
+! print*, 'Offset at west edge: this_rank, west =', this_rank, west
+ a(:lhalo,1+lhalo:local_nsn-uhalo) = &
+ a(:lhalo,1+lhalo:local_nsn-uhalo) + periodic_offset_ew
+ endif
+ if (this_rank >= east) then ! this proc lies at the east edge of the global domain
+! print*, 'Offset at east edge: this_rank, east =', this_rank, east
+ a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = &
+ a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) - periodic_offset_ew
+ endif
+ endif
+ endif
+
+ nsend(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ call mpi_send(nsend,size(nsend),mpi_real8,north,this_rank,comm,ierror)
+ ssend(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_real8,south,this_rank,comm,ierror)
+
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ a(:,:lhalo) = srecv(:,:)
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ a(:,local_nsn-uhalo+1:) = nrecv(:,:)
+
+ ! apply the NS periodic offset on tasks at the global NS edges
+ if (present(periodic_offset_ns)) then
+ if (periodic_offset_ns /= 0.d0) then
+ if (this_rank <= south) then ! this proc lies at the south edge of the global domain
+! print*, 'Offset at south edge: this_rank, south =', this_rank, south
+ a(:,:lhalo) = a(:,:lhalo) + periodic_offset_ns
+ endif
+ if (this_rank >= north) then ! this proc lies at the north edge of the global domain
+! print*, 'Offset at north edge: this_rank, north =', this_rank, north
+ a(:,local_nsn-uhalo+1:) = a(:,local_nsn-uhalo+1:) - periodic_offset_ns
+ endif
+ endif
+ endif
+
+ if (outflow_bc) then ! set values in global halo to zero
+ ! interior halo cells should not be affected
+
+ if (this_rank >= east) then ! at east edge of global domain
+ a(local_ewn-uhalo+1:,:) = 0.d0
+ endif
+
+ if (this_rank <= west) then ! at west edge of global domain
+ a(:lhalo,:) = 0.d0
+ endif
+
+ if (this_rank >= north) then ! at north edge of global domain
+ a(:,local_nsn-uhalo+1:) = 0.d0
+ endif
+
+ if (this_rank <= south) then ! at south edge of global domain
+ a(:,:lhalo) = 0.d0
+ endif
+
+ endif ! open BC
+
+ end subroutine parallel_halo_real8_2d
+
+ subroutine parallel_halo_real8_3d(a)
+
+ ! Update the halo cells of a distributed real(8) 3-d field (vertical
+ ! index first) from the owned cells of the four neighbor tasks. Same
+ ! exchange pattern as parallel_halo_real8_2d: nonblocking receives
+ ! tagged with the neighbor rank, blocking sends tagged with this rank,
+ ! east/west before north/south so corner halos are filled correctly.
+ ! Collective: every task must call with a consistently-shaped array.
+ use mpi_mod
+ implicit none
+ real(8),dimension(:,:,:) :: a
+
+ ! (fix: removed unused local variable "one")
+ integer :: erequest,ierror,nrequest,srequest,wrequest
+ real(8),dimension(size(a,1),lhalo,local_nsn-lhalo-uhalo) :: esend,wrecv
+ real(8),dimension(size(a,1),uhalo,local_nsn-lhalo-uhalo) :: erecv,wsend
+ real(8),dimension(size(a,1),local_ewn,lhalo) :: nsend,srecv
+ real(8),dimension(size(a,1),local_ewn,uhalo) :: nrecv,ssend
+
+ ! begin
+
+ ! staggered grid
+ if (size(a,2)==local_ewn-1.and.size(a,3)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,2)/=local_ewn.or.size(a,3)/=local_nsn) then
+ write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ",", size(a,3), ") &
+ &and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! unstaggered grid
+ call mpi_irecv(wrecv,size(wrecv),mpi_real8,west,west,&
+ comm,wrequest,ierror)
+ call mpi_irecv(erecv,size(erecv),mpi_real8,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(srecv,size(srecv),mpi_real8,south,south,&
+ comm,srequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_real8,north,north,&
+ comm,nrequest,ierror)
+
+ ! east/west exchange of owned columns (all vertical levels at once)
+ esend(:,:,:) = &
+ a(:,local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call mpi_send(esend,size(esend),mpi_real8,east,this_rank,comm,ierror)
+ wsend(:,:,:) = a(:,1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ call mpi_send(wsend,size(wsend),mpi_real8,west,this_rank,comm,ierror)
+
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ a(:,:lhalo,1+lhalo:local_nsn-uhalo) = wrecv(:,:,:)
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ a(:,local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = erecv(:,:,:)
+
+ ! north/south exchange of full rows (E/W halos now filled -> corners OK)
+ nsend(:,:,:) = a(:,:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ call mpi_send(nsend,size(nsend),mpi_real8,north,this_rank,comm,ierror)
+ ssend(:,:,:) = a(:,:,1+lhalo:1+lhalo+uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_real8,south,this_rank,comm,ierror)
+
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ a(:,:,:lhalo) = srecv(:,:,:)
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ a(:,:,local_nsn-uhalo+1:) = nrecv(:,:,:)
+
+ if (outflow_bc) then ! set values in global halo to zero
+ ! interior halo cells should not be affected
+
+ if (this_rank >= east) then ! at east edge of global domain
+ a(:,local_ewn-uhalo+1:,:) = 0.d0
+ endif
+
+ if (this_rank <= west) then ! at west edge of global domain
+ a(:,:lhalo,:) = 0.d0
+ endif
+
+ if (this_rank >= north) then ! at north edge of global domain
+ a(:,:,local_nsn-uhalo+1:) = 0.d0
+ endif
+
+ if (this_rank <= south) then ! at south edge of global domain
+ a(:,:,:lhalo) = 0.d0
+ endif
+
+ endif ! outflow BC
+
+ end subroutine parallel_halo_real8_3d
+
+
+ function parallel_halo_verify_integer_2d(a)
+ ! Verify that the halo cells of integer 2-d field a agree with the
+ ! corresponding owned cells on the neighbor tasks, by re-exchanging
+ ! the halo data and comparing. Returns .true. if the halos verify.
+ ! Collective: every task must call.
+ use mpi_mod
+ implicit none
+ integer,dimension(:,:) :: a
+
+ integer :: erequest,ierror,nrequest,srequest,wrequest
+ integer,dimension(lhalo,local_nsn-lhalo-uhalo) :: esend,wrecv
+ integer,dimension(uhalo,local_nsn-lhalo-uhalo) :: erecv,wsend
+ integer,dimension(local_ewn,lhalo) :: nsend,srecv
+ integer,dimension(local_ewn,uhalo) :: nrecv,ssend
+ logical :: notverify_flag
+ logical :: parallel_halo_verify_integer_2d
+
+ ! begin
+
+ ! Bug fix: define the function result before any early return, so the
+ ! result is not undefined when debugging is off or the grid is staggered.
+ parallel_halo_verify_integer_2d = .true.
+
+ if (DEBUG_LEVEL <= 0) return
+
+ ! staggered grid
+ if (size(a,1)==local_ewn-1.and.size(a,2)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,1)/=local_ewn.or.size(a,2)/=local_nsn) &
+ call parallel_stop(__FILE__,__LINE__)
+
+ ! unstaggered grid: re-exchange halos and compare with current contents
+ call mpi_irecv(wrecv,size(wrecv),mpi_integer,west,west,&
+ comm,wrequest,ierror)
+ call mpi_irecv(erecv,size(erecv),mpi_integer,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(srecv,size(srecv),mpi_integer,south,south,&
+ comm,srequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_integer,north,north,&
+ comm,nrequest,ierror)
+
+ esend(:,:) = &
+ a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call mpi_send(esend,size(esend),mpi_integer,east,this_rank,comm,ierror)
+ wsend(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ call mpi_send(wsend,size(wsend),mpi_integer,west,this_rank,comm,ierror)
+
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ ! ANY True if any value is true (LOGICAL)
+ notverify_flag = ANY(a(:lhalo,1+lhalo:local_nsn-uhalo) /= wrecv(:,:))
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. &
+ ANY(a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) /= erecv(:,:))
+
+ nsend(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ call mpi_send(nsend,size(nsend),mpi_integer,north,this_rank,comm,ierror)
+ ssend(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_integer,south,this_rank,comm,ierror)
+
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. ANY(a(:,:lhalo) /= srecv(:,:))
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. ANY(a(:,local_nsn-uhalo+1:) /= nrecv(:,:))
+
+ ! if notverify_flag is TRUE, then there was some difference detected
+ if (notverify_flag) then
+ write(*,*) "Halo Verify FAILED on processor ", this_rank
+ ! call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ parallel_halo_verify_integer_2d = .NOT. notverify_flag ! return if verified (True) or not verified (False)
+ end function parallel_halo_verify_integer_2d
+
+ function parallel_halo_verify_real8_2d(a)
+ ! Verify that the halo cells of real(8) 2-d field a agree with the
+ ! corresponding owned cells on the neighbor tasks, by re-exchanging
+ ! the halo data and comparing (exact bitwise comparison is intended
+ ! here: halos must be copies, not approximations).
+ ! Returns .true. if the halos verify. Collective: every task must call.
+ use mpi_mod
+ implicit none
+ real(8),dimension(:,:) :: a
+
+ integer :: erequest,ierror,nrequest,srequest,wrequest
+ real(8),dimension(lhalo,local_nsn-lhalo-uhalo) :: esend,wrecv
+ real(8),dimension(uhalo,local_nsn-lhalo-uhalo) :: erecv,wsend
+ real(8),dimension(local_ewn,lhalo) :: nsend,srecv
+ real(8),dimension(local_ewn,uhalo) :: nrecv,ssend
+ logical :: notverify_flag
+ logical :: parallel_halo_verify_real8_2d
+
+ ! begin
+
+ ! Bug fix: define the function result before any early return, so the
+ ! result is not undefined when debugging is off or the grid is staggered.
+ parallel_halo_verify_real8_2d = .true.
+
+ if (DEBUG_LEVEL <= 0) return
+
+ ! staggered grid
+ if (size(a,1)==local_ewn-1.and.size(a,2)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,1)/=local_ewn.or.size(a,2)/=local_nsn) &
+ call parallel_stop(__FILE__,__LINE__)
+
+ ! unstaggered grid: re-exchange halos and compare with current contents
+ call mpi_irecv(wrecv,size(wrecv),mpi_real8,west,west,&
+ comm,wrequest,ierror)
+ call mpi_irecv(erecv,size(erecv),mpi_real8,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(srecv,size(srecv),mpi_real8,south,south,&
+ comm,srequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_real8,north,north,&
+ comm,nrequest,ierror)
+
+ esend(:,:) = &
+ a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call mpi_send(esend,size(esend),mpi_real8,east,this_rank,comm,ierror)
+ wsend(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ call mpi_send(wsend,size(wsend),mpi_real8,west,this_rank,comm,ierror)
+
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ notverify_flag = ANY(a(:lhalo,1+lhalo:local_nsn-uhalo) /= wrecv(:,:))
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. &
+ ANY(a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) /= erecv(:,:))
+
+ nsend(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ call mpi_send(nsend,size(nsend),mpi_real8,north,this_rank,comm,ierror)
+ ssend(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_real8,south,this_rank,comm,ierror)
+
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. ANY(a(:,:lhalo) /= srecv(:,:))
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. ANY(a(:,local_nsn-uhalo+1:) /= nrecv(:,:))
+
+ if (notverify_flag) then
+ write(*,*) "Halo Verify FAILED on processor ", this_rank
+ ! call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ parallel_halo_verify_real8_2d = .NOT. notverify_flag
+ end function parallel_halo_verify_real8_2d
+
+ function parallel_halo_verify_real8_3d(a)
+ ! Verify that the halo cells of real(8) 3-d field a (vertical index
+ ! first) agree with the corresponding owned cells on the neighbor
+ ! tasks, by re-exchanging the halo data and comparing bitwise.
+ ! Returns .true. if the halos verify. Collective: every task must call.
+ use mpi_mod
+ implicit none
+ real(8),dimension(:,:,:) :: a
+
+ ! (fix: removed unused local variable "one")
+ integer :: erequest,ierror,nrequest,srequest,wrequest
+ real(8),dimension(size(a,1),lhalo,local_nsn-lhalo-uhalo) :: esend,wrecv
+ real(8),dimension(size(a,1),uhalo,local_nsn-lhalo-uhalo) :: erecv,wsend
+ real(8),dimension(size(a,1),local_ewn,lhalo) :: nsend,srecv
+ real(8),dimension(size(a,1),local_ewn,uhalo) :: nrecv,ssend
+ logical :: notverify_flag
+ logical :: parallel_halo_verify_real8_3d
+
+ ! begin
+
+ ! Bug fix: define the function result before any early return, so the
+ ! result is not undefined when debugging is off or the grid is staggered.
+ parallel_halo_verify_real8_3d = .true.
+
+ if (DEBUG_LEVEL <= 0) return
+
+ ! staggered grid
+ if (size(a,2)==local_ewn-1.and.size(a,3)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,2)/=local_ewn.or.size(a,3)/=local_nsn) &
+ call parallel_stop(__FILE__,__LINE__)
+
+ ! unstaggered grid: re-exchange halos and compare with current contents
+ call mpi_irecv(wrecv,size(wrecv),mpi_real8,west,west,&
+ comm,wrequest,ierror)
+ call mpi_irecv(erecv,size(erecv),mpi_real8,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(srecv,size(srecv),mpi_real8,south,south,&
+ comm,srequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_real8,north,north,&
+ comm,nrequest,ierror)
+
+ esend(:,:,:) = &
+ a(:,local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ call mpi_send(esend,size(esend),mpi_real8,east,this_rank,comm,ierror)
+ wsend(:,:,:) = a(:,1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ call mpi_send(wsend,size(wsend),mpi_real8,west,this_rank,comm,ierror)
+
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ notverify_flag = ANY(a(:,:lhalo,1+lhalo:local_nsn-uhalo) /= wrecv(:,:,:))
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. &
+ ANY(a(:,local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) /= erecv(:,:,:))
+
+ nsend(:,:,:) = a(:,:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ call mpi_send(nsend,size(nsend),mpi_real8,north,this_rank,comm,ierror)
+ ssend(:,:,:) = a(:,:,1+lhalo:1+lhalo+uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_real8,south,this_rank,comm,ierror)
+
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. ANY(a(:,:,:lhalo) /= srecv(:,:,:))
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ notverify_flag = notverify_flag .OR. ANY(a(:,:,local_nsn-uhalo+1:) /= nrecv(:,:,:))
+
+ if (notverify_flag) then
+ write(*,*) "Halo Verify FAILED on processor ", this_rank
+ ! call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ parallel_halo_verify_real8_3d = .NOT. notverify_flag
+ end function parallel_halo_verify_real8_3d
+
+ ! parallel_initialise should generally just be called by standalone cism drivers
+ ! When cism is nested inside a climate model (so mpi_init has already been called) use parallel_set_info instead
+
+ subroutine parallel_initialise
+ ! Start MPI and configure CISM's parallel state for a standalone run;
+ ! rank 0 of mpi_comm_world becomes the main (I/O) task.
+ ! When CISM is nested inside a climate model (mpi_init already called),
+ ! use parallel_set_info directly instead.
+ use mpi_mod
+ implicit none
+ integer, parameter :: standalone_main_rank = 0
+ integer :: ierror
+ ! begin
+ call mpi_init(ierror)
+ call parallel_set_info(mpi_comm_world, standalone_main_rank)
+ end subroutine parallel_initialise
+
+ ! parallel_set_info should be called directly when cism is nested inside a climate model
+ ! (then, mpi_init has already been called, so do NOT use parallel_initialise)
+
+ subroutine parallel_set_info(my_comm, my_main_rank)
+ ! Record the communicator and main-task rank in module state, then
+ ! cache this task's rank, the total task count, and whether this task
+ ! is the main task. Call directly when CISM is nested inside a climate
+ ! model (mpi_init already done); standalone drivers use parallel_initialise.
+ use mpi_mod
+ implicit none
+ integer, intent(in) :: my_comm ! CISM's global communicator
+ integer, intent(in) :: my_main_rank ! rank of the master task
+ integer :: ierror
+ ! begin
+ main_rank = my_main_rank
+ comm = my_comm
+ call mpi_comm_rank(comm, this_rank, ierror)
+ call mpi_comm_size(comm, tasks, ierror)
+ main_task = (this_rank == main_rank)
+ end subroutine parallel_set_info
+
+ function parallel_inq_attname(ncid,varid,attnum,name)
+ ! Look up attribute name number attnum on the main task via
+ ! nf90_inq_attname, then broadcast the status and the name to all
+ ! tasks. Collective call.
+ implicit none
+ integer :: attnum,ncid,parallel_inq_attname,varid
+ character(len=*) :: name
+ ! begin
+ if (main_task) parallel_inq_attname = &
+ nf90_inq_attname(ncid,varid,attnum,name)
+ call broadcast(parallel_inq_attname)
+ call broadcast(name)
+ end function parallel_inq_attname
+
+ function parallel_inq_dimid(ncid,name,dimid)
+ ! Look up a dimension id on the main task via nf90_inq_dimid, then
+ ! broadcast the status and the id to all tasks. Collective call.
+ implicit none
+ integer :: dimid,ncid,parallel_inq_dimid
+ character(len=*) :: name
+ ! begin
+ if (main_task) parallel_inq_dimid = nf90_inq_dimid(ncid,name,dimid)
+ call broadcast(parallel_inq_dimid)
+ call broadcast(dimid)
+ end function parallel_inq_dimid
+
+ function parallel_inq_varid(ncid,name,varid)
+ ! Look up a variable id on the main task via nf90_inq_varid, then
+ ! broadcast the status and the id to all tasks. Collective call.
+ implicit none
+ integer :: ncid,parallel_inq_varid,varid
+ character(len=*) :: name
+ ! begin
+ if (main_task) parallel_inq_varid = nf90_inq_varid(ncid,name,varid)
+ call broadcast(parallel_inq_varid)
+ call broadcast(varid)
+ end function parallel_inq_varid
+
+ function parallel_inquire(ncid,nvariables)
+ ! Query the number of variables in the file on the main task via
+ ! nf90_inquire, then broadcast the status and the count to all tasks.
+ ! Collective call.
+ implicit none
+ integer :: ncid,parallel_inquire,nvariables
+ ! begin
+ if (main_task) parallel_inquire = nf90_inquire(ncid,nvariables=nvariables)
+ call broadcast(parallel_inquire)
+ call broadcast(nvariables)
+ end function parallel_inquire
+
+ function parallel_inquire_dimension(ncid,dimid,name,len)
+ ! Query a dimension's name and/or length on the main task via
+ ! nf90_inquire_dimension, then broadcast the status and any requested
+ ! results to all tasks. Collective: all tasks must pass the same set
+ ! of optional arguments so the broadcast sequence matches.
+ implicit none
+ integer :: dimid,ncid,parallel_inquire_dimension
+ integer,optional :: len
+ character(len=*),optional :: name
+
+ ! l: length as returned on the main task (always queried there,
+ ! but only broadcast if the caller asked for len)
+ integer :: l
+
+ ! begin
+
+ if (present(name)) then
+ if (main_task) parallel_inquire_dimension = &
+ nf90_inquire_dimension(ncid,dimid,name,len=l)
+ call broadcast(name)
+ else
+ if (main_task) parallel_inquire_dimension = &
+ nf90_inquire_dimension(ncid,dimid,len=l)
+ end if
+ call broadcast(parallel_inquire_dimension)
+ if (present(len)) then
+ call broadcast(l)
+ len = l
+ end if
+ end function parallel_inquire_dimension
+
+ function parallel_inquire_variable(ncid,varid,name,ndims,dimids,natts)
+ ! Query variable metadata on the main task via nf90_inquire_variable,
+ ! broadcasting the status and each requested item to all tasks.
+ ! Returns early (after the relevant broadcasts) on a netCDF error.
+ ! Collective: all tasks must pass the same set of optional arguments
+ ! so the broadcast sequence matches.
+ implicit none
+ integer :: ncid,parallel_inquire_variable,varid
+ integer,optional :: ndims,natts
+ character(len=*),optional :: name
+ integer,dimension(:),optional :: dimids
+
+ ! nd/na: ndims/natts as returned on the main task
+ integer :: nd,na
+ ! begin
+ if (present(name)) then
+ if (main_task) parallel_inquire_variable = &
+ nf90_inquire_variable(ncid,varid,name=name)
+ call broadcast(parallel_inquire_variable)
+ call broadcast(name)
+ if (parallel_inquire_variable/=nf90_noerr) return
+ end if
+ if (present(dimids)) then
+ if (main_task) parallel_inquire_variable = &
+ nf90_inquire_variable(ncid,varid,dimids=dimids)
+ call broadcast(parallel_inquire_variable)
+ call broadcast(dimids)
+ if (parallel_inquire_variable/=nf90_noerr) return
+ end if
+ ! ndims and natts are always queried on the main task; broadcast
+ ! each only if the caller asked for it
+ if (main_task) parallel_inquire_variable = &
+ nf90_inquire_variable(ncid,varid,ndims=nd,natts=na)
+ call broadcast(parallel_inquire_variable)
+ if (present(ndims)) then
+ call broadcast(nd)
+ ndims = nd
+ end if
+ if (present(natts)) then
+ call broadcast(na)
+ natts = na
+ end if
+ end function parallel_inquire_variable
+
+ function parallel_open(path,mode,ncid)
+   ! Collective wrapper for nf90_open: the file is opened by the main
+   ! task only and the status is broadcast.  Note that ncid is not
+   ! broadcast by this routine, so it is defined on the main task only.
+   implicit none
+   integer :: mode,ncid,parallel_open
+   character(len=*) :: path
+   ! begin
+   if (main_task) then
+      parallel_open = nf90_open(path,mode,ncid)
+   end if
+   call broadcast(parallel_open)
+ end function parallel_open
+
+ subroutine parallel_print_all(name,values)
+ ! Write a distributed 3d real8 field to a single text file 'name'.
+ ! The main task truncates the file, then every task appends its own
+ ! values in rank order, serialized by barriers.
+ implicit none
+ character(*) :: name
+ real(8),dimension(:,:,:) :: values
+
+ integer,parameter :: u = 33
+ integer :: i,j,t
+ ! begin
+ ! main task creates/empties the file before anyone appends
+ if (main_task) then
+ open(unit=u,file=name,form="formatted",status="replace")
+ close(u)
+ end if
+ ! one task writes at a time; the barrier keeps the appends in rank order
+ do t = 0,tasks-1
+ call parallel_barrier
+ if (t==this_rank) then
+ open(unit=u,file=name,form="formatted",position="append")
+ do j = 1,size(values,3)
+ do i = 1,size(values,2)
+ ! nslb/ewlb appear to be this task's global lower bounds -- TODO confirm
+ write(u,'(2i5,100g15.5e3)') nslb+j-1,ewlb+i-1,values(:,i,j)
+ end do
+ write(u,'()')
+ end do
+ write(u,'(//)')
+ close(u)
+ end if
+ end do
+ end subroutine parallel_print_all
+
+ subroutine parallel_print_integer_2d(name,values)
+ ! Write a 2d integer array to the text file name//<tasks>//".txt",
+ ! where <tasks> is the zero-padded task count.  Only the main task
+ ! writes (its local copy of 'values'); the rest wait at the barrier.
+ implicit none
+ character(*) :: name
+ integer,dimension(:,:) :: values
+
+ integer,parameter :: u = 33
+ character(3) :: ts
+ integer :: i,j
+
+ ! begin
+ if (main_task) then
+ write(ts,'(i3.3)') tasks
+ open(unit=u,file=name//ts//".txt",form="formatted",status="replace")
+ do j = lbound(values,2),ubound(values,2)
+ do i = lbound(values,1),ubound(values,1)
+ write(u,*) j,i,values(i,j)
+ end do
+ write(u,'()')
+ end do
+ close(u)
+ end if
+
+ call parallel_barrier ! Only the main_task writes the variable. Rest wait here.
+ ! automatic deallocation
+ end subroutine parallel_print_integer_2d
+
+ subroutine parallel_print_real8_2d(name,values)
+ ! Write a 2d real8 array to the text file name//<tasks>//".txt",
+ ! where <tasks> is the zero-padded task count.  Only the main task
+ ! writes (its local copy of 'values'); the rest wait at the barrier.
+ implicit none
+ character(*) :: name
+ real(8),dimension(:,:) :: values
+
+ integer,parameter :: u = 33
+ character(3) :: ts
+ integer :: i,j
+
+ ! begin
+ if (main_task) then
+ write(ts,'(i3.3)') tasks
+ open(unit=u,file=name//ts//".txt",form="formatted",status="replace")
+ do j = lbound(values,2),ubound(values,2)
+ do i = lbound(values,1),ubound(values,1)
+ write(u,*) j,i,values(i,j)
+ end do
+ write(u,'()')
+ end do
+ close(u)
+ end if
+
+ call parallel_barrier ! Only the main_task writes the variable. Rest wait here.
+ end subroutine parallel_print_real8_2d
+
+ subroutine parallel_print_real8_3d(name,values)
+ ! Write a 3d real8 array to the text file name//<tasks>//".txt",
+ ! one line per (i,j) with the whole first (vertical) dimension on the
+ ! line.  Only the main task writes; the rest wait at the barrier.
+ implicit none
+ character(*) :: name
+ real(8),dimension(:,:,:) :: values
+
+ integer,parameter :: u = 33
+ character(3) :: ts
+ integer :: i,j
+
+ ! begin
+ if (main_task) then
+ write(ts,'(i3.3)') tasks
+ open(unit=u,file=name//ts//".txt",form="formatted",status="replace")
+ do j = lbound(values,3),ubound(values,3)
+ do i = lbound(values,2),ubound(values,2)
+ write(u,'(2i6,100g15.5e3)') j,i,values(:,i,j)
+ end do
+ write(u,'()')
+ end do
+ close(u)
+ end if
+
+ call parallel_barrier ! Only the main_task writes the variable. Rest wait here.
+ end subroutine parallel_print_real8_3d
+
+ function parallel_put_att_character(ncid,varid,name,values)
+   ! Collective wrapper for nf90_put_att (character attribute):
+   ! written by the main task only; the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_put_att_character,varid
+   character(len=*) :: name,values
+   ! begin
+   if (main_task) then
+      parallel_put_att_character = nf90_put_att(ncid,varid,name,values)
+   end if
+   call broadcast(parallel_put_att_character)
+ end function parallel_put_att_character
+
+ function parallel_put_att_real4(ncid,varid,name,values)
+   ! Collective wrapper for nf90_put_att (real4 scalar attribute):
+   ! written by the main task only; the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_put_att_real4,varid
+   character(len=*) :: name
+   real(4) :: values
+   ! begin
+   if (main_task) then
+      parallel_put_att_real4 = nf90_put_att(ncid,varid,name,values)
+   end if
+   call broadcast(parallel_put_att_real4)
+ end function parallel_put_att_real4
+
+ function parallel_put_att_real4_1d(ncid,varid,name,values)
+   ! Collective wrapper for nf90_put_att (1d real4 attribute):
+   ! written by the main task only; the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_put_att_real4_1d,varid
+   character(len=*) :: name
+   real(4),dimension(:) :: values
+   ! begin
+   if (main_task) then
+      parallel_put_att_real4_1d = nf90_put_att(ncid,varid,name,values)
+   end if
+   call broadcast(parallel_put_att_real4_1d)
+ end function parallel_put_att_real4_1d
+
+ function parallel_put_att_real8(ncid,varid,name,values)
+   ! Collective wrapper for nf90_put_att (real8 scalar attribute):
+   ! written by the main task only; the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_put_att_real8,varid
+   character(len=*) :: name
+   real(8) :: values
+   ! begin
+   if (main_task) then
+      parallel_put_att_real8 = nf90_put_att(ncid,varid,name,values)
+   end if
+   call broadcast(parallel_put_att_real8)
+ end function parallel_put_att_real8
+
+ function parallel_put_att_real8_1d(ncid,varid,name,values)
+   ! Collective wrapper for nf90_put_att (1d real8 attribute):
+   ! written by the main task only; the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_put_att_real8_1d,varid
+   character(len=*) :: name
+   real(8),dimension(:) :: values
+   ! begin
+   if (main_task) then
+      parallel_put_att_real8_1d = nf90_put_att(ncid,varid,name,values)
+   end if
+   call broadcast(parallel_put_att_real8_1d)
+ end function parallel_put_att_real8_1d
+
+ function parallel_put_var_real4(ncid,varid,values,start)
+   ! Collective wrapper for nf90_put_var (real4 scalar at index 'start'):
+   ! written by the main task only; the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_put_var_real4,varid
+   integer,dimension(:) :: start
+   real(4) :: values
+   ! begin
+   if (main_task) then
+      parallel_put_var_real4 = nf90_put_var(ncid,varid,values,start)
+   end if
+   call broadcast(parallel_put_var_real4)
+ end function parallel_put_var_real4
+
+ function parallel_put_var_real8(ncid,varid,values,start)
+   ! Collective wrapper for nf90_put_var (real8 scalar at index 'start'):
+   ! written by the main task only; the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_put_var_real8,varid
+   integer,dimension(:) :: start
+   real(8) :: values
+   ! begin
+   if (main_task) then
+      parallel_put_var_real8 = nf90_put_var(ncid,varid,values,start)
+   end if
+   call broadcast(parallel_put_var_real8)
+ end function parallel_put_var_real8
+
+ function parallel_put_var_real8_1d(ncid,varid,values,start)
+   ! Collective wrapper for nf90_put_var (1d real8 array, optional
+   ! start index): written by the main task only; the status is
+   ! broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_put_var_real8_1d,varid
+   integer,dimension(:),optional :: start
+   real(8),dimension(:) :: values
+   ! begin
+   if (present(start)) then
+      if (main_task) parallel_put_var_real8_1d = nf90_put_var(ncid,varid,values,start)
+   else
+      if (main_task) parallel_put_var_real8_1d = nf90_put_var(ncid,varid,values)
+   end if
+   call broadcast(parallel_put_var_real8_1d)
+ end function parallel_put_var_real8_1d
+
+ function parallel_redef(ncid)
+   ! Collective wrapper for nf90_redef: the main task re-enters define
+   ! mode; the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_redef
+   ! begin
+   if (main_task) then
+      parallel_redef = nf90_redef(ncid)
+   end if
+   call broadcast(parallel_redef)
+ end function parallel_redef
+
+! ------------------------------------------
+! functions for parallel_reduce_sum interface
+! ------------------------------------------
+ function parallel_reduce_sum_integer(x)
+   ! Global sum of an integer scalar over all tasks.  Uses
+   ! mpi_allreduce, so every task receives the result.
+   use mpi_mod
+   implicit none
+   integer :: x
+   integer :: ierror
+   integer :: total, parallel_reduce_sum_integer
+   ! begin
+   ! x itself is the send buffer; the reduced value lands in 'total'
+   call mpi_allreduce(x,total,1,mpi_integer,mpi_sum,comm,ierror)
+   parallel_reduce_sum_integer = total
+ end function parallel_reduce_sum_integer
+
+ function parallel_reduce_sum_real4(x)
+   ! Global sum of a real4 scalar over all tasks.  Uses mpi_allreduce,
+   ! so every task receives the result.
+   use mpi_mod
+   implicit none
+   real(4) :: x
+   integer :: ierror
+   real(4) :: total, parallel_reduce_sum_real4
+   ! begin
+   ! x itself is the send buffer; the reduced value lands in 'total'
+   call mpi_allreduce(x,total,1,mpi_real4,mpi_sum,comm,ierror)
+   parallel_reduce_sum_real4 = total
+ end function parallel_reduce_sum_real4
+
+ function parallel_reduce_sum_real8(x)
+   ! Global sum of a real8 scalar over all tasks.  Uses mpi_allreduce,
+   ! so every task receives the result.
+   use mpi_mod
+   implicit none
+   real(8) :: x
+   integer :: ierror
+   real(8) :: total, parallel_reduce_sum_real8
+   ! begin
+   ! x itself is the send buffer; the reduced value lands in 'total'
+   call mpi_allreduce(x,total,1,mpi_real8,mpi_sum,comm,ierror)
+   parallel_reduce_sum_real8 = total
+ end function parallel_reduce_sum_real8
+
+ function parallel_reduce_sum_real8_nvar(x)
+   ! Elementwise global sum of a 1d real8 array over all tasks.  Uses
+   ! mpi_allreduce, so every task receives the summed array.
+   use mpi_mod
+   implicit none
+   real(8) :: x(:)
+   integer :: ierror
+   real(8), dimension(size(x)) :: total, parallel_reduce_sum_real8_nvar
+   ! begin
+   ! x itself is the send buffer; the reduced values land in 'total'
+   call mpi_allreduce(x,total,size(x),mpi_real8,mpi_sum,comm,ierror)
+   parallel_reduce_sum_real8_nvar = total
+ end function parallel_reduce_sum_real8_nvar
+
+! ------------------------------------------
+! functions for parallel_reduce_max interface
+! ------------------------------------------
+ function parallel_reduce_max_integer(x)
+   ! Global maximum of an integer scalar over all tasks.  Uses
+   ! mpi_allreduce, so every task receives the result.
+   use mpi_mod
+   implicit none
+   integer :: x
+
+   integer :: ierror
+   integer :: biggest, parallel_reduce_max_integer
+   ! begin
+   call mpi_allreduce(x,biggest,1,mpi_integer,mpi_max,comm,ierror)
+   parallel_reduce_max_integer = biggest
+ end function parallel_reduce_max_integer
+
+ function parallel_reduce_max_real4(x)
+   ! Global maximum of a real4 scalar over all tasks.  Uses
+   ! mpi_allreduce, so every task receives the result.
+   use mpi_mod
+   implicit none
+   real(4) :: x
+
+   integer :: ierror
+   real(4) :: biggest, parallel_reduce_max_real4
+   ! begin
+   call mpi_allreduce(x,biggest,1,mpi_real4,mpi_max,comm,ierror)
+   parallel_reduce_max_real4 = biggest
+ end function parallel_reduce_max_real4
+
+ function parallel_reduce_max_real8(x)
+   ! Global maximum of a real8 scalar over all tasks.  Uses
+   ! mpi_allreduce, so every task receives the result.
+   use mpi_mod
+   implicit none
+   real(8) :: x
+
+   integer :: ierror
+   real(8) :: biggest, parallel_reduce_max_real8
+   ! begin
+   call mpi_allreduce(x,biggest,1,mpi_real8,mpi_max,comm,ierror)
+   parallel_reduce_max_real8 = biggest
+ end function parallel_reduce_max_real8
+
+! ------------------------------------------
+! routines for parallel_reduce_maxloc interface
+! ------------------------------------------
+ subroutine parallel_reduce_maxloc_integer(xin, xout, xprocout)
+   ! Global maximum of an integer scalar together with the rank owning
+   ! it, via mpi_maxloc on a {value, rank} pair.
+   use mpi_mod
+   implicit none
+   integer, intent(in) :: xin ! variable to reduce
+   integer, intent(out) :: xout ! value resulting from the reduction
+   integer, intent(out) :: xprocout ! processor on which reduced value occurs
+
+   integer :: ierror
+   integer, dimension(2) :: pair_in, pair_out
+   ! begin
+   pair_in(1) = xin
+   pair_in(2) = this_rank ! rank paired with the local value
+   call mpi_allreduce(pair_in,pair_out,1,MPI_2INTEGER,mpi_maxloc,comm,ierror)
+   xout = pair_out(1)
+   xprocout = pair_out(2)
+ end subroutine parallel_reduce_maxloc_integer
+
+ subroutine parallel_reduce_maxloc_real4(xin, xout, xprocout)
+   ! Global maximum of a real4 scalar together with the rank owning it,
+   ! via mpi_maxloc on a {value, rank} pair (the rank travels as a real).
+   use mpi_mod
+   implicit none
+   real(4), intent(in) :: xin ! variable to reduce
+   real(4), intent(out) :: xout ! value resulting from the reduction
+   integer, intent(out) :: xprocout ! processor on which reduced value occurs
+
+   integer :: ierror
+   real(4), dimension(2) :: pair_in, pair_out
+   ! begin
+   pair_in(1) = xin
+   pair_in(2) = this_rank ! rank coerced to real for the pair type
+   call mpi_allreduce(pair_in,pair_out,1,MPI_2REAL,mpi_maxloc,comm,ierror)
+   xout = pair_out(1)
+   xprocout = pair_out(2) ! coerced back to integer
+ end subroutine parallel_reduce_maxloc_real4
+
+ subroutine parallel_reduce_maxloc_real8(xin, xout, xprocout)
+   ! Global maximum of a real8 scalar together with the rank owning it,
+   ! via mpi_maxloc on a {value, rank} pair (the rank travels as a real).
+   use mpi_mod
+   implicit none
+   real(8), intent(in) :: xin ! variable to reduce
+   real(8), intent(out) :: xout ! value resulting from the reduction
+   integer, intent(out) :: xprocout ! processor on which reduced value occurs
+
+   integer :: ierror
+   real(8), dimension(2) :: pair_in, pair_out
+   ! begin
+   pair_in(1) = xin
+   pair_in(2) = this_rank ! rank coerced to real for the pair type
+   call mpi_allreduce(pair_in,pair_out,1,MPI_2DOUBLE_PRECISION,mpi_maxloc,comm,ierror)
+   xout = pair_out(1)
+   xprocout = pair_out(2) ! coerced back to integer
+ end subroutine parallel_reduce_maxloc_real8
+
+! ------------------------------------------
+! functions for parallel_reduce_min interface
+! ------------------------------------------
+ function parallel_reduce_min_integer(x)
+   ! Global minimum of an integer scalar over all tasks.  Uses
+   ! mpi_allreduce, so every task receives the result.
+   use mpi_mod
+   implicit none
+   integer :: x
+
+   integer :: ierror
+   integer :: smallest, parallel_reduce_min_integer
+   ! begin
+   call mpi_allreduce(x,smallest,1,mpi_integer,mpi_min,comm,ierror)
+   parallel_reduce_min_integer = smallest
+ end function parallel_reduce_min_integer
+
+ function parallel_reduce_min_real4(x)
+   ! Global minimum of a real4 scalar over all tasks.  Uses
+   ! mpi_allreduce, so every task receives the result.
+   use mpi_mod
+   implicit none
+   real(4) :: x
+
+   integer :: ierror
+   real(4) :: smallest, parallel_reduce_min_real4
+   ! begin
+   call mpi_allreduce(x,smallest,1,mpi_real4,mpi_min,comm,ierror)
+   parallel_reduce_min_real4 = smallest
+ end function parallel_reduce_min_real4
+
+ function parallel_reduce_min_real8(x)
+   ! Global minimum of a real8 scalar over all tasks.  Uses
+   ! mpi_allreduce, so every task receives the result.
+   use mpi_mod
+   implicit none
+   real(8) :: x
+
+   integer :: ierror
+   real(8) :: smallest, parallel_reduce_min_real8
+   ! begin
+   call mpi_allreduce(x,smallest,1,mpi_real8,mpi_min,comm,ierror)
+   parallel_reduce_min_real8 = smallest
+ end function parallel_reduce_min_real8
+
+! ------------------------------------------
+! routines for parallel_reduce_minloc interface
+! ------------------------------------------
+ subroutine parallel_reduce_minloc_integer(xin, xout, xprocout)
+   ! Global minimum of an integer scalar together with the rank owning
+   ! it, via mpi_minloc on a {value, rank} pair.
+   use mpi_mod
+   implicit none
+   integer, intent(in) :: xin ! variable to reduce
+   integer, intent(out) :: xout ! value resulting from the reduction
+   integer, intent(out) :: xprocout ! processor on which reduced value occurs
+
+   integer :: ierror
+   integer, dimension(2) :: pair_in, pair_out
+   ! begin
+   pair_in(1) = xin
+   pair_in(2) = this_rank ! rank paired with the local value
+   call mpi_allreduce(pair_in,pair_out,1,MPI_2INTEGER,mpi_minloc,comm,ierror)
+   xout = pair_out(1)
+   xprocout = pair_out(2)
+ end subroutine parallel_reduce_minloc_integer
+
+ subroutine parallel_reduce_minloc_real4(xin, xout, xprocout)
+   ! Global minimum of a real4 scalar together with the rank owning it,
+   ! via mpi_minloc on a {value, rank} pair (the rank travels as a real).
+   use mpi_mod
+   implicit none
+   real(4), intent(in) :: xin ! variable to reduce
+   real(4), intent(out) :: xout ! value resulting from the reduction
+   integer, intent(out) :: xprocout ! processor on which reduced value occurs
+
+   integer :: ierror
+   real(4), dimension(2) :: pair_in, pair_out
+   ! begin
+   pair_in(1) = xin
+   pair_in(2) = this_rank ! rank coerced to real for the pair type
+   call mpi_allreduce(pair_in,pair_out,1,MPI_2REAL,mpi_minloc,comm,ierror)
+   xout = pair_out(1)
+   xprocout = pair_out(2) ! coerced back to integer
+ end subroutine parallel_reduce_minloc_real4
+
+ subroutine parallel_reduce_minloc_real8(xin, xout, xprocout)
+   ! Global minimum of a real8 scalar together with the rank owning it,
+   ! via mpi_minloc on a {value, rank} pair (the rank travels as a real).
+   use mpi_mod
+   implicit none
+   real(8), intent(in) :: xin ! variable to reduce
+   real(8), intent(out) :: xout ! value resulting from the reduction
+   integer, intent(out) :: xprocout ! processor on which reduced value occurs
+
+   integer :: ierror
+   real(8), dimension(2) :: pair_in, pair_out
+   ! begin
+   pair_in(1) = xin
+   pair_in(2) = this_rank ! rank coerced to real for the pair type
+   call mpi_allreduce(pair_in,pair_out,1,MPI_2DOUBLE_PRECISION,mpi_minloc,comm,ierror)
+   xout = pair_out(1)
+   xprocout = pair_out(2) ! coerced back to integer
+ end subroutine parallel_reduce_minloc_real8
+
+
+ ! Andy removed support for returnownedvector in October 2011.
+ ! subroutine parallel_set_trilinos_return_vect
+ ! Trilinos can return the full solution to each node or just the owned portion
+ ! For parallel_mpi mode only the owned portion is expected
+ ! call returnownedvector() ! in trilinosLinearSolver.cpp
+ ! end subroutine parallel_set_trilinos_return_vect
+
+ subroutine parallel_show_minmax(label,values)
+ ! Print the global min and max of the non-halo interior of a
+ ! distributed 3d real8 field.  The reductions deliver to the main
+ ! task, which prints 'label' followed by the two extrema.
+ use mpi_mod
+ implicit none
+ character(*) :: label
+ real(8),dimension(:,:,:) :: values
+
+ integer :: ierror
+ real(8) :: allmin,allmax,mymin,mymax
+ ! begin
+ ! local extrema over the owned interior (halo rows/columns excluded)
+ mymin = minval(values(:,1+lhalo:size(values,2)-uhalo,&
+ 1+lhalo:size(values,3)-uhalo))
+ mymax = maxval(values(:,1+lhalo:size(values,2)-uhalo,&
+ 1+lhalo:size(values,3)-uhalo))
+ call mpi_reduce(mymin,allmin,1,mpi_real8,mpi_min,main_rank,comm,ierror)
+ call mpi_reduce(mymax,allmax,1,mpi_real8,mpi_max,main_rank,comm,ierror)
+ if (main_task) print *,label,allmin,allmax
+ end subroutine parallel_show_minmax
+
+ subroutine parallel_stop(file,line)
+   ! Abort the whole MPI job, after the main task reports the source
+   ! location.  mpi_abort should not return; the stop is a backstop.
+   use mpi_mod
+   implicit none
+   integer :: line
+   character(len=*) :: file
+   integer :: ierror
+   ! begin
+   if (main_task) then
+      write(0,*) "PARALLEL STOP in ",file," at line ",line
+   end if
+   call mpi_abort(MPI_COMM_WORLD, 1001, ierror)
+   stop "PARALLEL STOP"
+ end subroutine parallel_stop
+
+ function parallel_sync(ncid)
+   ! Collective wrapper for nf90_sync: the main task flushes the file;
+   ! the status is broadcast to all tasks.
+   implicit none
+   integer :: ncid,parallel_sync
+   ! begin
+   if (main_task) then
+      parallel_sync = nf90_sync(ncid)
+   end if
+   call broadcast(parallel_sync)
+ end function parallel_sync
+
+ !TODO - Remove subroutine parallel_velo_halo?
+ ! This subroutine is called only from periodic_boundaries subroutine, which is no longer used.
+
+ subroutine parallel_velo_halo(a)
+ ! One-sided halo update for a 2d staggered (velocity-grid) field:
+ ! each task receives from its east and north neighbors and sends to
+ ! its west and south neighbors, so only the eastern column and
+ ! northern row of halo points are filled.
+ use mpi_mod
+ implicit none
+ real(8),dimension(:,:) :: a
+
+ integer :: ierror,nrequest,erequest
+ real(8),dimension(size(a,2)-lhalo-uhalo+1) :: wsend,erecv
+ real(8),dimension(size(a,1)-lhalo) :: ssend,nrecv
+
+ ! begin
+ ! staggered arrays are one smaller than the local unstaggered grid
+ if (size(a,1)/=local_ewn-1.or.size(a,2)/=local_nsn-1) &
+ call parallel_stop(__FILE__,__LINE__)
+
+ if (uhalo==0) then
+ ! NOTE(wjs, 2014-10-16) I think that fixing this would involve replacing instances
+ ! of (-uhalo+1) with (-staggered_uhalo)
+ write(*,*) 'parallel_velo_halo currently does not work for uhalo=0'
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+
+ ! prepost the receives from the east and north neighbors
+ call mpi_irecv(erecv,size(erecv),mpi_real8,east,east,&
+ comm,erequest,ierror)
+ call mpi_irecv(nrecv,size(nrecv),mpi_real8,north,north,&
+ comm,nrequest,ierror)
+
+ ! send the western owned column; fill the eastern halo column on arrival
+ wsend(:) = a(1+lhalo,1+lhalo:size(a,2)-uhalo+1)
+ call mpi_send(wsend,size(wsend),mpi_real8,west,this_rank,comm,ierror)
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ a(size(a,1),1+lhalo:size(a,2)-uhalo+1) = erecv(:)
+
+ ! send the southern owned row; fill the northern halo row on arrival
+ ssend(:) = a(1+lhalo:,1+lhalo)
+ call mpi_send(ssend,size(ssend),mpi_real8,south,this_rank,comm,ierror)
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ a(1+lhalo:,size(a,2)) = nrecv(:)
+
+ end subroutine parallel_velo_halo
+
+
+ subroutine staggered_parallel_halo_extrapolate_integer_2d(a)
+
+ ! Update halos of a staggered 2d integer field, then extrapolate edge
+ ! values outward into the halo cells that lie on the global boundary.
+ ! NOTE(review): the comparisons against east/west/north/south appear to
+ ! detect global-boundary tasks via neighbor-rank wraparound set up
+ ! elsewhere in this module -- confirm against the rank-assignment code.
+ implicit none
+ integer,dimension(:,:) :: a
+ integer :: i, j
+
+ ! begin
+
+ ! Confirm staggered array
+ if (size(a,1)/=local_ewn-1 .or. size(a,2)/=local_nsn-1) then
+ write(*,*) "staggered_parallel_halo() requires staggered arrays."
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! Extrapolate the staggered field into halo cells along the global boundary.
+ ! Currently this is used only for kinbcmask.
+ ! Note: The extrapolation region includes locally owned cells along
+ ! the north and east boundaries of the global domain.
+
+ ! First update the halos so that we are sure the interior halos are correct
+ call staggered_parallel_halo(a)
+
+ ! MJH Note: Modified code to now copy entire east and west columns rather than
+ ! just the owned cells in those columns. This avoids having the halos have
+ ! potentially wrong information (i.e., a few cells in the corner don't get extrapolated into)
+
+ if (this_rank >= east) then ! at east edge of global domain
+ ! extrapolate eastward
+ do i = size(a,1)-staggered_uhalo, size(a,1)
+ a(i, :) = a(size(a,1)-staggered_uhalo-1, :)
+ enddo
+ endif
+
+ if (this_rank <= west) then ! at west edge of global domain
+ ! extrapolate westward
+ do i = 1, staggered_lhalo
+ a(i, :) = a(staggered_lhalo+1, :)
+ enddo
+ endif
+
+ if (this_rank >= north) then ! at north edge of global domain
+ ! extrapolate northward
+ do j = size(a,2)-staggered_uhalo, size(a,2)
+ a(:, j) = a(:, size(a,2)-staggered_uhalo-1)
+ enddo
+ endif
+
+ if (this_rank <= south) then ! at south edge of global domain
+ ! extrapolate southward
+ do j = 1, staggered_lhalo
+ a(:, j) = a(:, staggered_lhalo+1)
+ enddo
+ endif
+
+ end subroutine staggered_parallel_halo_extrapolate_integer_2d
+
+
+ subroutine staggered_parallel_halo_extrapolate_real8_2d(a)
+
+ ! Update halos of a staggered 2d real8 field, then extrapolate edge
+ ! values outward into the halo cells that lie on the global boundary.
+ ! NOTE(review): the comparisons against east/west/north/south appear to
+ ! detect global-boundary tasks via neighbor-rank wraparound set up
+ ! elsewhere in this module -- confirm against the rank-assignment code.
+ implicit none
+ real(8),dimension(:,:) :: a
+ integer :: i, j
+
+ ! begin
+
+ ! Confirm staggered array
+ if (size(a,1)/=local_ewn-1 .or. size(a,2)/=local_nsn-1) then
+ write(*,*) "staggered_parallel_halo() requires staggered arrays."
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! Extrapolate the staggered field into halo cells along the global boundary.
+ ! Currently this is used only for kinbcmask.
+ ! Note: The extrapolation region includes locally owned cells along
+ ! the north and east boundaries of the global domain.
+
+ ! First update the halos so that we are sure the interior halos are correct
+ call staggered_parallel_halo(a)
+
+ ! MJH Note: Modified code to now copy entire east and west columns rather than
+ ! just the owned cells in those columns. This avoids having the halos have
+ ! potentially wrong information (i.e., a few cells in the corner don't get extrapolated into)
+
+! Useful for debugging small domains (the YYYY is just a tag for grepping the output, particularly if you prepend the processor number, e.g. "0YYYY")
+! do j = 1, size(a,2)
+! write(6, "(i3, 'YYYY BEFORE row ', i3, 1000e9.2)") this_rank, j, a(:,j)
+! enddo
+
+ if (this_rank >= east) then ! at east edge of global domain
+ ! extrapolate eastward
+ do i = size(a,1)-staggered_uhalo, size(a,1)
+ a(i, :) = a(size(a,1)-staggered_uhalo-1, :)
+ enddo
+ endif
+
+ if (this_rank <= west) then ! at west edge of global domain
+ ! extrapolate westward
+ do i = 1, staggered_lhalo
+ a(i, :) = a(staggered_lhalo+1, :)
+ enddo
+ endif
+
+ if (this_rank >= north) then ! at north edge of global domain
+ ! extrapolate northward
+ do j = size(a,2)-staggered_uhalo, size(a,2)
+ a(:, j) = a(:, size(a,2)-staggered_uhalo-1)
+ enddo
+ endif
+
+ if (this_rank <= south) then ! at south edge of global domain
+ ! extrapolate southward
+ do j = 1, staggered_lhalo
+ a(:, j) = a(:, staggered_lhalo+1)
+ enddo
+ endif
+
+! Useful for debugging small domains
+! do j = 1, size(a,2)
+! write(6, "(i3, 'YYYY AFTER row ', i3, 1000e9.2)") this_rank, j, a(:,j)
+! enddo
+ end subroutine staggered_parallel_halo_extrapolate_real8_2d
+
+
+ subroutine staggered_parallel_halo_integer_2d(a)
+ use mpi_mod
+ implicit none
+ integer,dimension(:,:) :: a
+
+ ! Implements a staggered grid halo update.
+ ! As the grid is staggered, the array 'a' is one smaller in both dimensions than an unstaggered array.
+
+ ! The grid is laid out from the SW, and the lower left corner is assigned to this_rank = 0.
+ ! It's eastern nbhr is task_id = 1, proceeding rowwise and starting from the western edge.
+ ! The South-most processes own one additional row of stagggered variables on the southern edge
+ ! and have one less 'southern' halo row than other processes. Likewise, the West-most processes own one
+ ! additional column of staggered variables on the western edge and have one less 'western' halo column.
+ ! This is implemented by a modification to the staggered_lhalo value on these processes.
+
+ !WHL - I don't think we need to say that the South-most processes "own" an additional row of
+ ! staggered variables on the southern edge. I think we can treat the southern edge as a halo row
+ ! and still enforce the various global BC we want.
+
+ ! Maintaining global boundary conditions are not addressed within this routine (yet).
+
+ ! integer :: erequest,ierror,one,nrequest,srequest,wrequest
+ integer :: ierror,nrequest,srequest,erequest,wrequest
+
+ ! message buffers, sized to the east/west halo columns (excluding
+ ! corner rows) and the full-width north/south halo rows
+ integer,dimension(staggered_lhalo,size(a,2)-staggered_lhalo-staggered_uhalo) :: esend,wrecv
+ integer,dimension(staggered_uhalo,size(a,2)-staggered_lhalo-staggered_uhalo) :: erecv,wsend
+ integer,dimension(size(a,1),staggered_lhalo) :: nsend,srecv
+ integer,dimension(size(a,1),staggered_uhalo) :: nrecv,ssend
+
+ !WHL - I defined a logical variable to determine whether or not to fill halo cells
+ ! at the edge of the global domain. I am setting it to true by default to support
+ ! cyclic global BCs.
+ !TODO: Assume fill_global_halos is true in all cases? (Here and below)
+
+ logical :: fill_global_halos = .true.
+
+ ! begin
+
+ ! Confirm staggered array
+ if (size(a,1)/=local_ewn-1 .or. size(a,2)/=local_nsn-1) then
+ write(*,*) "staggered_parallel_halo() requires staggered arrays."
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! Prepost expected receives
+
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_irecv(erecv,size(erecv),mpi_integer,east,east,comm,erequest,ierror)
+ endif
+
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_irecv(wrecv,size(wrecv),mpi_integer,west,west,comm,wrequest,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_irecv(nrecv,size(nrecv),mpi_integer,north,north,comm,nrequest,ierror)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_irecv(srecv,size(srecv),mpi_integer,south,south,comm,srequest,ierror)
+ endif
+
+ ! east-west exchange first: send owned edge columns, then complete
+ ! the matching receives into the halo columns
+ if (this_rank > west .or. fill_global_halos) then
+! wsend(:,1:size(a,2)-staggered_shalo-staggered_nhalo) = &
+! a(1+staggered_whalo:1+staggered_whalo+staggered_ehalo-1, &
+! 1+staggered_shalo:size(a,2)-staggered_nhalo)
+ wsend(:,1:size(a,2)-staggered_lhalo-staggered_uhalo) = &
+ a(1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+ 1+staggered_lhalo:size(a,2)-staggered_uhalo)
+ call mpi_send(wsend,size(wsend),mpi_integer,west,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < east .or. fill_global_halos) then
+! esend(:,1:size(a,2)-staggered_shalo-staggered_nhalo) = &
+! a(size(a,1)-staggered_ehalo-staggered_whalo+1:size(a,1)-staggered_ehalo, &
+! 1+staggered_shalo:size(a,2)-staggered_nhalo)
+ esend(:,1:size(a,2)-staggered_lhalo-staggered_uhalo) = &
+ a(size(a,1)-staggered_uhalo-staggered_lhalo+1:size(a,1)-staggered_uhalo, &
+ 1+staggered_lhalo:size(a,2)-staggered_uhalo)
+ call mpi_send(esend,size(esend),mpi_integer,east,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+! a(size(a,1)-staggered_ehalo+1:size(a,1), &
+! 1+staggered_shalo:size(a,2)-staggered_nhalo) = &
+! erecv(:,1:size(a,2)-staggered_shalo-staggered_nhalo)
+ a(size(a,1)-staggered_uhalo+1:size(a,1), &
+ 1+staggered_lhalo:size(a,2)-staggered_uhalo) = &
+ erecv(:,1:size(a,2)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+! a(1:staggered_whalo, &
+! 1+staggered_shalo:size(a,2)-staggered_nhalo) = &
+! wrecv(:,1:size(a,2)-staggered_shalo-staggered_nhalo)
+ a(1:staggered_lhalo, &
+ 1+staggered_lhalo:size(a,2)-staggered_uhalo) = &
+ wrecv(:,1:size(a,2)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ ! north-south exchange second, over full array width, so the corner
+ ! halo cells pick up the already-updated east/west columns
+ if (this_rank > south .or. fill_global_halos) then
+ ssend(:,:) = &
+! a(:,1+staggered_shalo:1+staggered_shalo+staggered_nhalo-1)
+ a(:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_integer,south,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ nsend(:,:) = &
+! a(:,size(a,2)-staggered_nhalo-staggered_shalo+1:size(a,2)-staggered_nhalo)
+ a(:,size(a,2)-staggered_uhalo-staggered_lhalo+1:size(a,2)-staggered_uhalo)
+ call mpi_send(nsend,size(nsend),mpi_integer,north,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+! a(:,size(a,2)-staggered_nhalo+1:size(a,2)) = nrecv(:,:)
+ a(:,size(a,2)-staggered_uhalo+1:size(a,2)) = nrecv(:,:)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+! a(:,1:staggered_shalo) = srecv(:,:)
+ a(:,1:staggered_lhalo) = srecv(:,:)
+ endif
+
+ end subroutine staggered_parallel_halo_integer_2d
+
+
+ subroutine staggered_parallel_halo_integer_3d(a)
+ use mpi_mod
+ implicit none
+ integer,dimension(:,:,:) :: a
+
+ ! Implements a staggered grid halo update for a 3D field.
+ ! As the grid is staggered, the array 'a' is one smaller in both dimensions than an unstaggered array.
+ ! The vertical dimension is assumed to be the first index, i.e., a(k,i,j).
+
+ ! The grid is laid out from the SW, and the lower left corner is assigned to this_rank = 0.
+ ! It's eastern nbhr is task_id = 1, proceeding rowwise and starting from the western edge.
+ ! The South-most processes own one additional row of stagggered variables on the southern edge
+ ! and have one less 'southern' halo row than other processes. Likewise, the West-most processes own one
+ ! additional column of staggered variables on the western edge and have one less 'western' halo column.
+ ! This is implemented by a modification to the staggered_lhalo value on these processes.
+
+ ! Maintaining global boundary conditions are not addressed within this routine (yet).
+
+ ! integer :: erequest,ierror,one,nrequest,srequest,wrequest
+ integer :: ierror,nrequest,srequest,erequest,wrequest
+
+ ! message buffers: full vertical extent times the east/west halo
+ ! columns (excluding corner rows) or the full-width north/south rows
+ integer,dimension(size(a,1),staggered_lhalo,size(a,3)-staggered_lhalo-staggered_uhalo) :: esend,wrecv
+ integer,dimension(size(a,1),staggered_uhalo,size(a,3)-staggered_lhalo-staggered_uhalo) :: erecv,wsend
+ integer,dimension(size(a,1),size(a,2),staggered_lhalo) :: nsend,srecv
+ integer,dimension(size(a,1),size(a,2),staggered_uhalo) :: nrecv,ssend
+
+ !WHL - I defined a logical variable to determine whether or not to fill halo cells
+ ! at the edge of the global domain. I am setting it to true by default to support
+ ! cyclic global BCs.
+
+ logical :: fill_global_halos = .true.
+
+ ! begin
+
+ ! Confirm staggered array
+ if (size(a,2)/=local_ewn-1.or.size(a,3)/=local_nsn-1) then
+ write(*,*) "staggered_parallel_halo() requires staggered arrays."
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! Prepost expected receives
+
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_irecv(erecv,size(erecv),mpi_integer,east,east,comm,erequest,ierror)
+ endif
+
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_irecv(wrecv,size(wrecv),mpi_integer,west,west,comm,wrequest,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_irecv(nrecv,size(nrecv),mpi_integer,north,north,comm,nrequest,ierror)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_irecv(srecv,size(srecv),mpi_integer,south,south,comm,srequest,ierror)
+ endif
+
+ ! east-west exchange first: send owned edge columns, then complete
+ ! the matching receives into the halo columns
+ if (this_rank > west .or. fill_global_halos) then
+ wsend(:,:,1:size(a,3)-staggered_lhalo-staggered_uhalo) = &
+ a(:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+ 1+staggered_lhalo:size(a,3)-staggered_uhalo)
+ call mpi_send(wsend,size(wsend),mpi_integer,west,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < east .or. fill_global_halos) then
+ esend(:,:,1:size(a,3)-staggered_lhalo-staggered_uhalo) = &
+ a(:,size(a,2)-staggered_uhalo-staggered_lhalo+1:size(a,2)-staggered_uhalo, &
+ 1+staggered_lhalo:size(a,3)-staggered_uhalo)
+ call mpi_send(esend,size(esend),mpi_integer,east,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ a(:,size(a,2)-staggered_uhalo+1:size(a,2), &
+ 1+staggered_lhalo:size(a,3)-staggered_uhalo) = &
+ erecv(:,:,1:size(a,3)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ a(:,1:staggered_lhalo, &
+ 1+staggered_lhalo:size(a,3)-staggered_uhalo) = &
+ wrecv(:,:,1:size(a,3)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ ! north-south exchange second, over full array width, so the corner
+ ! halo cells pick up the already-updated east/west columns
+ if (this_rank > south .or. fill_global_halos) then
+ ssend(:,:,:) = &
+ a(:,:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_integer,south,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ nsend(:,:,:) = &
+ a(:,:,size(a,3)-staggered_uhalo-staggered_lhalo+1:size(a,3)-staggered_uhalo)
+ call mpi_send(nsend,size(nsend),mpi_integer,north,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ a(:,:,size(a,3)-staggered_uhalo+1:size(a,3)) = nrecv(:,:,:)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ a(:,:,1:staggered_lhalo) = srecv(:,:,:)
+ endif
+
+ end subroutine staggered_parallel_halo_integer_3d
+
+
+ !WHL - Edited the original subroutine so that values from N and E edges
+ ! of global domain can be written to halo cells at the S and W edges,
+ ! to allow cyclic BCs for staggered variables
+
+ subroutine staggered_parallel_halo_real8_2d(a)
+
+ use mpi_mod
+ implicit none
+ ! Field on the staggered (velocity) grid; its halo rows/columns are updated in place.
+ real(8),dimension(:,:) :: a
+
+ ! Implements a staggered grid halo update.
+ ! As the grid is staggered, the array 'a' is one smaller in both dimensions than an unstaggered array.
+
+ ! The grid is laid out from the SW, and the lower left corner is assigned to this_rank = 0.
+ ! Its eastern neighbor is task_id = 1, proceeding rowwise and starting from the western edge.
+ ! The South-most processes own one additional row of staggered variables on the southern edge
+ ! and have one less 'southern' halo row than other processes. Likewise, the West-most processes own one
+ ! additional column of staggered variables on the western edge and have one less 'western' halo column.
+ ! This is implemented by a modification to the staggered_lhalo value on these processes.
+
+ ! NOTE(review): halo cells at the edges of the global domain ARE filled here when
+ ! fill_global_halos = .true. (see below); this presumes the east/west/north/south
+ ! neighbor ids wrap around the global domain (cyclic BCs) - confirm in the grid setup.
+
+ ! integer :: erequest,ierror,one,nrequest,srequest,wrequest
+ integer :: ierror,nrequest,srequest,erequest,wrequest
+! real(8),dimension(staggered_whalo,size(a,2)-staggered_shalo-staggered_nhalo) :: esend,wrecv
+! real(8),dimension(staggered_ehalo,size(a,2)-staggered_shalo-staggered_nhalo) :: erecv,wsend
+! real(8),dimension(size(a,1),staggered_shalo) :: nsend,srecv
+! real(8),dimension(size(a,1),staggered_nhalo) :: nrecv,ssend
+ ! Message buffers: wsend pairs with the neighbor's erecv, and esend with wrecv,
+ ! so their widths match (uhalo columns travel west->east halo, lhalo columns east->west halo).
+ real(8),dimension(staggered_lhalo,size(a,2)-staggered_lhalo-staggered_uhalo) :: esend,wrecv
+ real(8),dimension(staggered_uhalo,size(a,2)-staggered_lhalo-staggered_uhalo) :: erecv,wsend
+ real(8),dimension(size(a,1),staggered_lhalo) :: nsend,srecv
+ real(8),dimension(size(a,1),staggered_uhalo) :: nrecv,ssend
+
+ !WHL - I defined a logical variable to determine whether or not to fill halo cells
+ ! at the edge of the global domain. I am setting it to true by default to support
+ ! cyclic global BCs.
+
+ logical :: fill_global_halos = .true.
+
+ ! begin
+
+ ! Confirm staggered array
+ if (size(a,1)/=local_ewn-1 .or. size(a,2)/=local_nsn-1) then
+ write(*,*) "staggered_parallel_halo() requires staggered arrays."
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! Prepost expected receives
+ ! All receives are posted (nonblocking) before any blocking send below, so each
+ ! mpi_send has a matching receive already posted on its destination task.
+ ! Every message is tagged with the sending task's rank.
+
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_irecv(erecv,size(erecv),mpi_real8,east,east,comm,erequest,ierror)
+ endif
+
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_irecv(wrecv,size(wrecv),mpi_real8,west,west,comm,wrequest,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_irecv(nrecv,size(nrecv),mpi_real8,north,north,comm,nrequest,ierror)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_irecv(srecv,size(srecv),mpi_real8,south,south,comm,srequest,ierror)
+ endif
+
+ ! Send the first owned columns (just east of the west halo) to the western
+ ! neighbor, where they become its eastern halo.
+ if (this_rank > west .or. fill_global_halos) then
+! wsend(:,1:size(a,2)-staggered_shalo-staggered_nhalo) = &
+! a(1+staggered_whalo:1+staggered_whalo+staggered_ehalo-1, &
+! 1+staggered_shalo:size(a,2)-staggered_nhalo)
+ wsend(:,1:size(a,2)-staggered_lhalo-staggered_uhalo) = &
+ a(1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+ 1+staggered_lhalo:size(a,2)-staggered_uhalo)
+ call mpi_send(wsend,size(wsend),mpi_real8,west,this_rank,comm,ierror)
+ endif
+
+ ! Send the last owned columns to the eastern neighbor (its western halo).
+ if (this_rank < east .or. fill_global_halos) then
+! esend(:,1:size(a,2)-staggered_shalo-staggered_nhalo) = &
+! a(size(a,1)-staggered_ehalo-staggered_whalo+1:size(a,1)-staggered_ehalo, &
+! 1+staggered_shalo:size(a,2)-staggered_nhalo)
+ esend(:,1:size(a,2)-staggered_lhalo-staggered_uhalo) = &
+ a(size(a,1)-staggered_uhalo-staggered_lhalo+1:size(a,1)-staggered_uhalo, &
+ 1+staggered_lhalo:size(a,2)-staggered_uhalo)
+ call mpi_send(esend,size(esend),mpi_real8,east,this_rank,comm,ierror)
+ endif
+
+ ! Unpack the eastern halo columns (y-interior only; the corner points are
+ ! completed by the north/south exchange below).
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+! a(size(a,1)-staggered_ehalo+1:size(a,1), &
+! 1+staggered_shalo:size(a,2)-staggered_nhalo) = &
+! erecv(:,1:size(a,2)-staggered_shalo-staggered_nhalo)
+ a(size(a,1)-staggered_uhalo+1:size(a,1), &
+ 1+staggered_lhalo:size(a,2)-staggered_uhalo) = &
+ erecv(:,1:size(a,2)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ ! Unpack the western halo columns.
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+! a(1:staggered_whalo, &
+! 1+staggered_shalo:size(a,2)-staggered_nhalo) = &
+! wrecv(:,1:size(a,2)-staggered_shalo-staggered_nhalo)
+ a(1:staggered_lhalo, &
+ 1+staggered_lhalo:size(a,2)-staggered_uhalo) = &
+ wrecv(:,1:size(a,2)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ ! North/south exchange runs after the east/west halos are in place and sends
+ ! full rows (all of dim 1), so the corner halo points are filled correctly.
+ if (this_rank > south .or. fill_global_halos) then
+ ssend(:,:) = &
+! a(:,1+staggered_shalo:1+staggered_shalo+staggered_nhalo-1)
+ a(:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_real8,south,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ nsend(:,:) = &
+! a(:,size(a,2)-staggered_nhalo-staggered_shalo+1:size(a,2)-staggered_nhalo)
+ a(:,size(a,2)-staggered_uhalo-staggered_lhalo+1:size(a,2)-staggered_uhalo)
+ call mpi_send(nsend,size(nsend),mpi_real8,north,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+! a(:,size(a,2)-staggered_nhalo+1:size(a,2)) = nrecv(:,:)
+ a(:,size(a,2)-staggered_uhalo+1:size(a,2)) = nrecv(:,:)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+! a(:,1:staggered_shalo) = srecv(:,:)
+ a(:,1:staggered_lhalo) = srecv(:,:)
+ endif
+
+ end subroutine staggered_parallel_halo_real8_2d
+
+ !WHL - Edited the original subroutine so that values from N and E edges
+ ! of global domain can be written to halo cells at the S and W edges,
+ ! to allow cyclic BCs for staggered variables
+
+ subroutine staggered_parallel_halo_real8_3d(a)
+
+ use mpi_mod
+ implicit none
+ ! Field on the staggered (velocity) grid; halo cells are updated in place.
+ real(8),dimension(:,:,:) :: a
+
+ ! Implements a staggered grid halo update for a 3D field.
+ ! As the grid is staggered, the array 'a' is one smaller in both dimensions than an unstaggered array.
+ ! The vertical dimension is assumed to be the first index, i.e., a(k,i,j).
+
+ ! The grid is laid out from the SW, and the lower left corner is assigned to this_rank = 0.
+ ! Its eastern neighbor is task_id = 1, proceeding rowwise and starting from the western edge.
+ ! The South-most processes own one additional row of staggered variables on the southern edge
+ ! and have one less 'southern' halo row than other processes. Likewise, the West-most processes own one
+ ! additional column of staggered variables on the western edge and have one less 'western' halo column.
+ ! This is implemented by a modification to the staggered_lhalo value on these processes.
+
+ ! NOTE(review): halo cells at the edges of the global domain ARE filled here when
+ ! fill_global_halos = .true. (see below); this presumes the east/west/north/south
+ ! neighbor ids wrap around the global domain (cyclic BCs) - confirm in the grid setup.
+
+ ! integer :: erequest,ierror,one,nrequest,srequest,wrequest
+ integer :: ierror,nrequest,srequest,erequest,wrequest
+
+! real(8),dimension(size(a,1),staggered_whalo,size(a,3)-staggered_shalo-staggered_nhalo) :: esend,wrecv
+! real(8),dimension(size(a,1),staggered_ehalo,size(a,3)-staggered_shalo-staggered_nhalo) :: erecv,wsend
+! real(8),dimension(size(a,1),size(a,2),staggered_shalo) :: nsend,srecv
+! real(8),dimension(size(a,1),size(a,2),staggered_nhalo) :: nrecv,ssend
+ ! Message buffers: wsend pairs with the neighbor's erecv, and esend with wrecv,
+ ! so their halo widths match; likewise nsend<->srecv and ssend<->nrecv.
+ real(8),dimension(size(a,1),staggered_lhalo,size(a,3)-staggered_lhalo-staggered_uhalo) :: esend,wrecv
+ real(8),dimension(size(a,1),staggered_uhalo,size(a,3)-staggered_lhalo-staggered_uhalo) :: erecv,wsend
+ real(8),dimension(size(a,1),size(a,2),staggered_lhalo) :: nsend,srecv
+ real(8),dimension(size(a,1),size(a,2),staggered_uhalo) :: nrecv,ssend
+
+ !WHL - I defined a logical variable to determine whether or not to fill halo cells
+ ! at the edge of the global domain. I am setting it to true by default to support
+ ! cyclic global BCs.
+
+ logical :: fill_global_halos = .true.
+
+ ! begin
+
+ ! Confirm staggered array
+ if (size(a,2)/=local_ewn-1 .or. size(a,3)/=local_nsn-1) then
+ write(*,*) "staggered_parallel_halo() requires staggered arrays."
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! Prepost expected receives
+ ! Receives are posted (nonblocking) before any blocking send, so each mpi_send
+ ! has a matching receive already posted; messages are tagged with the sender's rank.
+
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_irecv(erecv,size(erecv),mpi_real8,east,east,comm,erequest,ierror)
+ endif
+
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_irecv(wrecv,size(wrecv),mpi_real8,west,west,comm,wrequest,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_irecv(nrecv,size(nrecv),mpi_real8,north,north,comm,nrequest,ierror)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_irecv(srecv,size(srecv),mpi_real8,south,south,comm,srequest,ierror)
+ endif
+
+ ! Send the first owned columns (just east of the west halo) westward;
+ ! they become the western neighbor's eastern halo.
+ if (this_rank > west .or. fill_global_halos) then
+! wsend(:,:,1:size(a,3)-staggered_shalo-staggered_nhalo) = &
+! a(:,1+staggered_whalo:1+staggered_whalo+staggered_ehalo-1, &
+! 1+staggered_shalo:size(a,3)-staggered_nhalo)
+ wsend(:,:,1:size(a,3)-staggered_lhalo-staggered_uhalo) = &
+ a(:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+ 1+staggered_lhalo:size(a,3)-staggered_uhalo)
+ call mpi_send(wsend,size(wsend),mpi_real8,west,this_rank,comm,ierror)
+ endif
+
+ ! Send the last owned columns eastward (the eastern neighbor's western halo).
+ if (this_rank < east .or. fill_global_halos) then
+! esend(:,:,1:size(a,3)-staggered_shalo-staggered_nhalo) = &
+! a(:,size(a,2)-staggered_ehalo-staggered_whalo+1:size(a,2)-staggered_ehalo, &
+! 1+staggered_shalo:size(a,3)-staggered_nhalo)
+ esend(:,:,1:size(a,3)-staggered_lhalo-staggered_uhalo) = &
+ a(:,size(a,2)-staggered_uhalo-staggered_lhalo+1:size(a,2)-staggered_uhalo, &
+ 1+staggered_lhalo:size(a,3)-staggered_uhalo)
+ call mpi_send(esend,size(esend),mpi_real8,east,this_rank,comm,ierror)
+ endif
+
+ ! Unpack the eastern halo (y-interior only; corners are completed by the
+ ! north/south exchange below).
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+! a(:,size(a,2)-staggered_ehalo+1:size(a,2), &
+! 1+staggered_shalo:size(a,3)-staggered_nhalo) = &
+! erecv(:,:,1:size(a,3)-staggered_shalo-staggered_nhalo)
+ a(:,size(a,2)-staggered_uhalo+1:size(a,2), &
+ 1+staggered_lhalo:size(a,3)-staggered_uhalo) = &
+ erecv(:,:,1:size(a,3)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ ! Unpack the western halo.
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+! a(:,1:staggered_whalo, &
+! 1+staggered_shalo:size(a,3)-staggered_nhalo) = &
+! wrecv(:,:,1:size(a,3)-staggered_shalo-staggered_nhalo)
+ a(:,1:staggered_lhalo, &
+ 1+staggered_lhalo:size(a,3)-staggered_uhalo) = &
+ wrecv(:,:,1:size(a,3)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ ! North/south exchange runs after the east/west halos are in place and sends
+ ! full rows (all of dim 2), so corner halo points are filled correctly.
+ if (this_rank > south .or. fill_global_halos) then
+! ssend(:,:,:) = &
+! a(:,:,1+staggered_shalo:1+staggered_shalo+staggered_nhalo-1)
+ ssend(:,:,:) = &
+ a(:,:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_real8,south,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ nsend(:,:,:) = &
+! a(:,:,size(a,3)-staggered_nhalo-staggered_shalo+1:size(a,3)-staggered_nhalo)
+ a(:,:,size(a,3)-staggered_uhalo-staggered_lhalo+1:size(a,3)-staggered_uhalo)
+ call mpi_send(nsend,size(nsend),mpi_real8,north,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+! a(:,:,size(a,3)-staggered_nhalo+1:size(a,3)) = nrecv(:,:,:)
+ a(:,:,size(a,3)-staggered_uhalo+1:size(a,3)) = nrecv(:,:,:)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+! a(:,:,1:staggered_shalo) = srecv(:,:,:)
+ a(:,:,1:staggered_lhalo) = srecv(:,:,:)
+ endif
+
+ end subroutine staggered_parallel_halo_real8_3d
+
+!WHL - New subroutine for 4D arrays.
+ subroutine staggered_parallel_halo_real8_4d(a)
+
+ use mpi_mod
+ implicit none
+ ! Field on the staggered (velocity) grid; halo cells are updated in place.
+ real(8),dimension(:,:,:,:) :: a
+
+ ! Implements a staggered grid halo update for a 4D field.
+ ! This subroutine is used for the 4D arrays that hold matrix entries.
+
+ ! As the grid is staggered, the array 'a' is one smaller in x and y dimensions than an unstaggered array.
+ ! The vertical dimension is assumed to precede the i and j indices, i.e., a(:,k,i,j).
+ ! The first dimension holds matrix elements for a single row.
+
+ ! The grid is laid out from the SW, and the lower left corner is assigned to this_rank = 0.
+ ! Its eastern neighbor is task_id = 1, proceeding rowwise and starting from the western edge.
+ ! The South-most processes own one additional row of staggered variables on the southern edge
+ ! and have one less 'southern' halo row than other processes. Likewise, the West-most processes own one
+ ! additional column of staggered variables on the western edge and have one less 'western' halo column.
+ ! This is implemented by a modification to the staggered_lhalo value on these processes.
+
+ ! NOTE(review): halo cells at the edges of the global domain ARE filled here when
+ ! fill_global_halos = .true. (see below); this presumes the east/west/north/south
+ ! neighbor ids wrap around the global domain (cyclic BCs) - confirm in the grid setup.
+
+ ! integer :: erequest,ierror,one,nrequest,srequest,wrequest
+ integer :: ierror,nrequest,srequest,erequest,wrequest
+
+ ! Message buffers: wsend pairs with the neighbor's erecv, and esend with wrecv,
+ ! so their halo widths match; likewise nsend<->srecv and ssend<->nrecv.
+ real(8),dimension(size(a,1),size(a,2), &
+ staggered_lhalo,size(a,4)-staggered_lhalo-staggered_uhalo) :: esend,wrecv
+ real(8),dimension(size(a,1),size(a,2), &
+ staggered_uhalo,size(a,4)-staggered_lhalo-staggered_uhalo) :: erecv,wsend
+ real(8),dimension(size(a,1),size(a,2),size(a,3),staggered_lhalo) :: nsend,srecv
+ real(8),dimension(size(a,1),size(a,2),size(a,3),staggered_uhalo) :: nrecv,ssend
+
+ !WHL - I defined a logical variable to determine whether or not to fill halo cells
+ ! at the edge of the global domain. I am setting it to true by default to support
+ ! cyclic global BCs.
+
+ logical :: fill_global_halos = .true.
+
+ ! begin
+
+ ! Confirm staggered array
+ if (size(a,3)/=local_ewn-1 .or. size(a,4)/=local_nsn-1) then
+ write(*,*) "staggered_parallel_halo() requires staggered arrays."
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ ! Prepost expected receives
+ ! Receives are posted (nonblocking) before any blocking send, so each mpi_send
+ ! has a matching receive already posted; messages are tagged with the sender's rank.
+
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_irecv(erecv,size(erecv),mpi_real8,east,east,comm,erequest,ierror)
+ endif
+
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_irecv(wrecv,size(wrecv),mpi_real8,west,west,comm,wrequest,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_irecv(nrecv,size(nrecv),mpi_real8,north,north,comm,nrequest,ierror)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_irecv(srecv,size(srecv),mpi_real8,south,south,comm,srequest,ierror)
+ endif
+
+ ! Send the first owned columns (just east of the west halo) westward;
+ ! they become the western neighbor's eastern halo.
+ if (this_rank > west .or. fill_global_halos) then
+ wsend(:,:,:,1:size(a,4)-staggered_lhalo-staggered_uhalo) = &
+ a(:,:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+ 1+staggered_lhalo:size(a,4)-staggered_uhalo)
+ call mpi_send(wsend,size(wsend),mpi_real8,west,this_rank,comm,ierror)
+ endif
+
+ ! Send the last owned columns eastward (the eastern neighbor's western halo).
+ if (this_rank < east .or. fill_global_halos) then
+ esend(:,:,:,1:size(a,4)-staggered_lhalo-staggered_uhalo) = &
+ a(:,:,size(a,3)-staggered_uhalo-staggered_lhalo+1:size(a,3)-staggered_uhalo, &
+ 1+staggered_lhalo:size(a,4)-staggered_uhalo)
+ call mpi_send(esend,size(esend),mpi_real8,east,this_rank,comm,ierror)
+ endif
+
+ ! Unpack the eastern halo (y-interior only; corners are completed by the
+ ! north/south exchange below).
+ if (this_rank < east .or. fill_global_halos) then
+ call mpi_wait(erequest,mpi_status_ignore,ierror)
+ a(:,:,size(a,3)-staggered_uhalo+1:size(a,3), &
+ 1+staggered_lhalo:size(a,4)-staggered_uhalo) = &
+ erecv(:,:,:,1:size(a,4)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ ! Unpack the western halo.
+ if (this_rank > west .or. fill_global_halos) then
+ call mpi_wait(wrequest,mpi_status_ignore,ierror)
+ a(:,:,1:staggered_lhalo, &
+ 1+staggered_lhalo:size(a,4)-staggered_uhalo) = &
+ wrecv(:,:,:,1:size(a,4)-staggered_lhalo-staggered_uhalo)
+ endif
+
+ ! North/south exchange runs after the east/west halos are in place and sends
+ ! full rows (all of dim 3), so corner halo points are filled correctly.
+ if (this_rank > south .or. fill_global_halos) then
+ ssend(:,:,:,:) = &
+ a(:,:,:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+ call mpi_send(ssend,size(ssend),mpi_real8,south,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ nsend(:,:,:,:) = &
+ a(:,:,:,size(a,4)-staggered_uhalo-staggered_lhalo+1:size(a,4)-staggered_uhalo)
+ call mpi_send(nsend,size(nsend),mpi_real8,north,this_rank,comm,ierror)
+ endif
+
+ if (this_rank < north .or. fill_global_halos) then
+ call mpi_wait(nrequest,mpi_status_ignore,ierror)
+ a(:,:,:,size(a,4)-staggered_uhalo+1:size(a,4)) = nrecv(:,:,:,:)
+ endif
+
+ if (this_rank > south .or. fill_global_halos) then
+ call mpi_wait(srequest,mpi_status_ignore,ierror)
+ a(:,:,:,1:staggered_lhalo) = srecv(:,:,:,:)
+ endif
+
+ end subroutine staggered_parallel_halo_real8_4d
+
+! Following routines imported from the Community Earth System Model
+! (models/utils/mct/mpeu.m_FcComms.F90)
+!BOP -------------------------------------------------------------------
+!
+! !IROUTINE: fc_gather_int - Gather an array of type integer
+!
+! !DESCRIPTION:
+! This routine gathers a {\em distributed} array of type {\em integer}
+! to the {\tt root} process. Explicit handshaking messages are used
+! to control the number of processes communicating with the root
+! at any one time.
+!
+! If flow_cntl optional parameter
+! < 0 : use MPI_Gather
+! >= 0: use point-to-point with handshaking messages and
+! preposting receive requests up to
+! min(max(1,flow_cntl),max_gather_block_size)
+! ahead if optional flow_cntl parameter is present.
+! Otherwise, fc_gather_flow_cntl is used in its place.
+! Default value is max_gather_block_size.
+! !INTERFACE:
+!
+ subroutine fc_gather_int (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnt, recvtype, &
+ root, comm, flow_cntl )
+!
+! !USES:
+!
+ use mpi_mod
+
+!
+! !INPUT PARAMETERS:
+!
+ integer, intent(in) :: sendbuf(*)
+ integer, intent(in) :: sendcnt
+ integer, intent(in) :: sendtype
+ integer, intent(in) :: recvcnt
+ integer, intent(in) :: recvtype
+ integer, intent(in) :: root
+ integer, intent(in) :: comm
+ integer, optional, intent(in) :: flow_cntl
+
+! !OUTPUT PARAMETERS:
+!
+ ! Only referenced on the root task (standard MPI gather semantics).
+ integer, intent(out) :: recvbuf(*)
+
+!EOP ___________________________________________________________________
+
+ ! Handshake token the root sends to grant a task permission to transmit.
+ integer :: signal
+ logical :: fc_gather ! use explicit flow control?
+ integer :: gather_block_size ! number of preposted receive requests
+
+ integer :: mytid, mysize, mtag, p, i, count, displs
+ integer :: preposts, head, tail
+ ! rcvid is used as a circular buffer of outstanding receive requests.
+ integer :: rcvid(max_gather_block_size)
+ integer :: status(MPI_STATUS_SIZE)
+ integer :: ier ! MPI error code
+
+ signal = 1
+ ! Choose flow-controlled point-to-point vs. plain mpi_gather.
+ ! (gather_block_size is left unset when flow_cntl < 0, but is unused then.)
+ if ( present(flow_cntl) ) then
+ if (flow_cntl >= 0) then
+ gather_block_size = min(max(1,flow_cntl),max_gather_block_size)
+ fc_gather = .true.
+ else
+ fc_gather = .false.
+ endif
+ else
+ gather_block_size = max(1,max_gather_block_size)
+ fc_gather = .true.
+ endif
+
+ if (fc_gather) then
+
+ call mpi_comm_rank (comm, mytid, ier)
+ call mpi_comm_size (comm, mysize, ier)
+ ! All messages in this exchange use tag 0.
+ mtag = 0
+ if (root .eq. mytid) then
+
+ ! prepost gather_block_size irecvs, and start receiving data
+ ! At most 'preposts' receives are outstanding at once: once the window is
+ ! full, wait on the oldest request (tail) before posting a new one (head).
+ preposts = min(mysize-1, gather_block_size)
+ head = 0
+ count = 0
+ do p=0, mysize-1
+ if (p .ne. root) then
+ if (recvcnt > 0) then
+ count = count + 1
+ if (count > preposts) then
+ tail = mod(head,preposts) + 1
+ call mpi_wait (rcvid(tail), status, ier)
+ end if
+ head = mod(head,preposts) + 1
+ ! Uniform chunks: task p's data lands at offset p*recvcnt.
+ displs = p*recvcnt
+ call mpi_irecv ( recvbuf(displs+1), recvcnt, &
+ recvtype, p, mtag, comm, rcvid(head), &
+ ier )
+ ! Grant task p permission to send its data.
+ call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier )
+ end if
+ end if
+ end do
+
+ ! copy local data
+ displs = mytid*recvcnt
+ do i=1,sendcnt
+ recvbuf(displs+i) = sendbuf(i)
+ enddo
+
+ ! wait for final data
+ do i=1,min(count,preposts)
+ call mpi_wait (rcvid(i), status, ier)
+ enddo
+
+ else
+
+ ! Non-root: block until the root's go-ahead signal arrives, then send.
+ if (sendcnt > 0) then
+ call mpi_recv ( signal, 1, sendtype, root, mtag, comm, &
+ status, ier )
+ call mpi_send ( sendbuf, sendcnt, sendtype, root, mtag, &
+ comm, ier )
+ end if
+
+ endif
+
+ else
+
+ ! No flow control requested: defer to the MPI collective.
+ call mpi_gather (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnt, recvtype, &
+ root, comm, ier)
+ endif
+
+ return
+ end subroutine fc_gather_int
+
+!BOP -------------------------------------------------------------------
+!
+! !IROUTINE: fc_gatherv_int - Gather an array of type integer
+!
+! !DESCRIPTION:
+! This routine gathers a {\em distributed} array of type {\em integer}
+! to the {\tt root} process. Explicit handshaking messages are used
+! to control the number of processes communicating with the root
+! at any one time.
+!
+! If flow_cntl optional parameter
+! < 0 : use MPI_Gatherv
+! >= 0: use point-to-point with handshaking messages and
+! preposting receive requests up to
+! min(max(1,flow_cntl),max_gather_block_size)
+! ahead if optional flow_cntl parameter is present.
+! Otherwise, fc_gather_flow_cntl is used in its place.
+! Default value is max_gather_block_size.
+! !INTERFACE:
+!
+ subroutine fc_gatherv_int (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnts, displs, recvtype, &
+ root, comm, flow_cntl )
+!
+! !USES:
+!
+ use mpi_mod
+
+!
+! !INPUT PARAMETERS:
+!
+ integer, intent(in) :: sendbuf(*)
+ integer, intent(in) :: sendcnt
+ integer, intent(in) :: sendtype
+ integer, dimension(:), intent(in) :: recvcnts
+ integer, dimension(:), intent(in) :: displs
+ integer, intent(in) :: recvtype
+ integer, intent(in) :: root
+ integer, intent(in) :: comm
+ integer, optional, intent(in) :: flow_cntl
+
+! !OUTPUT PARAMETERS:
+!
+ ! Only referenced on the root task (standard MPI gatherv semantics).
+ integer, intent(out) :: recvbuf(*)
+
+!EOP ___________________________________________________________________
+
+ ! Handshake token the root sends to grant a task permission to transmit.
+ integer :: signal
+ logical :: fc_gather ! use explicit flow control?
+ integer :: gather_block_size ! number of preposted receive requests
+
+ integer :: mytid, mysize, mtag, p, q, i, count
+ integer :: preposts, head, tail
+ ! rcvid is used as a circular buffer of outstanding receive requests.
+ integer :: rcvid(max_gather_block_size)
+ integer :: status(MPI_STATUS_SIZE)
+ integer :: ier ! MPI error code
+
+ signal = 1
+ ! Choose flow-controlled point-to-point vs. plain mpi_gatherv.
+ ! (gather_block_size is left unset when flow_cntl < 0, but is unused then.)
+ if ( present(flow_cntl) ) then
+ if (flow_cntl >= 0) then
+ gather_block_size = min(max(1,flow_cntl),max_gather_block_size)
+ fc_gather = .true.
+ else
+ fc_gather = .false.
+ endif
+ else
+ gather_block_size = max(1,max_gather_block_size)
+ fc_gather = .true.
+ endif
+
+ if (fc_gather) then
+
+ call mpi_comm_rank (comm, mytid, ier)
+ call mpi_comm_size (comm, mysize, ier)
+ ! All messages in this exchange use tag 0.
+ mtag = 0
+ if (root .eq. mytid) then
+
+ ! prepost gather_block_size irecvs, and start receiving data
+ ! At most 'preposts' receives are outstanding at once: once the window is
+ ! full, wait on the oldest request (tail) before posting a new one (head).
+ preposts = min(mysize-1, gather_block_size)
+ head = 0
+ count = 0
+ do p=0, mysize-1
+ if (p .ne. root) then
+ ! recvcnts/displs are 1-based Fortran arrays indexed by task id + 1.
+ q = p+1
+ if (recvcnts(q) > 0) then
+ count = count + 1
+ if (count > preposts) then
+ tail = mod(head,preposts) + 1
+ call mpi_wait (rcvid(tail), status, ier)
+ end if
+ head = mod(head,preposts) + 1
+ call mpi_irecv ( recvbuf(displs(q)+1), recvcnts(q), &
+ recvtype, p, mtag, comm, rcvid(head), &
+ ier )
+ ! Grant task p permission to send its data.
+ call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier )
+ end if
+ end if
+ end do
+
+ ! copy local data
+ q = mytid+1
+ do i=1,sendcnt
+ recvbuf(displs(q)+i) = sendbuf(i)
+ enddo
+
+ ! wait for final data
+ do i=1,min(count,preposts)
+ call mpi_wait (rcvid(i), status, ier)
+ enddo
+
+ else
+
+ ! Non-root: block until the root's go-ahead signal arrives, then send.
+ if (sendcnt > 0) then
+ call mpi_recv ( signal, 1, sendtype, root, mtag, comm, &
+ status, ier )
+ call mpi_send ( sendbuf, sendcnt, sendtype, root, mtag, &
+ comm, ier )
+ end if
+
+ endif
+
+ else
+
+ ! No flow control requested: defer to the MPI collective.
+ call mpi_gatherv (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnts, displs, recvtype, &
+ root, comm, ier)
+
+ endif
+
+ return
+
+ end subroutine fc_gatherv_int
+
+!BOP -------------------------------------------------------------------
+!
+! !IROUTINE: fc_gatherv_real4 - Gather an array of type real*4
+!
+! !DESCRIPTION:
+! This routine gathers a {\em distributed} array of type {\em real*4} to
+! the {\tt root} process. Explicit handshaking messages are used
+! to control the number of processes communicating with the root
+! at any one time.
+!
+! If flow_cntl optional parameter
+! < 0 : use MPI_Gatherv
+! >= 0: use point-to-point with handshaking messages and
+! preposting receive requests up to
+! min(max(1,flow_cntl),max_gather_block_size)
+! ahead if optional flow_cntl parameter is present.
+! Otherwise, fc_gather_flow_cntl is used in its place.
+! Default value is max_gather_block_size.
+! !INTERFACE:
+!
+ subroutine fc_gatherv_real4 (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnts, displs, recvtype, &
+ root, comm, flow_cntl )
+!
+! !USES:
+!
+ use mpi_mod
+
+!
+! !INPUT PARAMETERS:
+!
+ real(4), intent(in) :: sendbuf(*)
+ integer, intent(in) :: sendcnt
+ integer, intent(in) :: sendtype
+ integer, dimension(:), intent(in) :: recvcnts
+ integer, dimension(:), intent(in) :: displs
+ integer, intent(in) :: recvtype
+ integer, intent(in) :: root
+ integer, intent(in) :: comm
+ integer, optional, intent(in) :: flow_cntl
+
+! !OUTPUT PARAMETERS:
+!
+ ! Only referenced on the root task (standard MPI gatherv semantics).
+ real(4), intent(out) :: recvbuf(*)
+
+!EOP ___________________________________________________________________
+
+ ! Handshake token the root sends to grant a task permission to transmit.
+ real(4) :: signal
+ logical :: fc_gather ! use explicit flow control?
+ integer :: gather_block_size ! number of preposted receive requests
+
+ integer :: mytid, mysize, mtag, p, q, i, count
+ integer :: preposts, head, tail
+ ! rcvid is used as a circular buffer of outstanding receive requests.
+ integer :: rcvid(max_gather_block_size)
+ integer :: status(MPI_STATUS_SIZE)
+ integer :: ier ! MPI error code
+
+ signal = 1.0
+ ! Choose flow-controlled point-to-point vs. plain mpi_gatherv.
+ ! (gather_block_size is left unset when flow_cntl < 0, but is unused then.)
+ if ( present(flow_cntl) ) then
+ if (flow_cntl >= 0) then
+ gather_block_size = min(max(1,flow_cntl),max_gather_block_size)
+ fc_gather = .true.
+ else
+ fc_gather = .false.
+ endif
+ else
+ gather_block_size = max(1,max_gather_block_size)
+ fc_gather = .true.
+ endif
+
+ if (fc_gather) then
+
+ call mpi_comm_rank (comm, mytid, ier)
+ call mpi_comm_size (comm, mysize, ier)
+ ! All messages in this exchange use tag 0.
+ mtag = 0
+ if (root .eq. mytid) then
+
+ ! prepost gather_block_size irecvs, and start receiving data
+ ! At most 'preposts' receives are outstanding at once: once the window is
+ ! full, wait on the oldest request (tail) before posting a new one (head).
+ preposts = min(mysize-1, gather_block_size)
+ head = 0
+ count = 0
+ do p=0, mysize-1
+ if (p .ne. root) then
+ ! recvcnts/displs are 1-based Fortran arrays indexed by task id + 1.
+ q = p+1
+ if (recvcnts(q) > 0) then
+ count = count + 1
+ if (count > preposts) then
+ tail = mod(head,preposts) + 1
+ call mpi_wait (rcvid(tail), status, ier)
+ end if
+ head = mod(head,preposts) + 1
+ call mpi_irecv ( recvbuf(displs(q)+1), recvcnts(q), &
+ recvtype, p, mtag, comm, rcvid(head), &
+ ier )
+ ! Grant task p permission to send its data.
+ call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier )
+ end if
+ end if
+ end do
+
+ ! copy local data
+ q = mytid+1
+ do i=1,sendcnt
+ recvbuf(displs(q)+i) = sendbuf(i)
+ enddo
+
+ ! wait for final data
+ do i=1,min(count,preposts)
+ call mpi_wait (rcvid(i), status, ier)
+ enddo
+
+ else
+
+ ! Non-root: block until the root's go-ahead signal arrives, then send.
+ if (sendcnt > 0) then
+ call mpi_recv ( signal, 1, sendtype, root, mtag, comm, &
+ status, ier )
+ call mpi_send ( sendbuf, sendcnt, sendtype, root, mtag, &
+ comm, ier )
+ end if
+
+ endif
+
+ else
+
+ ! No flow control requested: defer to the MPI collective.
+ call mpi_gatherv (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnts, displs, recvtype, &
+ root, comm, ier)
+
+ endif
+
+ return
+
+ end subroutine fc_gatherv_real4
+
+!BOP -------------------------------------------------------------------
+!
+! !IROUTINE: fc_gatherv_real8 - Gather an array of type real*8
+!
+! !DESCRIPTION:
+! This routine gathers a {\em distributed} array of type {\em real*8} to
+! the {\tt root} process. Explicit handshaking messages are used
+! to control the number of processes communicating with the root
+! at any one time.
+!
+! If flow_cntl optional parameter
+! < 0 : use MPI_Gatherv
+! >= 0: use point-to-point with handshaking messages and
+! preposting receive requests up to
+! min(max(1,flow_cntl),max_gather_block_size)
+! ahead if optional flow_cntl parameter is present.
+! Otherwise, fc_gather_flow_cntl is used in its place.
+! Default value is max_gather_block_size.
+! !INTERFACE:
+!
+ subroutine fc_gatherv_real8 (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnts, displs, recvtype, &
+ root, comm, flow_cntl )
+!
+! !USES:
+!
+ use mpi_mod
+
+!
+! !INPUT PARAMETERS:
+!
+ real(8), intent(in) :: sendbuf(*)
+ integer, intent(in) :: sendcnt
+ integer, intent(in) :: sendtype
+ integer, dimension(:), intent(in) :: recvcnts
+ integer, dimension(:), intent(in) :: displs
+ integer, intent(in) :: recvtype
+ integer, intent(in) :: root
+ integer, intent(in) :: comm
+ integer, optional, intent(in) :: flow_cntl
+
+! !OUTPUT PARAMETERS:
+!
+ ! Only referenced on the root task (standard MPI gatherv semantics).
+ real(8), intent(out) :: recvbuf(*)
+
+!EOP ___________________________________________________________________
+
+ ! Handshake token the root sends to grant a task permission to transmit.
+ real(8) :: signal
+ logical :: fc_gather ! use explicit flow control?
+ integer :: gather_block_size ! number of preposted receive requests
+
+ integer :: mytid, mysize, mtag, p, q, i, count
+ integer :: preposts, head, tail
+ ! rcvid is used as a circular buffer of outstanding receive requests.
+ integer :: rcvid(max_gather_block_size)
+ integer :: status(MPI_STATUS_SIZE)
+ integer :: ier ! MPI error code
+
+ signal = 1.0
+ ! Choose flow-controlled point-to-point vs. plain mpi_gatherv.
+ ! (gather_block_size is left unset when flow_cntl < 0, but is unused then.)
+ if ( present(flow_cntl) ) then
+ if (flow_cntl >= 0) then
+ gather_block_size = min(max(1,flow_cntl),max_gather_block_size)
+ fc_gather = .true.
+ else
+ fc_gather = .false.
+ endif
+ else
+ gather_block_size = max(1,max_gather_block_size)
+ fc_gather = .true.
+ endif
+
+ if (fc_gather) then
+
+ call mpi_comm_rank (comm, mytid, ier)
+ call mpi_comm_size (comm, mysize, ier)
+ ! All messages in this exchange use tag 0.
+ mtag = 0
+ if (root .eq. mytid) then
+
+ ! prepost gather_block_size irecvs, and start receiving data
+ ! At most 'preposts' receives are outstanding at once: once the window is
+ ! full, wait on the oldest request (tail) before posting a new one (head).
+ preposts = min(mysize-1, gather_block_size)
+ head = 0
+ count = 0
+ do p=0, mysize-1
+ if (p .ne. root) then
+ ! recvcnts/displs are 1-based Fortran arrays indexed by task id + 1.
+ q = p+1
+ if (recvcnts(q) > 0) then
+ count = count + 1
+ if (count > preposts) then
+ tail = mod(head,preposts) + 1
+ call mpi_wait (rcvid(tail), status, ier)
+ end if
+ head = mod(head,preposts) + 1
+ call mpi_irecv ( recvbuf(displs(q)+1), recvcnts(q), &
+ recvtype, p, mtag, comm, rcvid(head), &
+ ier )
+ ! Grant task p permission to send its data.
+ call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier )
+ end if
+ end if
+ end do
+
+ ! copy local data
+ q = mytid+1
+ do i=1,sendcnt
+ recvbuf(displs(q)+i) = sendbuf(i)
+ enddo
+
+ ! wait for final data
+ do i=1,min(count,preposts)
+ call mpi_wait (rcvid(i), status, ier)
+ enddo
+
+ else
+
+ ! Non-root: block until the root's go-ahead signal arrives, then send.
+ if (sendcnt > 0) then
+ call mpi_recv ( signal, 1, sendtype, root, mtag, comm, &
+ status, ier )
+ call mpi_send ( sendbuf, sendcnt, sendtype, root, mtag, &
+ comm, ier )
+ end if
+
+ endif
+
+ else
+
+ ! No flow control requested: defer to the MPI collective.
+ call mpi_gatherv (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnts, displs, recvtype, &
+ root, comm, ier)
+
+ endif
+
+ return
+
+ end subroutine fc_gatherv_real8
+
+!BOP -------------------------------------------------------------------
+!
+! !IROUTINE: fc_gatherv_log - Gather an array of type logical
+!
+! !DESCRIPTION:
+! This routine gathers a {\em distributed} array of type {\em logical}
+! to the {\tt root} process. Explicit handshaking messages are used
+! to control the number of processes communicating with the root
+! at any one time.
+!
+! If flow_cntl optional parameter
+! < 0 : use MPI_Gatherv
+! >= 0: use point-to-point with handshaking messages and
+! preposting receive requests up to
+! min(max(1,flow_cntl),max_gather_block_size)
+! ahead if optional flow_cntl parameter is present.
+! Otherwise, fc_gather_flow_cntl is used in its place.
+! Default value is max_gather_block_size.
+! !INTERFACE:
+!
+ subroutine fc_gatherv_log (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnts, displs, recvtype, &
+ root, comm, flow_cntl )
+!
+! !USES:
+!
+ use mpi_mod
+
+!
+! !INPUT PARAMETERS:
+!
+ logical, intent(in) :: sendbuf(*)
+ integer, intent(in) :: sendcnt
+ integer, intent(in) :: sendtype
+ integer, dimension(:), intent(in) :: recvcnts
+ integer, dimension(:), intent(in) :: displs
+ integer, intent(in) :: recvtype
+ integer, intent(in) :: root
+ integer, intent(in) :: comm
+ integer, optional, intent(in) :: flow_cntl
+
+! !OUTPUT PARAMETERS:
+!
+ ! Only referenced on the root task (standard MPI gatherv semantics).
+ logical, intent(out) :: recvbuf(*)
+
+!EOP ___________________________________________________________________
+
+ ! Handshake token the root sends to grant a task permission to transmit.
+ logical :: signal
+ logical :: fc_gather ! use explicit flow control?
+ integer :: gather_block_size ! number of preposted receive requests
+
+ integer :: mytid, mysize, mtag, p, q, i, count
+ integer :: preposts, head, tail
+ ! rcvid is used as a circular buffer of outstanding receive requests.
+ integer :: rcvid(max_gather_block_size)
+ integer :: status(MPI_STATUS_SIZE)
+ integer :: ier ! MPI error code
+
+ signal = .true.
+ ! Choose flow-controlled point-to-point vs. plain mpi_gatherv.
+ ! (gather_block_size is left unset when flow_cntl < 0, but is unused then.)
+ if ( present(flow_cntl) ) then
+ if (flow_cntl >= 0) then
+ gather_block_size = min(max(1,flow_cntl),max_gather_block_size)
+ fc_gather = .true.
+ else
+ fc_gather = .false.
+ endif
+ else
+ gather_block_size = max(1,max_gather_block_size)
+ fc_gather = .true.
+ endif
+
+ if (fc_gather) then
+
+ call mpi_comm_rank (comm, mytid, ier)
+ call mpi_comm_size (comm, mysize, ier)
+ ! All messages in this exchange use tag 0.
+ mtag = 0
+ if (root .eq. mytid) then
+
+ ! prepost gather_block_size irecvs, and start receiving data
+ ! At most 'preposts' receives are outstanding at once: once the window is
+ ! full, wait on the oldest request (tail) before posting a new one (head).
+ preposts = min(mysize-1, gather_block_size)
+ head = 0
+ count = 0
+ do p=0, mysize-1
+ if (p .ne. root) then
+ ! recvcnts/displs are 1-based Fortran arrays indexed by task id + 1.
+ q = p+1
+ if (recvcnts(q) > 0) then
+ count = count + 1
+ if (count > preposts) then
+ tail = mod(head,preposts) + 1
+ call mpi_wait (rcvid(tail), status, ier)
+ end if
+ head = mod(head,preposts) + 1
+ call mpi_irecv ( recvbuf(displs(q)+1), recvcnts(q), &
+ recvtype, p, mtag, comm, rcvid(head), &
+ ier )
+ ! Grant task p permission to send its data.
+ call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier )
+ end if
+ end if
+ end do
+
+ ! copy local data
+ q = mytid+1
+ do i=1,sendcnt
+ recvbuf(displs(q)+i) = sendbuf(i)
+ enddo
+
+ ! wait for final data
+ do i=1,min(count,preposts)
+ call mpi_wait (rcvid(i), status, ier)
+ enddo
+
+ else
+
+ ! Non-root: block until the root's go-ahead signal arrives, then send.
+ if (sendcnt > 0) then
+ call mpi_recv ( signal, 1, sendtype, root, mtag, comm, &
+ status, ier )
+ call mpi_send ( sendbuf, sendcnt, sendtype, root, mtag, &
+ comm, ier )
+ end if
+
+ endif
+
+ else
+
+ ! No flow control requested: defer to the MPI collective.
+ call mpi_gatherv (sendbuf, sendcnt, sendtype, &
+ recvbuf, recvcnts, displs, recvtype, &
+ root, comm, ier)
+
+ endif
+
+ return
+
+ end subroutine fc_gatherv_log
+
+end module parallel
diff --git a/components/cism/glimmer-cism/libglimmer/parallel_slap.F90 b/components/cism/glimmer-cism/libglimmer/parallel_slap.F90
new file mode 100644
index 0000000000..445700fb31
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/parallel_slap.F90
@@ -0,0 +1,2699 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! parallel_slap.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+module parallel
+
+ use netcdf
+ implicit none
+
+ !NOTE: The glam/glissade dycore currently requires nhalo = 2,
+ ! whereas the glide dycore requires nhalo = 0.
+ ! For glide simulations, we set nhalo = 0 by calling distributed_grid
+ ! with optional argument nhalo = 0.
+
+ integer, save :: nhalo = 2
+
+ !TODO - Define lhalo and uhalo in terms of nhalo.
+
+ integer, save :: lhalo = 2
+ integer, save :: uhalo = 2
+
+ integer, save :: staggered_lhalo = 2
+ integer, save :: staggered_uhalo = 1
+
+#ifdef _USE_MPI_WITH_SLAP
+ logical,save :: main_task
+ integer,save :: this_rank
+ integer,save :: tasks
+ integer,save :: comm
+#else
+ logical,parameter :: main_task = .true.
+ integer,parameter :: this_rank = 0
+ integer,parameter :: tasks = 1
+#endif
+
+ ! distributed grid
+ integer,save :: global_ewn,global_nsn,local_ewn,local_nsn,own_ewn,own_nsn
+ integer,save :: global_col_offset, global_row_offset
+
+ integer,save :: ewlb,ewub,nslb,nsub
+ integer,save :: east,north,south,west
+
+ ! global boundary conditions
+ logical,save :: periodic_bc ! doubly periodic
+ logical,save :: outflow_bc ! if true, set scalars in global halo to zero
+ ! does not apply to staggered variables (e.g., uvel, vvel)
+
+ ! global IDs
+ integer,parameter :: ProcsEW = 1
+
+ !TODO - Remove these gathered_* declarations. No longer used.
+
+ ! JEFF Declarations for undistributed variables on main_task.
+ ! Later move to separate module? These are only temporary until code is completely distributed.
+ real(8),dimension(:,:,:),allocatable :: gathered_efvs ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:,:),allocatable :: gathered_efvs2 ! Variable for testing that scatter/gather are inverses
+ real(8),dimension(:,:,:),allocatable :: gathered_uvel ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:,:),allocatable :: gathered_vvel ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:),allocatable :: gathered_uflx ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:),allocatable :: gathered_vflx ! Output var from glam_velo_fordsiapstr(), used often
+ real(8),dimension(:,:,:),allocatable :: gathered_velnorm ! Variable calculated in run_ho_diagnostic(), is this used?
+ real(8),dimension(:,:),allocatable :: gathered_thck ! Used in horizontal_remap_in()
+ real(8),dimension(:,:),allocatable :: gathered_stagthck ! Used in horizontal_remap_in()
+ real(4),dimension(:,:),allocatable :: gathered_acab ! Used in horizontal_remap_in()
+ real(8),dimension(:,:,:),allocatable :: gathered_temp ! Used in horizontal_remap_in()
+ real(8),dimension(:,:),allocatable :: gathered_dusrfdew ! Used in glide_stress()
+ real(8),dimension(:,:),allocatable :: gathered_dusrfdns ! Used in glide_stress()
+ real(8),dimension(:,:),allocatable :: gathered_dthckdew ! Used in glide_stress()
+ real(8),dimension(:,:),allocatable :: gathered_dthckdns ! Used in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauxx ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauyy ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauxy ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauscalar ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauxz ! Calculated in glide_stress()
+ real(8),dimension(:,:,:),allocatable :: gathered_tauyz ! Calculated in glide_stress()
+ real(8),dimension(:,:),allocatable :: gathered_topg ! Bedrock topology, Used in glide_set_mask()
+ integer,dimension(:,:),allocatable :: gathered_thkmask ! Calculated in glide_set_mask()
+ real(8),dimension(:,:),allocatable :: gathered_marine_bc_normal ! Calculated in glide_marine_margin_normal()
+ real(8),dimension(:,:,:),allocatable :: gathered_surfvel ! Used in calc_gline_flux()
+ real(8),dimension(:,:),allocatable :: gathered_gline_flux ! Calculated in calc_gline_flux()
+ real(8),dimension(:,:),allocatable :: gathered_ubas ! Used in calc_gline_flux()
+ real(8),dimension(:,:),allocatable :: gathered_vbas ! Used in calc_gline_flux()
+ real(8),dimension(:,:),allocatable :: gathered_relx ! Used in glide_marinlim()
+ real(8),dimension(:,:,:),allocatable :: gathered_flwa ! Used in glide_marinlim()
+ real(4),dimension(:,:),allocatable :: gathered_calving ! Used in glide_marinlim()
+ real(4),dimension(:,:),allocatable :: gathered_backstress ! Used in glide_marinlim()
+ real(8),dimension(:,:),allocatable :: gathered_usrf ! Used in glide_marinlim()
+ logical,dimension(:,:),allocatable :: gathered_backstressmap ! Used in glide_marinlim()
+ real(8),dimension(:,:),allocatable :: gathered_tau_x ! Calculated in calc_basal_shear()
+ real(8),dimension(:,:),allocatable :: gathered_tau_y ! Calculated in calc_basal_shear()
+ real(8),dimension(:,:),allocatable :: gathered_lsrf ! Used in glide_marinlim()
+
+ interface broadcast
+ module procedure broadcast_character
+ module procedure broadcast_integer
+ module procedure broadcast_integer_1d
+ module procedure broadcast_logical
+ module procedure broadcast_real4
+ module procedure broadcast_real4_1d
+ module procedure broadcast_real8
+ module procedure broadcast_real8_1d
+ end interface
+
+ interface distributed_gather_var
+ module procedure distributed_gather_var_integer_2d
+ module procedure distributed_gather_var_logical_2d
+ module procedure distributed_gather_var_real4_2d
+ module procedure distributed_gather_var_real4_3d
+ module procedure distributed_gather_var_real8_2d
+ module procedure distributed_gather_var_real8_3d
+ end interface
+
+ interface distributed_get_var
+ module procedure distributed_get_var_integer_2d
+ module procedure distributed_get_var_real4_1d
+ module procedure distributed_get_var_real4_2d
+ module procedure distributed_get_var_real8_1d
+ module procedure distributed_get_var_real8_2d
+ module procedure distributed_get_var_real8_3d
+ end interface
+
+ interface distributed_print
+ ! Gathers a distributed variable and writes to file
+ module procedure distributed_print_integer_2d
+ module procedure distributed_print_real8_2d
+ module procedure distributed_print_real8_3d
+ end interface
+
+ interface distributed_put_var
+ module procedure distributed_put_var_integer_2d
+ module procedure distributed_put_var_real4_1d
+ module procedure distributed_put_var_real4_2d
+ module procedure distributed_put_var_real8_1d
+ module procedure distributed_put_var_real8_2d
+ module procedure distributed_put_var_real8_3d
+
+ !TODO - Should the parallel_put_var routines be part of this interface?
+ module procedure parallel_put_var_real4
+ module procedure parallel_put_var_real8
+ end interface
+
+ interface parallel_convert_haloed_to_nonhaloed
+ module procedure parallel_convert_haloed_to_nonhaloed_real4_2d
+ module procedure parallel_convert_haloed_to_nonhaloed_real8_2d
+ end interface parallel_convert_haloed_to_nonhaloed
+
+ interface parallel_convert_nonhaloed_to_haloed
+ module procedure parallel_convert_nonhaloed_to_haloed_real4_2d
+ module procedure parallel_convert_nonhaloed_to_haloed_real8_2d
+ end interface parallel_convert_nonhaloed_to_haloed
+
+ interface parallel_def_var
+ module procedure parallel_def_var_dimids
+ module procedure parallel_def_var_nodimids
+ end interface
+
+ interface parallel_get_att
+ module procedure parallel_get_att_character
+ module procedure parallel_get_att_real4
+ module procedure parallel_get_att_real4_1d
+ module procedure parallel_get_att_real8
+ module procedure parallel_get_att_real8_1d
+ end interface
+
+ interface distributed_scatter_var
+ module procedure distributed_scatter_var_integer_2d
+ module procedure distributed_scatter_var_logical_2d
+ module procedure distributed_scatter_var_real4_2d
+ module procedure distributed_scatter_var_real4_3d
+ module procedure distributed_scatter_var_real8_2d
+ module procedure distributed_scatter_var_real8_3d
+ end interface
+
+ interface global_sum
+ module procedure global_sum_real8_scalar
+ module procedure global_sum_real8_1d
+ end interface
+
+ interface parallel_get_var
+ module procedure parallel_get_var_integer_1d
+ module procedure parallel_get_var_real4_1d
+ module procedure parallel_get_var_real8_1d
+ end interface
+
+ interface parallel_halo
+ module procedure parallel_halo_integer_2d
+ module procedure parallel_halo_logical_2d
+ module procedure parallel_halo_real4_2d
+ module procedure parallel_halo_real8_2d
+ module procedure parallel_halo_real8_3d
+ end interface
+
+ interface parallel_halo_verify
+ module procedure parallel_halo_verify_integer_2d
+ module procedure parallel_halo_verify_real8_2d
+ module procedure parallel_halo_verify_real8_3d
+ end interface
+
+ interface parallel_print
+ module procedure parallel_print_integer_2d
+ module procedure parallel_print_real8_2d
+ module procedure parallel_print_real8_3d
+ end interface
+
+ interface parallel_put_att
+ module procedure parallel_put_att_character
+ module procedure parallel_put_att_real4
+ module procedure parallel_put_att_real4_1d
+ module procedure parallel_put_att_real8
+ module procedure parallel_put_att_real8_1d
+ end interface
+
+ interface parallel_put_var
+ module procedure parallel_put_var_real4
+ module procedure parallel_put_var_real8
+ module procedure parallel_put_var_real8_1d
+ end interface
+
+ interface parallel_reduce_sum
+ module procedure parallel_reduce_sum_integer
+ module procedure parallel_reduce_sum_real4
+ module procedure parallel_reduce_sum_real8
+ module procedure parallel_reduce_sum_real8_nvar
+ end interface
+
+ interface parallel_reduce_max
+ module procedure parallel_reduce_max_integer
+ module procedure parallel_reduce_max_real4
+ module procedure parallel_reduce_max_real8
+ end interface
+
+ interface parallel_reduce_min
+ module procedure parallel_reduce_min_integer
+ module procedure parallel_reduce_min_real4
+ module procedure parallel_reduce_min_real8
+ end interface
+
+ ! This reduce interface determines the global min value and the processor on which it occurs
+ interface parallel_reduce_maxloc
+ module procedure parallel_reduce_maxloc_integer
+ module procedure parallel_reduce_maxloc_real4
+ module procedure parallel_reduce_maxloc_real8
+ end interface
+
+ ! This reduce interface determines the global min value and the processor on which it occurs
+ interface parallel_reduce_minloc
+ module procedure parallel_reduce_minloc_integer
+ module procedure parallel_reduce_minloc_real4
+ module procedure parallel_reduce_minloc_real8
+ end interface
+
+ interface staggered_parallel_halo
+ module procedure staggered_parallel_halo_integer_2d
+ module procedure staggered_parallel_halo_integer_3d
+ module procedure staggered_parallel_halo_real8_2d
+ module procedure staggered_parallel_halo_real8_3d
+ module procedure staggered_parallel_halo_real8_4d
+ end interface
+
+ interface staggered_parallel_halo_extrapolate
+ module procedure staggered_parallel_halo_extrapolate_integer_2d
+ module procedure staggered_parallel_halo_extrapolate_real8_2d
+ end interface
+
+contains
+
+ subroutine broadcast_character(c, proc)
+ implicit none
+ character(len=*) :: c
+ integer, intent(in), optional :: proc ! optional argument indicating which processor to broadcast from - not relevant to serial version
+ end subroutine broadcast_character
+
+ subroutine broadcast_integer(i, proc)
+ implicit none
+ integer :: i
+ integer, intent(in), optional :: proc ! optional argument indicating which processor to broadcast from - not relevant to serial version
+ end subroutine broadcast_integer
+
+ subroutine broadcast_integer_1d(a, proc)
+ implicit none
+ integer,dimension(:) :: a
+ integer, intent(in), optional :: proc ! optional argument indicating which processor to broadcast from - not relevant to serial version
+ end subroutine broadcast_integer_1d
+
+ subroutine broadcast_logical(l, proc)
+ implicit none
+ logical :: l
+ integer, intent(in), optional :: proc ! optional argument indicating which processor to broadcast from - not relevant to serial version
+ end subroutine broadcast_logical
+
+ subroutine broadcast_real4(r, proc)
+ implicit none
+ real(4) :: r
+ integer, intent(in), optional :: proc ! optional argument indicating which processor to broadcast from - not relevant to serial version
+ end subroutine broadcast_real4
+
+ subroutine broadcast_real4_1d(a, proc)
+ real(4),dimension(:) :: a
+ integer, intent(in), optional :: proc ! optional argument indicating which processor to broadcast from - not relevant to serial version
+ end subroutine broadcast_real4_1d
+
+ subroutine broadcast_real8(r, proc)
+ implicit none
+ real(8) :: r
+ integer, intent(in), optional :: proc ! optional argument indicating which processor to broadcast from - not relevant to serial version
+ end subroutine broadcast_real8
+
+ subroutine broadcast_real8_1d(a, proc)
+ implicit none
+ real(8),dimension(:) :: a
+ integer, intent(in), optional :: proc ! optional argument indicating which processor to broadcast from - not relevant to serial version
+ end subroutine broadcast_real8_1d
+
+ function distributed_get_var_integer_2d(ncid,varid,values,start)
+
+ implicit none
+ integer :: distributed_get_var_integer_2d,ncid,varid
+ integer,dimension(:) :: start
+ integer,dimension(:,:) :: values
+
+ integer :: ilo, ihi, jlo, jhi
+
+ ! begin
+
+ if (main_task) then
+
+ if (size(values,1)==local_ewn) then
+ ilo = 1 + lhalo
+ ihi = local_ewn - uhalo
+ jlo = 1 + lhalo
+ jhi = local_nsn - uhalo
+ else if (size(values,1)==local_ewn-1) then
+ ilo = 1 + staggered_lhalo
+ ihi = local_ewn - 1 - uhalo
+ jlo = 1 + staggered_lhalo
+ jhi = local_nsn - 1 - uhalo
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+
+ distributed_get_var_integer_2d = &
+ nf90_get_var(ncid,varid,values(ilo:ihi,jlo:jhi),start)
+
+ endif
+
+ end function distributed_get_var_integer_2d
+
+ function distributed_get_var_real4_1d(ncid,varid,values,start)
+
+ implicit none
+ integer :: distributed_get_var_real4_1d,ncid,varid
+ integer,dimension(:) :: start
+ real(4),dimension(:) :: values
+
+ integer :: status, x1id, y1id
+ integer :: ilo, ihi
+
+ ! begin
+
+ if (main_task) then
+
+ status = nf90_inq_varid(ncid,"x1",x1id)
+ status = nf90_inq_varid(ncid,"y1",y1id)
+ if (varid==x1id) then
+ ilo = 1+lhalo
+ ihi = local_ewn - uhalo
+ else if (varid==y1id) then
+ ilo = 1+lhalo
+ ihi = local_nsn - uhalo
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+
+ distributed_get_var_real4_1d = &
+ nf90_get_var(ncid,varid,values(ilo:ihi),start)
+
+ endif
+
+ end function distributed_get_var_real4_1d
+
+ function distributed_get_var_real4_2d(ncid,varid,values,start)
+
+ implicit none
+ integer :: distributed_get_var_real4_2d,ncid,varid
+ integer,dimension(:) :: start
+ real(4),dimension(:,:) :: values
+
+ integer :: ilo, ihi, jlo, jhi
+
+ ! begin
+
+ if (main_task) then
+
+ if (size(values,1)==local_ewn) then
+ ilo = 1 + lhalo
+ ihi = local_ewn - uhalo
+ jlo = 1 + lhalo
+ jhi = local_nsn - uhalo
+ else if (size(values,1)==local_ewn-1) then
+ ilo = 1 + lhalo
+ ihi = local_ewn - 1 - uhalo
+ jlo = 1 + lhalo
+ jhi = local_nsn - 1 - uhalo
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+
+ distributed_get_var_real4_2d = &
+ nf90_get_var(ncid,varid,values(ilo:ihi,jlo:jhi),start)
+
+ endif
+
+ end function distributed_get_var_real4_2d
+
+ !WHL - added this function
+
+ function distributed_get_var_real8_1d(ncid,varid,values,start)
+
+ implicit none
+ integer :: distributed_get_var_real8_1d,ncid,varid
+ integer,dimension(:) :: start
+ real(8),dimension(:) :: values
+
+ integer :: status, x1id, y1id
+ integer :: ilo, ihi
+
+ ! begin
+
+ if (main_task) then
+
+ status = nf90_inq_varid(ncid,"x1",x1id)
+ status = nf90_inq_varid(ncid,"y1",y1id)
+ if (varid==x1id) then
+ ilo = 1+lhalo
+ ihi = local_ewn - uhalo
+ else if (varid==y1id) then
+ ilo = 1+lhalo
+ ihi = local_nsn - uhalo
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+
+ distributed_get_var_real8_1d = &
+ nf90_get_var(ncid,varid,values(ilo:ihi),start)
+
+ endif
+
+ end function distributed_get_var_real8_1d
+
+ function distributed_get_var_real8_2d(ncid,varid,values,start)
+ implicit none
+ integer :: distributed_get_var_real8_2d,ncid,varid
+ integer,dimension(:) :: start
+ real(8),dimension(:,:) :: values
+
+ integer :: ilo, ihi, jlo, jhi
+
+ ! begin
+
+ if (main_task) then
+
+ if (size(values,1)==local_ewn) then
+ ilo = 1 + lhalo
+ ihi = local_ewn - uhalo
+ jlo = 1 + lhalo
+ jhi = local_nsn - uhalo
+ else if (size(values,1)==local_ewn-1) then
+ ilo = 1 + lhalo
+ ihi = local_ewn - 1 - uhalo
+ jlo = 1 + lhalo
+ jhi = local_nsn - 1 - uhalo
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+
+ distributed_get_var_real8_2d = &
+ nf90_get_var(ncid,varid,values(ilo:ihi,jlo:jhi),start)
+
+ endif
+
+ end function distributed_get_var_real8_2d
+
+ function distributed_get_var_real8_3d(ncid,varid,values,start)
+
+ implicit none
+ integer :: distributed_get_var_real8_3d,ncid,varid
+ integer,dimension(:) :: start
+ real(8),dimension(:,:,:) :: values
+
+ integer :: ilo, ihi, jlo, jhi
+
+ ! begin
+
+ if (main_task) then
+
+ if (size(values,1)==local_ewn) then
+ ilo = 1 + lhalo
+ ihi = local_ewn - uhalo
+ jlo = 1 + lhalo
+ jhi = local_nsn - uhalo
+ else if (size(values,1)==local_ewn-1) then
+ ilo = 1 + lhalo
+ ihi = local_ewn - 1 - uhalo
+ jlo = 1 + lhalo
+ jhi = local_nsn - 1 - uhalo
+ else
+ call parallel_stop(__FILE__,__LINE__)
+ end if
+
+ distributed_get_var_real8_3d = &
+ nf90_get_var(ncid,varid,values(ilo:ihi,jlo:jhi,:),start)
+
+ endif
+
+ end function distributed_get_var_real8_3d
+
+
+ subroutine distributed_grid(ewn, nsn, nhalo_in, periodic_bc_in, outflow_bc_in)
+
+ implicit none
+
+ integer, intent(inout) :: ewn, nsn ! global grid dimensions
+ integer, intent(in), optional :: nhalo_in ! number of rows of halo cells
+ logical, intent(in), optional :: periodic_bc_in ! true for periodic global BCs
+ logical, intent(in), optional :: outflow_bc_in ! true for outflow global BCs
+ ! (scalars in global halo set to zero)
+
+ integer :: ewrank,ewtasks,nsrank,nstasks
+
+ ! Optionally, change the halo values
+ ! Note: The higher-order dycores (glam, glissade) currently require nhalo = 2.
+ ! The Glide SIA dycore requires nhalo = 0.
+ ! The default halo values at the top of the module are appropriate for
+ ! the higher-order dycores. Here they can be reset to zero for Glide.
+
+ if (present(nhalo_in)) then
+ if (main_task) then
+ write(*,*) 'Setting halo values: nhalo =', nhalo_in
+ if (nhalo_in < 0) then
+ write(*,*) 'ERROR: nhalo must be >= 0'
+ call parallel_stop(__FILE__, __LINE__)
+ endif
+ endif
+ nhalo = nhalo_in
+ lhalo = nhalo
+ uhalo = nhalo
+ staggered_lhalo = lhalo
+ staggered_uhalo = max(uhalo-1, 0)
+ endif
+
+ ! initialize some grid quantities to be consistent with parallel_mpi
+
+ global_ewn = ewn
+ global_nsn = nsn
+
+ global_row_offset = 0
+ global_col_offset = 0
+
+ ewrank = 0
+ nsrank = 0
+ ewtasks = 1
+ nstasks = 1
+
+ east = 0 ! all halo updates are local copies by the main task
+ west = 0
+ north = 0
+ south = 0
+
+! Trey's original code
+! ewlb = 1
+! ewub = global_ewn
+! local_ewn = ewub-ewlb+1
+! own_ewn = local_ewn-lhalo-uhalo
+! ewn = local_ewn
+
+! nslb = 1
+! nsub = global_nsn
+! local_nsn = nsub-nslb+1
+! own_nsn = local_nsn-lhalo-uhalo
+! nsn = local_nsn
+
+!WHL - modified code for nonzero halo values
+ ewlb = 1 - lhalo
+ ewub = global_ewn + uhalo
+ local_ewn = ewub - ewlb + 1
+ own_ewn = local_ewn - lhalo - uhalo
+ ewn = local_ewn
+
+ nslb = 1 - lhalo
+ nsub = global_nsn + uhalo
+ local_nsn = nsub - nslb + 1
+ own_nsn = local_nsn - lhalo - uhalo
+ nsn = local_nsn
+
+ !WHL - added global boundary conditions
+
+ periodic_bc = .true. ! this is the default
+ outflow_bc = .false.
+
+ if (present(outflow_bc_in)) then
+ outflow_bc = outflow_bc_in
+ if (outflow_bc) periodic_bc = .false.
+ endif
+
+ if (present(periodic_bc_in)) then
+ periodic_bc = periodic_bc_in
+ if (periodic_bc) outflow_bc = .false.
+ endif
+
+ !WHL - debug
+ if (outflow_bc) write(*,*) "Outflow global boundary conditions"
+ if (periodic_bc) write(*,*) "Periodic global boundary conditions"
+
+ ! Print grid geometry
+ write(*,*) "Process ", this_rank, " Total = ", tasks, " ewtasks = ", ewtasks, " nstasks = ", nstasks
+ write(*,*) "Process ", this_rank, " ewrank = ", ewrank, " nsrank = ", nsrank
+ write(*,*) "Process ", this_rank, " l_ewn = ", local_ewn, " o_ewn = ", own_ewn
+ write(*,*) "Process ", this_rank, " l_nsn = ", local_nsn, " o_nsn = ", own_nsn
+ write(*,*) "Process ", this_rank, " ewlb = ", ewlb, " ewub = ", ewub
+ write(*,*) "Process ", this_rank, " nslb = ", nslb, " nsub = ", nsub
+ write(*,*) "Process ", this_rank, " east = ", east, " west = ", west
+ write(*,*) "Process ", this_rank, " north = ", north, " south = ", south
+ write(*,*) "Process ", this_rank, " ew_vars = ", own_ewn, " ns_vars = ", own_nsn
+
+ end subroutine distributed_grid
+
+ function distributed_execution()
+ ! Returns if running distributed or not.
+ logical distributed_execution
+
+ distributed_execution = .false.
+ end function distributed_execution
+
+ subroutine distributed_gather_var_integer_2d(values, global_values)
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocateable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Variables are assumed to lie on the scalar grid (at cell centers).
+ implicit none
+ integer,dimension(:,:),intent(in) :: values
+ integer,dimension(:,:),allocatable,intent(inout) :: global_values
+
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+
+ !WHL - Commented code will not work if the local arrays include halo cells
+!! allocate(global_values(size(values,1), size(values,2)))
+!! global_values(:,:) = values(:,:)
+ allocate(global_values(size(values,1)-uhalo-lhalo, size(values,2)-uhalo-lhalo))
+ global_values(:,:) = values(1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo)
+
+ end subroutine distributed_gather_var_integer_2d
+
+ subroutine distributed_gather_var_logical_2d(values, global_values)
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocateable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Variables are assumed to lie on the scalar grid (at cell centers).
+ implicit none
+ logical,dimension(:,:),intent(in) :: values
+ logical,dimension(:,:),allocatable,intent(inout) :: global_values
+
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+
+ !WHL - Commented code will not work if the local arrays include halo cells
+!! allocate(global_values(size(values,1), size(values,2)))
+!! global_values(:,:) = values(:,:)
+ allocate(global_values(size(values,1)-uhalo-lhalo, size(values,2)-uhalo-lhalo))
+ global_values(:,:) = values(1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo)
+
+ end subroutine distributed_gather_var_logical_2d
+
+ subroutine distributed_gather_var_real4_2d(values, global_values)
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocateable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Variables are assumed to lie on the scalar grid (at cell centers).
+ implicit none
+ real(4),dimension(:,:),intent(in) :: values
+ real(4),dimension(:,:),allocatable,intent(inout) :: global_values
+
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+
+ !WHL - Commented code will not work if the local arrays include halo cells
+!! allocate(global_values(size(values,1), size(values,2)))
+!! global_values(:,:) = values(:,:)
+ allocate(global_values(size(values,1)-uhalo-lhalo, size(values,2)-uhalo-lhalo))
+ global_values(:,:) = values(1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo)
+
+ end subroutine distributed_gather_var_real4_2d
+
+ subroutine distributed_gather_var_real4_3d(values, global_values, ld1, ud1)
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocateable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Variables are assumed to lie on the scalar grid (at cell centers).
+ implicit none
+ real(4),dimension(:,:,:),intent(in) :: values
+ real(4),dimension(:,:,:),allocatable,intent(inout) :: global_values
+ integer,optional,intent(in) :: ld1, ud1
+
+ integer :: d1l,d1u
+
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ if (present(ld1)) then
+ d1l = ld1
+ else
+ d1l = 1
+ endif
+ if (present(ud1)) then
+ d1u = ud1
+ else
+ d1u = size(values,1)
+ endif
+ if (size(values,1) /= d1u-d1l+1) then
+ write(*,*) "size(values,1) .ne. d1u-d1l+1 in gather call"
+ call parallel_stop(__FILE__, __LINE__)
+ endif
+
+ !WHL - Commented code will not work if the local arrays include halo cells
+!! allocate(global_values(d1l:d1u, size(values,2), size(values,3)))
+!! global_values(d1l:d1u,:,:) = values(1:size(values,1),:,:)
+ allocate(global_values(d1l:d1u, size(values,2)-uhalo-lhalo, size(values,3)-uhalo-lhalo))
+ global_values(d1l:d1u,:,:) = values(1:size(values,1), 1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo)
+
+ end subroutine distributed_gather_var_real4_3d
+
+ subroutine distributed_gather_var_real8_2d(values, global_values)
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocateable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Variables are assumed to lie on the scalar grid (at cell centers).
+ implicit none
+ real(8),dimension(:,:),intent(in) :: values
+ real(8),dimension(:,:),allocatable,intent(inout) :: global_values
+
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+
+ !WHL - Commented code will not work if the local arrays include halo cells
+!! allocate(global_values(size(values,1), size(values,2)))
+!! global_values(:,:) = values(:,:)
+ allocate(global_values(size(values,1)-uhalo-lhalo, size(values,2)-uhalo-lhalo))
+ global_values(:,:) = values(1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo)
+
+ end subroutine distributed_gather_var_real8_2d
+
+ subroutine distributed_gather_var_real8_3d(values, global_values, ld1, ud1)
+ ! JEFF Gather a distributed variable back to main_task node
+ ! values = local portion of distributed variable
+ ! global_values = reference to allocateable array into which the main_task will store the variable.
+ ! If global_values is allocated, then it will be deallocated and reallocated. It will be unused on other nodes.
+ ! Variables are assumed to lie on the scalar grid (at cell centers).
+ implicit none
+ real(8),dimension(:,:,:),intent(in) :: values
+ real(8),dimension(:,:,:),allocatable,intent(inout) :: global_values
+ integer,optional,intent(in) :: ld1, ud1
+
+ integer :: d1l,d1u
+
+ if (allocated(global_values)) then
+ deallocate(global_values)
+ endif
+ if (present(ld1)) then
+ d1l = ld1
+ else
+ d1l = 1
+ endif
+ if (present(ud1)) then
+ d1u = ud1
+ else
+ d1u = size(values,1)
+ endif
+ if (size(values,1) /= d1u-d1l+1) then
+ write(*,*) "size(values,1) .ne. d1u-d1l+1 in gather call"
+ call parallel_stop(__FILE__, __LINE__)
+ endif
+
+ !WHL - Commented code will not work if the local arrays include halo cells
+!! allocate(global_values(d1l:d1u, size(values,2), size(values,3)))
+!! global_values(d1l:d1u,:,:) = values(1:size(values,1),:,:)
+ allocate(global_values(d1l:d1u, size(values,2)-uhalo-lhalo, size(values,3)-uhalo-lhalo))
+ global_values(d1l:d1u,:,:) = values(1:size(values,1), 1+lhalo:local_ewn-uhalo, 1+lhalo:local_nsn-uhalo)
+
+ end subroutine distributed_gather_var_real8_3d
+
+ function distributed_isparallel()
+ implicit none
+ logical :: distributed_isparallel
+
+ distributed_isparallel = .false.
+ end function distributed_isparallel
+
+ function distributed_owner(ew,ewn,ns,nsn)
+ implicit none
+ logical :: distributed_owner
+ integer :: ew,ewn,ns,nsn
+ ! begin
+ distributed_owner = .true.
+ end function distributed_owner
+
+ subroutine distributed_print_integer_2d(name,values)
+ implicit none
+ character(*) :: name
+ integer,dimension(:,:) :: values
+
+ integer,parameter :: u = 33
+ character(3) :: ts
+ integer :: i,j,k
+
+ write(ts,'(i3.3)') tasks
+ open(unit=u,file=name//ts//".txt",form="formatted",status="replace")
+ if (size(values,1) lhalo .and. ilocal <= lhalo + own_ewn) &
+ .and. &
+ (jlocal > lhalo .and. jlocal <= lhalo + own_nsn) ) then
+ ! global indices are valid
+ else ! global indices are invalid
+ if (main_task) then
+ write(*,*) 'Invalid global indices: iglobal, jglobal =', iglobal, jglobal
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+ endif
+ end subroutine parallel_localindex
+
+
+ subroutine parallel_halo_integer_2d(a)
+
+ implicit none
+ integer,dimension(:,:) :: a
+
+ integer,dimension(lhalo,local_nsn-lhalo-uhalo) :: ecopy
+ integer,dimension(uhalo,local_nsn-lhalo-uhalo) :: wcopy
+ integer,dimension(local_ewn,lhalo) :: ncopy
+ integer,dimension(local_ewn,uhalo) :: scopy
+
+ ! begin
+
+ ! staggered grid
+ if (size(a,1)==local_ewn-1 .and. size(a,2)==local_nsn-1) return
+
+ ! unknown grid
+ if (size(a,1)/=local_ewn .or. size(a,2)/=local_nsn) then
+ write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ if (outflow_bc) then
+
+ a(:lhalo,1+lhalo:local_nsn-uhalo) = 0
+ a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = 0
+ a(:,:lhalo) = 0
+ a(:,local_nsn-uhalo+1:) = 0
+
+ else ! periodic BC
+
+ ecopy(:,:) = a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+ wcopy(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+ a(:lhalo,1+lhalo:local_nsn-uhalo) = ecopy(:,:)
+ a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = wcopy(:,:)
+
+ ncopy(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+ scopy(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+ a(:,:lhalo) = ncopy(:,:)
+ a(:,local_nsn-uhalo+1:) = scopy(:,:)
+
+ endif
+
+ end subroutine parallel_halo_integer_2d
+
+
+  subroutine parallel_halo_logical_2d(a)
+
+    ! Update the halo cells of a logical 2d field on the unstaggered
+    ! grid.  Serial/SLAP build: halos are set .false. under outflow
+    ! (open) BCs, otherwise filled by a periodic wrap of the opposite
+    ! interior edge.  Staggered arrays return immediately; any other
+    ! shape is fatal.
+
+    implicit none
+    logical,dimension(:,:) :: a
+
+    ! scratch strips holding the interior edges that wrap around
+    logical,dimension(lhalo,local_nsn-lhalo-uhalo) :: ecopy
+    logical,dimension(uhalo,local_nsn-lhalo-uhalo) :: wcopy
+    logical,dimension(local_ewn,lhalo) :: ncopy
+    logical,dimension(local_ewn,uhalo) :: scopy
+
+    ! begin
+
+    ! staggered grid
+    if (size(a,1)==local_ewn-1 .and. size(a,2)==local_nsn-1) return
+
+    ! unknown grid
+    if (size(a,1)/=local_ewn .or. size(a,2)/=local_nsn) then
+       write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    if (outflow_bc) then
+
+       ! open boundaries: clear E/W strips, then full-width N/S rows
+       a(:lhalo,1+lhalo:local_nsn-uhalo) = .false.
+       a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = .false.
+       a(:,:lhalo) = .false.
+       a(:,local_nsn-uhalo+1:) = .false.
+
+    else ! periodic BC
+
+       ! east-west wrap first (interior rows), then full-width
+       ! north-south wrap, which also fills the corner cells
+       ecopy(:,:) = a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+       wcopy(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+       a(:lhalo,1+lhalo:local_nsn-uhalo) = ecopy(:,:)
+       a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = wcopy(:,:)
+
+       ncopy(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+       scopy(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+       a(:,:lhalo) = ncopy(:,:)
+       a(:,local_nsn-uhalo+1:) = scopy(:,:)
+
+    endif
+
+  end subroutine parallel_halo_logical_2d
+
+
+  subroutine parallel_halo_real4_2d(a)
+
+    ! Update the halo cells of a real(4) 2d field on the unstaggered
+    ! grid.  Serial/SLAP build: halos are zeroed under outflow (open)
+    ! BCs, otherwise filled by a periodic wrap of the opposite interior
+    ! edge.  Staggered arrays return immediately; any other shape is
+    ! fatal.
+
+    implicit none
+    real(4),dimension(:,:) :: a
+
+    ! scratch strips holding the interior edges that wrap around
+    real(4),dimension(lhalo,local_nsn-lhalo-uhalo) :: ecopy
+    real(4),dimension(uhalo,local_nsn-lhalo-uhalo) :: wcopy
+    real(4),dimension(local_ewn,lhalo) :: ncopy
+    real(4),dimension(local_ewn,uhalo) :: scopy
+
+    ! begin
+
+    ! staggered grid
+    if (size(a,1)==local_ewn-1 .and. size(a,2)==local_nsn-1) return
+
+    ! unknown grid
+    if (size(a,1)/=local_ewn .or. size(a,2)/=local_nsn) then
+       write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    if (outflow_bc) then
+
+       ! open boundaries: zero E/W strips, then full-width N/S rows
+       a(:lhalo,1+lhalo:local_nsn-uhalo) = 0.
+       a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = 0.
+       a(:,:lhalo) = 0.
+       a(:,local_nsn-uhalo+1:) = 0.
+
+    else ! periodic BC
+
+       ! east-west wrap first (interior rows), then full-width
+       ! north-south wrap, which also fills the corner cells
+       ecopy(:,:) = a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+       wcopy(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+       a(:lhalo,1+lhalo:local_nsn-uhalo) = ecopy(:,:)
+       a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = wcopy(:,:)
+
+       ncopy(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+       scopy(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+       a(:,:lhalo) = ncopy(:,:)
+       a(:,local_nsn-uhalo+1:) = scopy(:,:)
+
+    endif
+
+  end subroutine parallel_halo_real4_2d
+
+
+  subroutine parallel_halo_real8_2d(a, periodic_offset_ew, periodic_offset_ns)
+
+    !WHL - added optional arguments for periodic offsets, to support ismip-hom test cases
+
+    ! Update the halo cells of a real(8) 2d field on the unstaggered
+    ! grid.  Serial/SLAP build: halos are zeroed under outflow (open)
+    ! BCs, otherwise filled by a periodic wrap of the opposite interior
+    ! edge.  The optional offsets shift the wrapped values so that
+    ! fields with a uniform gradient (e.g. sloping surfaces in the
+    ! ISMIP-HOM experiments) remain continuous across the wrap.
+
+    implicit none
+    real(8),dimension(:,:) :: a
+    real(8), intent(in), optional :: &
+       periodic_offset_ew, &! offset halo values by this amount
+                            ! if positive, the offset is positive for W halo, negative for E halo
+       periodic_offset_ns   ! offset halo values by this amount
+                            ! if positive, the offset is positive for S halo, negative for N halo
+
+    ! scratch strips holding the interior edges that wrap around
+    real(8),dimension(lhalo,local_nsn-lhalo-uhalo) :: ecopy
+    real(8),dimension(uhalo,local_nsn-lhalo-uhalo) :: wcopy
+    real(8),dimension(local_ewn,lhalo) :: ncopy
+    real(8),dimension(local_ewn,uhalo) :: scopy
+
+    ! begin
+
+    ! staggered grid
+    if (size(a,1)==local_ewn-1 .and. size(a,2)==local_nsn-1) return
+
+    ! unknown grid
+    if (size(a,1)/=local_ewn .or. size(a,2)/=local_nsn) then
+       write(*,*) "Unknown Grid: Size a=(", size(a,1), ",", size(a,2), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    if (outflow_bc) then
+
+       ! open boundaries: zero E/W strips, then full-width N/S rows
+       a(:lhalo,1+lhalo:local_nsn-uhalo) = 0.d0
+       a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = 0.d0
+       a(:,:lhalo) = 0.d0
+       a(:,local_nsn-uhalo+1:) = 0.d0
+
+    else ! periodic BC
+
+       ! east-west wrap over interior rows
+       ecopy(:,:) = a(local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+       wcopy(:,:) = a(1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+       a(:lhalo,1+lhalo:local_nsn-uhalo) = ecopy(:,:)
+       a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = wcopy(:,:)
+
+       ! apply the EW offset after the wrap: +offset on the W halo,
+       ! -offset on the E halo (sign convention documented above)
+       if (present(periodic_offset_ew)) then
+          if (periodic_offset_ew /= 0.d0) then
+             a(:lhalo,1+lhalo:local_nsn-uhalo) = &
+             a(:lhalo,1+lhalo:local_nsn-uhalo) + periodic_offset_ew
+             a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = &
+             a(local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) - periodic_offset_ew
+          endif
+       endif
+
+       ! full-width north-south wrap (also fills the corner cells)
+       ncopy(:,:) = a(:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+       scopy(:,:) = a(:,1+lhalo:1+lhalo+uhalo-1)
+       a(:,:lhalo) = ncopy(:,:)
+       a(:,local_nsn-uhalo+1:) = scopy(:,:)
+
+       ! NS offset: +offset on the S halo, -offset on the N halo
+       if (present(periodic_offset_ns)) then
+          if (periodic_offset_ns /= 0.d0) then
+             a(:,:lhalo) = a(:,:lhalo) + periodic_offset_ns
+             a(:,local_nsn-uhalo+1:) = a(:,local_nsn-uhalo+1:) - periodic_offset_ns
+          endif
+       endif
+
+    endif    ! open or periodic BC
+
+  end subroutine parallel_halo_real8_2d
+
+
+  subroutine parallel_halo_real8_3d(a)
+
+    ! Update the halo cells of a real(8) 3d field a(k,i,j) on the
+    ! unstaggered grid; the vertical index comes first, so the
+    ! horizontal (EW, NS) dimensions are dims 2 and 3.  Serial/SLAP
+    ! build: halos are zeroed under outflow (open) BCs, otherwise
+    ! filled by a periodic wrap of the opposite interior edge.
+    !
+    ! Bug fix: the staggered-grid early return used to test
+    ! size(a,1)/size(a,2) (the vertical and EW dimensions), which is
+    ! inconsistent with the unknown-grid check below and with the
+    ! staggered 3d halo routines (both use dims 2 and 3); a staggered
+    ! 3d array would therefore fall through to parallel_stop instead
+    ! of returning.  Now the horizontal dims 2 and 3 are tested.
+
+    implicit none
+    real(8),dimension(:,:,:) :: a
+
+    ! scratch strips holding the interior edges that wrap around
+    real(8),dimension(size(a,1),lhalo,local_nsn-lhalo-uhalo) :: ecopy
+    real(8),dimension(size(a,1),uhalo,local_nsn-lhalo-uhalo) :: wcopy
+    real(8),dimension(size(a,1),local_ewn,lhalo) :: ncopy
+    real(8),dimension(size(a,1),local_ewn,uhalo) :: scopy
+
+    ! begin
+
+    ! staggered grid (horizontal dimensions are dims 2 and 3)
+    if (size(a,2)==local_ewn-1 .and. size(a,3)==local_nsn-1) return
+
+    ! unknown grid
+    if (size(a,2)/=local_ewn .or. size(a,3)/=local_nsn) then
+       write(*,*) "Unknown Grid: Size a=(", size(a,2), ",", size(a,3), ") and local_ewn and local_nsn = ", local_ewn, ",", local_nsn
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    if (outflow_bc) then
+
+       ! open boundaries: zero E/W strips, then full-width N/S rows
+       a(:,:lhalo,1+lhalo:local_nsn-uhalo) = 0.d0
+       a(:,local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = 0.d0
+       a(:,:,:lhalo) = 0.d0
+       a(:,:,local_nsn-uhalo+1:) = 0.d0
+
+    else ! periodic BC
+
+       ! east-west wrap first (interior rows), then full-width
+       ! north-south wrap, which also fills the corner cells
+       ecopy(:,:,:) = a(:,local_ewn-uhalo-lhalo+1:local_ewn-uhalo,1+lhalo:local_nsn-uhalo)
+       wcopy(:,:,:) = a(:,1+lhalo:1+lhalo+uhalo-1,1+lhalo:local_nsn-uhalo)
+       a(:,:lhalo,1+lhalo:local_nsn-uhalo) = ecopy(:,:,:)
+       a(:,local_ewn-uhalo+1:,1+lhalo:local_nsn-uhalo) = wcopy(:,:,:)
+
+       ncopy(:,:,:) = a(:,:,local_nsn-uhalo-lhalo+1:local_nsn-uhalo)
+       scopy(:,:,:) = a(:,:,1+lhalo:1+lhalo+uhalo-1)
+       a(:,:,:lhalo) = ncopy(:,:,:)
+       a(:,:,local_nsn-uhalo+1:) = scopy(:,:,:)
+
+    endif
+
+  end subroutine parallel_halo_real8_3d
+
+  function parallel_halo_verify_integer_2d(a) result(ok)
+    ! Halo-consistency check for an integer 2d field.  In the
+    ! serial/SLAP build there are no remote halos to disagree, so the
+    ! check trivially succeeds.
+    implicit none
+    integer,dimension(:,:) :: a
+    logical :: ok
+    ok = .true.
+  end function parallel_halo_verify_integer_2d
+
+  function parallel_halo_verify_real8_2d(a) result(ok)
+    ! Halo-consistency check for a real(8) 2d field; trivially true in
+    ! the serial/SLAP build (no remote halos exist).
+    implicit none
+    real(8),dimension(:,:) :: a
+    logical :: ok
+    ok = .true.
+  end function parallel_halo_verify_real8_2d
+
+  function parallel_halo_verify_real8_3d(a) result(ok)
+    ! Halo-consistency check for a real(8) 3d field; trivially true in
+    ! the serial/SLAP build (no remote halos exist).
+    implicit none
+    real(8),dimension(:,:,:) :: a
+    logical :: ok
+    ok = .true.
+  end function parallel_halo_verify_real8_3d
+
+#ifdef _USE_MPI_WITH_SLAP
+  ! parallel_initialise should generally just be called by standalone cism drivers
+  ! When cism is nested inside a climate model (so mpi_init has already been called) use parallel_set_info instead
+  subroutine parallel_initialise
+    use mpi_mod
+    implicit none
+    integer :: ierror
+    integer, parameter :: my_main_rank = 0   ! rank 0 is conventionally the main task
+    ! begin
+    call mpi_init(ierror)
+    call parallel_set_info(mpi_comm_world, my_main_rank)
+  end subroutine parallel_initialise
+
+  ! parallel_set_info should be called directly when cism is nested inside a climate model
+  ! (then, mpi_init has already been called, so do NOT use parallel_initialise)
+
+  subroutine parallel_set_info(my_comm, my_main_rank)
+    use mpi_mod
+    implicit none
+    integer, intent(in) :: my_comm       ! CISM's global communicator
+    integer, intent(in) :: my_main_rank  ! rank of the master task (ignored for parallel_slap)
+    integer :: ierror
+    ! begin
+    ! record the communicator and this task's place in it (module vars)
+    comm = my_comm
+    call mpi_comm_size(comm,tasks,ierror)
+    call mpi_comm_rank(comm,this_rank,ierror)
+    main_task = .true.  !For parallel_slap, each node duplicates all of the calculations.
+  end subroutine parallel_set_info
+
+#else
+  ! Serial build: MPI is unavailable, so both entry points are no-ops.
+  subroutine parallel_initialise
+    implicit none
+  end subroutine parallel_initialise
+
+  subroutine parallel_set_info(my_comm, my_main_rank)
+    implicit none
+    integer, intent(in) :: my_comm       ! CISM's global communicator (IGNORED)
+    integer, intent(in) :: my_main_rank  ! rank of the master task (IGNORED)
+  end subroutine parallel_set_info
+
+#endif
+
+  subroutine parallel_print_integer_2d(name,values)
+    ! Dump an integer 2d field to "<name>.txt", one "j i value" record
+    ! per element, with a blank line after each column of data
+    ! (gnuplot-friendly layout).  The file is overwritten if it exists.
+    implicit none
+    character(*) :: name
+    integer,dimension(:,:) :: values
+
+    ! Fix: use a runtime-assigned unit (F2008 newunit=) instead of the
+    ! hard-coded unit 33, which could collide with a unit already open
+    ! elsewhere in the model.
+    integer :: u
+    integer :: i,j
+    ! begin
+    open(newunit=u,file=name//".txt",form="formatted",status="replace")
+    do j = 1,size(values,2)
+       do i = 1,size(values,1)
+          write(u,*) j,i,values(i,j)
+       end do
+       write(u,'()')
+    end do
+    close(u)
+  end subroutine parallel_print_integer_2d
+
+  function parallel_inq_attname(ncid,varid,attnum,name)
+    ! Look up attribute number 'attnum' of a variable on the main task,
+    ! then share the netCDF status code and the attribute name with all
+    ! tasks.
+    implicit none
+    integer :: attnum,ncid,varid
+    integer :: parallel_inq_attname
+    character(len=*) :: name
+    if (main_task) then
+       parallel_inq_attname = nf90_inq_attname(ncid,varid,attnum,name)
+    end if
+    call broadcast(parallel_inq_attname)
+    call broadcast(name)
+  end function parallel_inq_attname
+
+  function parallel_inq_dimid(ncid,name,dimid)
+    ! Look up a dimension id by name on the main task and broadcast the
+    ! netCDF status code and the id to all tasks.
+    implicit none
+    integer :: dimid,ncid
+    integer :: parallel_inq_dimid
+    character(len=*) :: name
+    if (main_task) then
+       parallel_inq_dimid = nf90_inq_dimid(ncid,name,dimid)
+    end if
+    call broadcast(parallel_inq_dimid)
+    call broadcast(dimid)
+  end function parallel_inq_dimid
+
+  function parallel_inq_varid(ncid,name,varid)
+    ! Look up a variable id by name on the main task and broadcast the
+    ! netCDF status code and the id to all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_inq_varid
+    character(len=*) :: name
+    if (main_task) then
+       parallel_inq_varid = nf90_inq_varid(ncid,name,varid)
+    end if
+    call broadcast(parallel_inq_varid)
+    call broadcast(varid)
+  end function parallel_inq_varid
+
+  function parallel_inquire(ncid,nvariables)
+    ! Query the number of variables in a dataset on the main task and
+    ! broadcast the netCDF status code and the count to all tasks.
+    implicit none
+    integer :: ncid,nvariables
+    integer :: parallel_inquire
+    if (main_task) then
+       parallel_inquire = nf90_inquire(ncid,nvariables=nvariables)
+    end if
+    call broadcast(parallel_inquire)
+    call broadcast(nvariables)
+  end function parallel_inquire
+
+  function parallel_inquire_dimension(ncid,dimid,name,len)
+    ! Query a dimension's name and/or length on the main task, then
+    ! broadcast the status code (always), the name (if requested) and
+    ! the length (if requested) to all tasks.
+    ! NOTE: the dummy argument 'len' shadows the intrinsic of the same
+    ! name inside this function; kept for interface compatibility.
+    implicit none
+    integer :: dimid,ncid,parallel_inquire_dimension
+    integer,optional :: len
+    character(len=*),optional :: name
+
+    ! local copy of the length; broadcast only when the caller asked
+    integer :: l
+
+    ! begin
+
+    if (present(name)) then
+       if (main_task) parallel_inquire_dimension = &
+            nf90_inquire_dimension(ncid,dimid,name,len=l)
+       call broadcast(name)
+    else
+       if (main_task) parallel_inquire_dimension = &
+            nf90_inquire_dimension(ncid,dimid,len=l)
+    end if
+    call broadcast(parallel_inquire_dimension)
+    if (present(len)) then
+       call broadcast(l)
+       len = l
+    end if
+  end function parallel_inquire_dimension
+
+  function parallel_inquire_variable(ncid,varid,name,ndims,dimids,natts)
+    ! Query variable metadata on the main task and broadcast it to all
+    ! tasks.  Each optional piece (name, dimids) is queried and
+    ! broadcast separately, returning early on a netCDF error; the
+    ! dimension and attribute counts are always queried last.
+    implicit none
+    integer :: ncid,parallel_inquire_variable,varid
+    integer,optional :: ndims,natts
+    character(len=*),optional :: name
+    integer,dimension(:),optional :: dimids
+
+    ! local copies of the counts; broadcast only when requested
+    integer :: nd,na
+    ! begin
+    if (present(name)) then
+       if (main_task) parallel_inquire_variable = &
+            nf90_inquire_variable(ncid,varid,name=name)
+       call broadcast(parallel_inquire_variable)
+       call broadcast(name)
+       ! propagate a failure before attempting further queries
+       if (parallel_inquire_variable/=nf90_noerr) return
+    end if
+    if (present(dimids)) then
+       if (main_task) parallel_inquire_variable = &
+            nf90_inquire_variable(ncid,varid,dimids=dimids)
+       call broadcast(parallel_inquire_variable)
+       call broadcast(dimids)
+       if (parallel_inquire_variable/=nf90_noerr) return
+    end if
+    if (main_task) parallel_inquire_variable = &
+         nf90_inquire_variable(ncid,varid,ndims=nd,natts=na)
+    call broadcast(parallel_inquire_variable)
+    if (present(ndims)) then
+       call broadcast(nd)
+       ndims = nd
+    end if
+    if (present(natts)) then
+       call broadcast(na)
+       natts = na
+    end if
+  end function parallel_inquire_variable
+
+  function parallel_open(path,mode,ncid)
+    ! Open a netCDF dataset on the main task and broadcast the status
+    ! code to all tasks.
+    implicit none
+    integer :: mode,ncid
+    integer :: parallel_open
+    character(len=*) :: path
+    if (main_task) then
+       parallel_open = nf90_open(path,mode,ncid)
+    end if
+    call broadcast(parallel_open)
+  end function parallel_open
+
+  subroutine parallel_print_real8_2d(name,values)
+    ! Dump a real(8) 2d field to "<name>.txt", one "j i value" record
+    ! per element, with a blank line after each column of data
+    ! (gnuplot-friendly layout).  The file is overwritten if it exists.
+    implicit none
+    character(*) :: name
+    real(8),dimension(:,:) :: values
+
+    ! Fix: runtime-assigned unit (F2008 newunit=) instead of the
+    ! hard-coded unit 33, which could collide with an open unit.
+    integer :: u
+    integer :: i,j
+    ! begin
+    open(newunit=u,file=name//".txt",form="formatted",status="replace")
+    do j = 1,size(values,2)
+       do i = 1,size(values,1)
+          write(u,*) j,i,values(i,j)
+       end do
+       write(u,'()')
+    end do
+    close(u)
+  end subroutine parallel_print_real8_2d
+
+  subroutine parallel_print_real8_3d(name,values)
+    ! Dump a real(8) 3d field to "<name>.txt": for each (i,j) a single
+    ! record holds the whole vertical column values(:,i,j), with a
+    ! blank line after each j (gnuplot-friendly layout).
+    implicit none
+    character(*) :: name
+    real(8),dimension(:,:,:) :: values
+
+    ! Fix: runtime-assigned unit (F2008 newunit=) instead of the
+    ! hard-coded unit 33, which could collide with an open unit.
+    integer :: u
+    integer :: i,j
+    ! begin
+    open(newunit=u,file=name//".txt",form="formatted",status="replace")
+    do j = 1,size(values,3)
+       do i = 1,size(values,2)
+          write(u,'(2i6,100g15.5e3)') j,i,values(:,i,j)
+       end do
+       write(u,'()')
+    end do
+    close(u)
+  end subroutine parallel_print_real8_3d
+
+  function parallel_put_att_character(ncid,varid,name,values)
+    ! Write a character attribute via the main task and broadcast the
+    ! netCDF status code to all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_put_att_character
+    character(len=*) :: name,values
+    if (main_task) then
+       parallel_put_att_character = nf90_put_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_put_att_character)
+  end function parallel_put_att_character
+
+  function parallel_put_att_real4(ncid,varid,name,values)
+    ! Write a real(4) scalar attribute via the main task and broadcast
+    ! the netCDF status code to all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_put_att_real4
+    character(len=*) :: name
+    real(4) :: values
+    if (main_task) then
+       parallel_put_att_real4 = nf90_put_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_put_att_real4)
+  end function parallel_put_att_real4
+
+  function parallel_put_att_real4_1d(ncid,varid,name,values)
+    ! Write a real(4) array attribute via the main task and broadcast
+    ! the netCDF status code to all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_put_att_real4_1d
+    character(len=*) :: name
+    real(4),dimension(:) :: values
+    if (main_task) then
+       parallel_put_att_real4_1d = nf90_put_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_put_att_real4_1d)
+  end function parallel_put_att_real4_1d
+
+  function parallel_put_att_real8(ncid,varid,name,values)
+    ! Write a real(8) scalar attribute via the main task and broadcast
+    ! the netCDF status code to all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_put_att_real8
+    character(len=*) :: name
+    real(8) :: values
+    if (main_task) then
+       parallel_put_att_real8 = nf90_put_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_put_att_real8)
+  end function parallel_put_att_real8
+
+  function parallel_put_att_real8_1d(ncid,varid,name,values)
+    ! Write a real(8) array attribute via the main task and broadcast
+    ! the netCDF status code to all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_put_att_real8_1d
+    character(len=*) :: name
+    real(8),dimension(:) :: values
+    if (main_task) then
+       parallel_put_att_real8_1d = nf90_put_att(ncid,varid,name,values)
+    end if
+    call broadcast(parallel_put_att_real8_1d)
+  end function parallel_put_att_real8_1d
+
+  function parallel_put_var_real4(ncid,varid,values,start)
+    ! Write a real(4) scalar value at 'start' via the main task and
+    ! broadcast the netCDF status code to all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_put_var_real4
+    integer,dimension(:) :: start
+    real(4) :: values
+    if (main_task) then
+       parallel_put_var_real4 = nf90_put_var(ncid,varid,values,start)
+    end if
+    call broadcast(parallel_put_var_real4)
+  end function parallel_put_var_real4
+
+  function parallel_put_var_real8(ncid,varid,values,start)
+    ! Write a real(8) scalar value at 'start' via the main task and
+    ! broadcast the netCDF status code to all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_put_var_real8
+    integer,dimension(:) :: start
+    real(8) :: values
+    if (main_task) then
+       parallel_put_var_real8 = nf90_put_var(ncid,varid,values,start)
+    end if
+    call broadcast(parallel_put_var_real8)
+  end function parallel_put_var_real8
+
+  function parallel_put_var_real8_1d(ncid,varid,values,start)
+    ! Write a real(8) 1d array via the main task, honouring the
+    ! optional 'start' index, and broadcast the netCDF status code to
+    ! all tasks.
+    implicit none
+    integer :: ncid,varid
+    integer :: parallel_put_var_real8_1d
+    integer,dimension(:),optional :: start
+    real(8),dimension(:) :: values
+    if (main_task) then
+       if (present(start)) then
+          parallel_put_var_real8_1d = nf90_put_var(ncid,varid,values,start)
+       else
+          parallel_put_var_real8_1d = nf90_put_var(ncid,varid,values)
+       end if
+    end if
+    call broadcast(parallel_put_var_real8_1d)
+  end function parallel_put_var_real8_1d
+
+  function parallel_redef(ncid)
+    ! Re-enter define mode on the main task and broadcast the netCDF
+    ! status code to all tasks.
+    implicit none
+    integer :: ncid
+    integer :: parallel_redef
+    if (main_task) then
+       parallel_redef = nf90_redef(ncid)
+    end if
+    call broadcast(parallel_redef)
+  end function parallel_redef
+
+! ------------------------------------------
+! functions for parallel_reduce_sum interface
+! ------------------------------------------
+  function parallel_reduce_sum_integer(x) result(total)
+    ! Global sum reduction of an integer scalar.  The serial/SLAP
+    ! build holds the whole domain on every task, so this is the
+    ! identity.
+    implicit none
+    integer :: x
+    integer :: total
+    total = x
+  end function parallel_reduce_sum_integer
+
+  function parallel_reduce_sum_real4(x) result(total)
+    ! Global sum reduction of a real(4) scalar; identity in the
+    ! serial/SLAP build.
+    implicit none
+    real(4) :: x
+    real(4) :: total
+    total = x
+  end function parallel_reduce_sum_real4
+
+  function parallel_reduce_sum_real8(x) result(total)
+    ! Global sum reduction of a real(8) scalar; identity in the
+    ! serial/SLAP build.
+    implicit none
+    real(8) :: x
+    real(8) :: total
+    total = x
+  end function parallel_reduce_sum_real8
+
+  function parallel_reduce_sum_real8_nvar(x) result(totals)
+    ! Elementwise global sum reduction of a real(8) vector; identity
+    ! in the serial/SLAP build.
+    implicit none
+    real(8) :: x(:)
+    real(8), dimension(size(x)) :: totals
+    totals = x
+  end function parallel_reduce_sum_real8_nvar
+
+! ------------------------------------------
+! functions for parallel_reduce_max interface
+! ------------------------------------------
+  function parallel_reduce_max_integer(x) result(biggest)
+    ! Global max reduction of an integer scalar; identity in the
+    ! serial/SLAP build.
+    implicit none
+    integer :: x
+    integer :: biggest
+    biggest = x
+  end function parallel_reduce_max_integer
+
+  function parallel_reduce_max_real4(x) result(biggest)
+    ! Global max reduction of a real(4) scalar; identity in the
+    ! serial/SLAP build.
+    implicit none
+    real(4) :: x
+    real(4) :: biggest
+    biggest = x
+  end function parallel_reduce_max_real4
+
+  function parallel_reduce_max_real8(x) result(biggest)
+    ! Global max reduction of a real(8) scalar; identity in the
+    ! serial/SLAP build.
+    implicit none
+    real(8) :: x
+    real(8) :: biggest
+    biggest = x
+  end function parallel_reduce_max_real8
+
+! ------------------------------------------
+! routines for parallel_reduce_maxloc interface
+! ------------------------------------------
+  subroutine parallel_reduce_maxloc_integer(xin, xout, xprocout)
+    ! Maxloc reduction of an integer scalar.  Serial/SLAP build: every
+    ! task already holds the global value, so echo the input and report
+    ! this task's rank as the location.
+    implicit none
+    integer, intent(in)  :: xin       ! variable to reduce
+    integer, intent(out) :: xout      ! value resulting from the reduction
+    integer, intent(out) :: xprocout  ! processor on which reduced value occurs
+    xprocout = this_rank
+    xout = xin
+  end subroutine parallel_reduce_maxloc_integer
+
+  subroutine parallel_reduce_maxloc_real4(xin, xout, xprocout)
+    ! Maxloc reduction of a real(4) scalar; serial/SLAP identity (see
+    ! parallel_reduce_maxloc_integer).
+    implicit none
+    real(4), intent(in)  :: xin       ! variable to reduce
+    real(4), intent(out) :: xout      ! value resulting from the reduction
+    integer, intent(out) :: xprocout  ! processor on which reduced value occurs
+    xprocout = this_rank
+    xout = xin
+  end subroutine parallel_reduce_maxloc_real4
+
+  subroutine parallel_reduce_maxloc_real8(xin, xout, xprocout)
+    ! Maxloc reduction of a real(8) scalar; serial/SLAP identity (see
+    ! parallel_reduce_maxloc_integer).
+    implicit none
+    real(8), intent(in)  :: xin       ! variable to reduce
+    real(8), intent(out) :: xout      ! value resulting from the reduction
+    integer, intent(out) :: xprocout  ! processor on which reduced value occurs
+    xprocout = this_rank
+    xout = xin
+  end subroutine parallel_reduce_maxloc_real8
+
+! ------------------------------------------
+! functions for parallel_reduce_min interface
+! ------------------------------------------
+  function parallel_reduce_min_integer(x) result(smallest)
+    ! Global min reduction of an integer scalar; identity in the
+    ! serial/SLAP build.
+    implicit none
+    integer :: x
+    integer :: smallest
+    smallest = x
+  end function parallel_reduce_min_integer
+
+  function parallel_reduce_min_real4(x) result(smallest)
+    ! Global min reduction of a real(4) scalar; identity in the
+    ! serial/SLAP build.
+    implicit none
+    real(4) :: x
+    real(4) :: smallest
+    smallest = x
+  end function parallel_reduce_min_real4
+
+  function parallel_reduce_min_real8(x) result(smallest)
+    ! Global min reduction of a real(8) scalar; identity in the
+    ! serial/SLAP build.
+    implicit none
+    real(8) :: x
+    real(8) :: smallest
+    smallest = x
+  end function parallel_reduce_min_real8
+
+! ------------------------------------------
+! routines for parallel_reduce_minloc interface
+! ------------------------------------------
+  subroutine parallel_reduce_minloc_integer(xin, xout, xprocout)
+    ! Minloc reduction of an integer scalar.  Serial/SLAP build: every
+    ! task already holds the global value, so echo the input and report
+    ! this task's rank as the location.
+    implicit none
+    integer, intent(in)  :: xin       ! variable to reduce
+    integer, intent(out) :: xout      ! value resulting from the reduction
+    integer, intent(out) :: xprocout  ! processor on which reduced value occurs
+    xprocout = this_rank
+    xout = xin
+  end subroutine parallel_reduce_minloc_integer
+
+  subroutine parallel_reduce_minloc_real4(xin, xout, xprocout)
+    ! Minloc reduction of a real(4) scalar; serial/SLAP identity (see
+    ! parallel_reduce_minloc_integer).
+    implicit none
+    real(4), intent(in)  :: xin       ! variable to reduce
+    real(4), intent(out) :: xout      ! value resulting from the reduction
+    integer, intent(out) :: xprocout  ! processor on which reduced value occurs
+    xprocout = this_rank
+    xout = xin
+  end subroutine parallel_reduce_minloc_real4
+
+  subroutine parallel_reduce_minloc_real8(xin, xout, xprocout)
+    ! Minloc reduction of a real(8) scalar; serial/SLAP identity (see
+    ! parallel_reduce_minloc_integer).
+    implicit none
+    real(8), intent(in)  :: xin       ! variable to reduce
+    real(8), intent(out) :: xout      ! value resulting from the reduction
+    integer, intent(out) :: xprocout  ! processor on which reduced value occurs
+    xprocout = this_rank
+    xout = xin
+  end subroutine parallel_reduce_minloc_real8
+
+
+  subroutine parallel_show_minmax(label,values)
+    ! Print the minimum and maximum of a real(8) 3d field, preceded by
+    ! a caller-supplied label (debugging aid).
+    implicit none
+    character(*) :: label
+    real(8),dimension(:,:,:) :: values
+    print *,label,minval(values),maxval(values)
+  end subroutine parallel_show_minmax
+
+  subroutine parallel_stop(file,line)
+    ! Abort the run, reporting the source location on unit 0 (stderr)
+    ! first.  Callers pass __FILE__ and __LINE__.
+    implicit none
+    character(len=*) :: file
+    integer :: line
+    write(0,*) "STOP in ",file," at line ",line
+    stop
+  end subroutine parallel_stop
+
+  function parallel_sync(ncid)
+    ! Flush the netCDF dataset to disk on the main task and broadcast
+    ! the status code to all tasks.
+    implicit none
+    integer :: ncid
+    integer :: parallel_sync
+    if (main_task) then
+       parallel_sync = nf90_sync(ncid)
+    end if
+    call broadcast(parallel_sync)
+  end function parallel_sync
+
+  subroutine staggered_parallel_halo_extrapolate_integer_2d(a)
+
+    ! Fill halo cells of an integer field on the staggered (velocity)
+    ! grid by copying the nearest interior row/column outward, rather
+    ! than wrapping periodically.  The array must be exactly staggered
+    ! size (local_ewn-1, local_nsn-1); anything else is fatal.
+
+    implicit none
+    integer,dimension(:,:) :: a
+    integer :: i, j
+
+    ! begin
+
+    ! Confirm staggered array
+    if (size(a,1)/=local_ewn-1 .or. size(a,2)/=local_nsn-1) then
+       write(*,*) "staggered_parallel_halo() requires staggered arrays."
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    ! Extrapolate the staggered field into halo cells along the global boundary.
+    ! Currently this is used only for kinbcmask.
+    ! Note: The extrapolation region includes locally owned cells along
+    !       the north and east boundaries of the global domain.
+
+    ! extrapolate westward (copy first interior column into each halo column)
+    do i = 1, staggered_lhalo
+       a(i, staggered_lhalo+1:size(a,2)-staggered_uhalo-1) = &
+       a(staggered_lhalo+1, staggered_lhalo+1:size(a,2)-staggered_uhalo-1)
+    enddo
+
+    ! extrapolate eastward (source column lies just inside the copied range)
+    do i = size(a,1)-staggered_uhalo, size(a,1)
+       a(i, staggered_lhalo+1:size(a,2)-staggered_uhalo-1) = &
+       a(size(a,1)-staggered_uhalo-1, staggered_lhalo+1:size(a,2)-staggered_uhalo-1)
+    enddo
+
+    ! extrapolate southward (full rows, so the corners are covered too)
+    do j = 1, staggered_lhalo
+       a(1:size(a,1), j) = a(1:size(a,1), staggered_lhalo+1)
+    enddo
+
+    ! extrapolate northward
+    do j = size(a,2)-staggered_uhalo, size(a,2)
+       a(1:size(a,1), j) = a(1:size(a,1), size(a,2)-staggered_uhalo-1)
+    enddo
+
+  end subroutine staggered_parallel_halo_extrapolate_integer_2d
+
+
+  subroutine staggered_parallel_halo_extrapolate_real8_2d(a)
+
+    ! Real(8) twin of staggered_parallel_halo_extrapolate_integer_2d:
+    ! fill halo cells of a staggered-grid field by copying the nearest
+    ! interior row/column outward.  The array must be exactly staggered
+    ! size (local_ewn-1, local_nsn-1); anything else is fatal.
+
+    implicit none
+    real(8),dimension(:,:) :: a
+    integer :: i, j
+
+    ! begin
+
+    ! Confirm staggered array
+    if (size(a,1)/=local_ewn-1 .or. size(a,2)/=local_nsn-1) then
+       write(*,*) "staggered_parallel_halo() requires staggered arrays."
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    ! Extrapolate the staggered field into halo cells along the global boundary.
+    ! Currently this is used only for kinbcmask.
+    ! Note: The extrapolation region includes locally owned cells along
+    !       the north and east boundaries of the global domain.
+
+    ! extrapolate westward (copy first interior column into each halo column)
+    do i = 1, staggered_lhalo
+       a(i, staggered_lhalo+1:size(a,2)-staggered_uhalo-1) = &
+       a(staggered_lhalo+1, staggered_lhalo+1:size(a,2)-staggered_uhalo-1)
+    enddo
+
+    ! extrapolate eastward (source column lies just inside the copied range)
+    do i = size(a,1)-staggered_uhalo, size(a,1)
+       a(i, staggered_lhalo+1:size(a,2)-staggered_uhalo-1) = &
+       a(size(a,1)-staggered_uhalo-1, staggered_lhalo+1:size(a,2)-staggered_uhalo-1)
+    enddo
+
+    ! extrapolate southward (full rows, so the corners are covered too)
+    do j = 1, staggered_lhalo
+       a(1:size(a,1), j) = a(1:size(a,1), staggered_lhalo+1)
+    enddo
+
+    ! extrapolate northward
+    do j = size(a,2)-staggered_uhalo, size(a,2)
+       a(1:size(a,1), j) = a(1:size(a,1), size(a,2)-staggered_uhalo-1)
+    enddo
+
+  end subroutine staggered_parallel_halo_extrapolate_real8_2d
+
+
+  subroutine staggered_parallel_halo_integer_2d(a)
+
+    ! Periodic halo update of an integer 2d field on the staggered
+    ! (velocity) grid, which is one smaller in each dimension than the
+    ! unstaggered grid.  Interior edge strips are copied to the
+    ! opposite halo; east-west first (interior rows only), then
+    ! full-width north-south, which also fills the corners.
+
+    implicit none
+    integer,dimension(:,:) :: a
+
+    ! scratch strips holding the interior edges that wrap around
+    integer,dimension(staggered_lhalo,size(a,2)-staggered_lhalo-staggered_uhalo) :: ecopy
+    integer,dimension(staggered_uhalo,size(a,2)-staggered_lhalo-staggered_uhalo) :: wcopy
+    integer,dimension(size(a,1),staggered_lhalo) :: ncopy
+    integer,dimension(size(a,1),staggered_uhalo) :: scopy
+
+    ! begin
+
+    ! Confirm staggered array
+    if (size(a,1)/=local_ewn-1 .or. size(a,2)/=local_nsn-1) then
+       write(*,*) "staggered_parallel_halo() requires staggered arrays."
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    ! west interior strip -> wraps into the east halo below
+    wcopy(:, 1:size(a,2)-staggered_lhalo-staggered_uhalo) = &
+         a(1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+           1+staggered_lhalo:size(a,2)-staggered_uhalo)
+
+    ! east interior strip -> wraps into the west halo below
+    ecopy(:, 1:size(a,2)-staggered_lhalo-staggered_uhalo) = &
+         a(size(a,1)-staggered_uhalo-staggered_lhalo+1:size(a,1)-staggered_uhalo, &
+           1+staggered_lhalo:size(a,2)-staggered_uhalo)
+
+    a(size(a,1)-staggered_uhalo+1:size(a,1), 1+staggered_lhalo:size(a,2)-staggered_uhalo) = &
+         wcopy(:, 1:size(a,2)-staggered_lhalo-staggered_uhalo)
+
+    a(1:staggered_lhalo, 1+staggered_lhalo:size(a,2)-staggered_uhalo) = &
+         ecopy(:, 1:size(a,2)-staggered_lhalo-staggered_uhalo)
+
+    ! full-width north-south wrap (fills corner cells as well)
+    scopy(:,:) = a(:, 1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+    ncopy(:,:) = a(:, size(a,2)-staggered_uhalo-staggered_lhalo+1:size(a,2)-staggered_uhalo)
+
+    a(:, size(a,2)-staggered_uhalo+1:size(a,2)) = scopy(:,:)
+    a(:, 1:staggered_lhalo) = ncopy(:,:)
+
+  end subroutine staggered_parallel_halo_integer_2d
+
+
+  subroutine staggered_parallel_halo_integer_3d(a)
+
+    ! Periodic halo update of an integer 3d field a(k,i,j) on the
+    ! staggered grid; the vertical index comes first, so the
+    ! horizontal dimensions are dims 2 and 3.  East-west wrap first
+    ! (interior rows only), then full-width north-south, which also
+    ! fills the corners.
+
+    implicit none
+    integer,dimension(:,:,:) :: a
+
+    ! scratch strips holding the interior edges that wrap around
+    integer,dimension(size(a,1),staggered_lhalo,size(a,3)-staggered_lhalo-staggered_uhalo) :: ecopy
+    integer,dimension(size(a,1),staggered_uhalo,size(a,3)-staggered_lhalo-staggered_uhalo) :: wcopy
+    integer,dimension(size(a,1),size(a,2),staggered_lhalo) :: ncopy
+    integer,dimension(size(a,1),size(a,2),staggered_uhalo) :: scopy
+
+    ! begin
+
+    ! Confirm staggered array
+    if (size(a,2)/=local_ewn-1 .or. size(a,3)/=local_nsn-1) then
+       write(*,*) "staggered_parallel_halo() requires staggered arrays."
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    ! west interior strip -> wraps into the east halo below
+    wcopy(:,:, 1:size(a,3)-staggered_lhalo-staggered_uhalo) = &
+         a(:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+           1+staggered_lhalo:size(a,3)-staggered_uhalo)
+
+    ! east interior strip -> wraps into the west halo below
+    ecopy(:,:, 1:size(a,3)-staggered_lhalo-staggered_uhalo) = &
+         a(:,size(a,2)-staggered_uhalo-staggered_lhalo+1:size(a,2)-staggered_uhalo, &
+           1+staggered_lhalo:size(a,3)-staggered_uhalo)
+
+    a(:, size(a,2)-staggered_uhalo+1:size(a,2), 1+staggered_lhalo:size(a,3)-staggered_uhalo) = &
+         wcopy(:,:, 1:size(a,3)-staggered_lhalo-staggered_uhalo)
+
+    a(:, 1:staggered_lhalo, 1+staggered_lhalo:size(a,3)-staggered_uhalo) = &
+         ecopy(:,:, 1:size(a,3)-staggered_lhalo-staggered_uhalo)
+
+    ! full-width north-south wrap (fills corner cells as well)
+    scopy(:,:,:) = a(:,:, 1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+    ncopy(:,:,:) = a(:,:, size(a,3)-staggered_uhalo-staggered_lhalo+1:size(a,3)-staggered_uhalo)
+
+    a(:,:,size(a,3)-staggered_uhalo+1:size(a,3)) = scopy(:,:,:)
+    a(:,:,1:staggered_lhalo) = ncopy(:,:,:)
+
+  end subroutine staggered_parallel_halo_integer_3d
+
+
+  subroutine staggered_parallel_halo_real8_2d(a)
+
+    ! Periodic halo update of a real(8) 2d field on the staggered
+    ! (velocity) grid; see staggered_parallel_halo_integer_2d for the
+    ! layout.  East-west wrap first (interior rows only), then
+    ! full-width north-south, which also fills the corners.
+
+    implicit none
+    real(8),dimension(:,:) :: a
+
+    ! scratch strips holding the interior edges that wrap around
+    real(8),dimension(staggered_lhalo,size(a,2)-staggered_lhalo-staggered_uhalo) :: ecopy
+    real(8),dimension(staggered_uhalo,size(a,2)-staggered_lhalo-staggered_uhalo) :: wcopy
+    real(8),dimension(size(a,1),staggered_lhalo) :: ncopy
+    real(8),dimension(size(a,1),staggered_uhalo) :: scopy
+
+    ! begin
+
+    ! Confirm staggered array
+    if (size(a,1)/=local_ewn-1 .or. size(a,2)/=local_nsn-1) then
+       write(*,*) "staggered_parallel_halo() requires staggered arrays."
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    ! west interior strip -> wraps into the east halo below
+    wcopy(:, 1:size(a,2)-staggered_lhalo-staggered_uhalo) = &
+         a(1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+           1+staggered_lhalo:size(a,2)-staggered_uhalo)
+
+    ! east interior strip -> wraps into the west halo below
+    ecopy(:, 1:size(a,2)-staggered_lhalo-staggered_uhalo) = &
+         a(size(a,1)-staggered_uhalo-staggered_lhalo+1:size(a,1)-staggered_uhalo, &
+           1+staggered_lhalo:size(a,2)-staggered_uhalo)
+
+    a(size(a,1)-staggered_uhalo+1:size(a,1), 1+staggered_lhalo:size(a,2)-staggered_uhalo) = &
+         wcopy(:, 1:size(a,2)-staggered_lhalo-staggered_uhalo)
+
+    a(1:staggered_lhalo, 1+staggered_lhalo:size(a,2)-staggered_uhalo) = &
+         ecopy(:, 1:size(a,2)-staggered_lhalo-staggered_uhalo)
+
+    ! full-width north-south wrap (fills corner cells as well)
+    scopy(:,:) = a(:, 1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+    ncopy(:,:) = a(:, size(a,2)-staggered_uhalo-staggered_lhalo+1:size(a,2)-staggered_uhalo)
+
+    a(:, size(a,2)-staggered_uhalo+1:size(a,2)) = scopy(:,:)
+    a(:, 1:staggered_lhalo) = ncopy(:,:)
+
+  end subroutine staggered_parallel_halo_real8_2d
+
+
+  subroutine staggered_parallel_halo_real8_3d(a)
+
+    ! Periodic halo update of a real(8) 3d field a(k,i,j) on the
+    ! staggered grid; the vertical index comes first, so the
+    ! horizontal dimensions are dims 2 and 3.  East-west wrap first
+    ! (interior rows only), then full-width north-south, which also
+    ! fills the corners.
+
+    implicit none
+    real(8),dimension(:,:,:) :: a
+
+    ! scratch strips holding the interior edges that wrap around
+    real(8),dimension(size(a,1),staggered_lhalo,size(a,3)-staggered_lhalo-staggered_uhalo) :: ecopy
+    real(8),dimension(size(a,1),staggered_uhalo,size(a,3)-staggered_lhalo-staggered_uhalo) :: wcopy
+    real(8),dimension(size(a,1),size(a,2),staggered_lhalo) :: ncopy
+    real(8),dimension(size(a,1),size(a,2),staggered_uhalo) :: scopy
+
+    ! begin
+
+    ! Confirm staggered array
+    if (size(a,2)/=local_ewn-1 .or. size(a,3)/=local_nsn-1) then
+       write(*,*) "staggered_parallel_halo() requires staggered arrays."
+       call parallel_stop(__FILE__,__LINE__)
+    endif
+
+    ! west interior strip -> wraps into the east halo below
+    wcopy(:,:, 1:size(a,3)-staggered_lhalo-staggered_uhalo) = &
+         a(:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+           1+staggered_lhalo:size(a,3)-staggered_uhalo)
+
+    ! east interior strip -> wraps into the west halo below
+    ecopy(:,:, 1:size(a,3)-staggered_lhalo-staggered_uhalo) = &
+         a(:,size(a,2)-staggered_uhalo-staggered_lhalo+1:size(a,2)-staggered_uhalo, &
+           1+staggered_lhalo:size(a,3)-staggered_uhalo)
+
+    a(:, size(a,2)-staggered_uhalo+1:size(a,2), 1+staggered_lhalo:size(a,3)-staggered_uhalo) = &
+         wcopy(:,:, 1:size(a,3)-staggered_lhalo-staggered_uhalo)
+
+    a(:, 1:staggered_lhalo, 1+staggered_lhalo:size(a,3)-staggered_uhalo) = &
+         ecopy(:,:, 1:size(a,3)-staggered_lhalo-staggered_uhalo)
+
+    ! full-width north-south wrap (fills corner cells as well)
+    scopy(:,:,:) = a(:,:, 1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+    ncopy(:,:,:) = a(:,:, size(a,3)-staggered_uhalo-staggered_lhalo+1:size(a,3)-staggered_uhalo)
+
+    a(:,:,size(a,3)-staggered_uhalo+1:size(a,3)) = scopy(:,:,:)
+    a(:,:,1:staggered_lhalo) = ncopy(:,:,:)
+
+  end subroutine staggered_parallel_halo_real8_3d
+
+!WHL - New subroutine for 4D arrays
+ subroutine staggered_parallel_halo_real8_4d(a)
+
+ ! Implements a staggered grid halo update for a 4D field.
+ ! This subroutine is used for the 4D arrays that hold matrix entries.
+
+ ! As the grid is staggered, the array 'a' is one smaller in x and y dimensions than an unstaggered array.
+ ! The vertical dimension is assumed to precede the i and j indices, i.e., a(:,k,i,j).
+ ! The first dimension holds matrix elements for a single row.
+
+ implicit none
+ real(8),dimension(:,:,:,:) :: a
+
+ real(8),dimension(size(a,1),size(a,2),staggered_lhalo,size(a,4)-staggered_lhalo-staggered_uhalo) :: ecopy
+ real(8),dimension(size(a,1),size(a,2),staggered_uhalo,size(a,4)-staggered_lhalo-staggered_uhalo) :: wcopy
+ real(8),dimension(size(a,1),size(a,2),size(a,3),staggered_lhalo) :: ncopy
+ real(8),dimension(size(a,1),size(a,2),size(a,3),staggered_uhalo) :: scopy
+
+ ! begin
+
+ ! Confirm staggered array
+ if (size(a,3)/=local_ewn-1 .or. size(a,4)/=local_nsn-1) then
+ write(*,*) "staggered_parallel_halo() requires staggered arrays."
+ call parallel_stop(__FILE__,__LINE__)
+ endif
+
+ wcopy(:,:,:, 1:size(a,4)-staggered_lhalo-staggered_uhalo) = &
+ a(:,:,1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1, &
+ 1+staggered_lhalo:size(a,4)-staggered_uhalo)
+
+ ecopy(:,:,:, 1:size(a,4)-staggered_lhalo-staggered_uhalo) = &
+ a(:,:,size(a,3)-staggered_uhalo-staggered_lhalo+1:size(a,3)-staggered_uhalo, &
+ 1+staggered_lhalo:size(a,4)-staggered_uhalo)
+
+ a(:,:, size(a,3)-staggered_uhalo+1:size(a,3), 1+staggered_lhalo:size(a,4)-staggered_uhalo) = &
+ wcopy(:,:,:, 1:size(a,4)-staggered_lhalo-staggered_uhalo)
+
+ a(:,:, 1:staggered_lhalo, 1+staggered_lhalo:size(a,4)-staggered_uhalo) = &
+ ecopy(:,:,:, 1:size(a,4)-staggered_lhalo-staggered_uhalo)
+
+ scopy(:,:,:,:) = a(:,:,:, 1+staggered_lhalo:1+staggered_lhalo+staggered_uhalo-1)
+ ncopy(:,:,:,:) = a(:,:,:, size(a,4)-staggered_uhalo-staggered_lhalo+1:size(a,4)-staggered_uhalo)
+
+ a(:,:,:,size(a,4)-staggered_uhalo+1:size(a,4)) = scopy(:,:,:,:)
+ a(:,:,:,1:staggered_lhalo) = ncopy(:,:,:,:)
+
+ end subroutine staggered_parallel_halo_real8_4d
+
+end module parallel
diff --git a/components/cism/glimmer-cism/libglimmer/profile.F90 b/components/cism/glimmer-cism/libglimmer/profile.F90
new file mode 100644
index 0000000000..7934a3a44c
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/profile.F90
@@ -0,0 +1,218 @@
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! profile.F90 - part of the Community Ice Sheet Model (CISM)
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!
+! Copyright (C) 2005-2014
+! CISM contributors - see AUTHORS file for list of contributors
+!
+! This file is part of CISM.
+!
+! CISM is free software: you can redistribute it and/or modify it
+! under the terms of the Lesser GNU General Public License as published
+! by the Free Software Foundation, either version 3 of the License, or
+! (at your option) any later version.
+!
+! CISM is distributed in the hope that it will be useful,
+! but WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+! Lesser GNU General Public License for more details.
+!
+! You should have received a copy of the Lesser GNU General Public License
+! along with CISM. If not, see <http://www.gnu.org/licenses/>.
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+#ifdef HAVE_CONFIG_H
+#include "config.inc"
+#endif
+
+!> module for profiling programs
+!! \author Magnus Hagdorn
+!! \date January 2005
+module profile
+
+#if (defined CCSMCOUPLED || defined CESMTIMERS)
+ use perf_mod
+ use parallel
+#endif
+
+ use glimmer_global, only: dp
+ implicit none
+
+#if (defined CCSMCOUPLED || defined CESMTIMERS)
+ logical, public :: output_thispe = .false.
+#endif
+
+ integer, private :: current_unit = 200
+ integer, private,parameter :: max_prof = 100
+
+ !> the profiling type
+ !!
+ !! State for one profile log: the output file unit, the CPU-time reference
+ !! taken at profile_init, and per-profile start times / accumulated totals.
+ !! At most max_prof profiles can be registered per instance.
+ type profile_type
+ integer :: profile_unit=0 !< file unit to be written to
+ real(dp) :: start_time !< CPU time at start of log
+ integer :: nump=0 !< number of profiles
+
+ real(dp), dimension(max_prof) :: pstart,ptotal !< for each log store start and totals
+ character(len=50), dimension(max_prof) :: pname !< name for each profile (names longer than 50 chars are truncated)
+ end type profile_type
+
+contains
+
+ !> initialise a profile
+ !!
+ !! Assigns the next free output unit to prof. In standalone PROFILING builds
+ !! it also zeroes the per-profile counters, records the reference CPU time,
+ !! and opens the log file with a timestamped header line. In CESMTIMERS
+ !! (non-coupled) builds it initialises the perf_mod timing library instead.
+ subroutine profile_init(prof,name)
+ implicit none
+ type(profile_type), intent(out) :: prof !< structure storing profile definitions
+ character(len=*), intent(in) :: name !< name of file
+ ! local variables
+ character(len=8) :: date
+ character(len=10) :: time
+
+ ! Unit numbers are handed out from a module-level counter starting at 200.
+ ! NOTE(review): units are never recycled and there is no clash check
+ ! against units opened elsewhere - confirm this is acceptable.
+ prof%profile_unit = current_unit
+ current_unit = current_unit + 1
+#if (defined PROFILING && ! defined CCSMCOUPLED && ! defined CESMTIMERS)
+ prof%pstart(:) = 0.
+ prof%ptotal(:) = 0.
+ call cpu_time(prof%start_time)
+ call date_and_time (date, time)
+ open(unit=prof%profile_unit,file=name,status='unknown')
+ write(unit=prof%profile_unit,fmt="(a,a4,'-',a2,'-',a2,' ',a2,':',a2,':',a6)") '# Started profile on ',&
+ date(1:4),date(5:6),date(7:8),time(1:2),time(3:4),time(5:10)
+#endif
+
+#if (! defined CCSMCOUPLED && defined CESMTIMERS)
+ ! comm and main_task are presumably supplied by the parallel module
+ ! used above - TODO confirm.
+ call t_initf("perf_in", mpicom=comm, MasterTask=main_task)
+#endif
+ end subroutine profile_init
+
+ !> register a new series of measurements
+ !!
+ !! Reserves the next slot in prof, stores msg as the profile's name, and
+ !! returns the slot index to be passed to profile_start/stop/log.
+ !! Aborts with a fatal log message once max_prof profiles are in use.
+ function profile_register(prof,msg)
+ use glimmer_log
+ implicit none
+ type(profile_type) :: prof !< structure storing profile definitions
+ character(len=*), intent(in) :: msg !< the message to be associated
+ integer profile_register
+
+ prof%nump = prof%nump+1
+ if (prof%nump > max_prof) then
+ call write_log('Maximum number of profiles reached',type=GM_FATAL, &
+ file=__FILE__,line=__LINE__)
+ end if
+ profile_register = prof%nump
+ ! Names longer than len(prof%pname) (50 chars) are silently truncated.
+ prof%pname(prof%nump) = trim(msg)
+ end function profile_register
+
+ !> start profiling
+ !!
+ !! Records the current CPU time as the start of measurement profn
+ !! (standalone PROFILING builds) and/or starts the matching perf_mod
+ !! timer, keyed by the registered profile name (coupled/CESM builds).
+ subroutine profile_start(prof,profn)
+ implicit none
+ type(profile_type) :: prof !< structure storing profile definitions
+ integer, intent(in) :: profn !< the profile ID
+
+#if (defined PROFILING && ! defined CCSMCOUPLED && ! defined CESMTIMERS)
+ call cpu_time(prof%pstart(profn))
+#endif
+
+#if (defined CCSMCOUPLED || defined CESMTIMERS)
+ call t_startf(prof%pname(profn))
+#endif
+ end subroutine profile_start
+
+ !> stop profiling
+ !!
+ !! Accumulates the CPU time elapsed since the matching profile_start call
+ !! into ptotal(profn) (standalone PROFILING builds) and/or stops the
+ !! matching perf_mod timer (coupled/CESM builds).
+ subroutine profile_stop(prof,profn)
+ implicit none
+ type(profile_type) :: prof !< structure storing profile definitions
+ integer, intent(in) :: profn !< the profile ID
+
+#if (defined PROFILING && ! defined CCSMCOUPLED && ! defined CESMTIMERS)
+ real(dp) :: t
+
+ call cpu_time(t)
+ prof%ptotal(profn) = prof%ptotal(profn) + t-prof%pstart(profn)
+#endif
+
+#if (defined CCSMCOUPLED || defined CESMTIMERS)
+ call t_stopf(prof%pname(profn))
+#endif
+ end subroutine profile_stop
+
+ !> log a message to profile
+ !!
+ !! Standalone PROFILING builds only: writes one list-directed line to the
+ !! profile file containing the CPU time since profile_init, the accumulated
+ !! total for profn, the profile ID, and the profile name (prefixed by msg
+ !! when present), then resets profn's counters so accumulation restarts.
+ !! A no-op in coupled/CESM builds.
+ subroutine profile_log(prof,profn,msg)
+ implicit none
+ type(profile_type) :: prof !< structure storing profile definitions
+ integer, intent(in) :: profn !< the profile ID
+ character(len=*), intent(in), optional :: msg !< message to be written to profile
+
+#if (defined PROFILING && ! defined CCSMCOUPLED && ! defined CESMTIMERS)
+ real(dp) :: t
+
+ call cpu_time(t)
+ if (present(msg)) then
+ write(prof%profile_unit,*) t-prof%start_time,prof%ptotal(profn),profn,trim(msg)//' '//trim(prof%pname(profn))
+ else
+ write(prof%profile_unit,*) t-prof%start_time,prof%ptotal(profn),profn,trim(prof%pname(profn))
+ end if
+ prof%ptotal(profn) = 0.
+ prof%pstart(profn) = 0.
+#endif
+ end subroutine profile_log
+
+ !> close profile
+ !!
+ !! Standalone PROFILING builds: writes the total elapsed CPU time and a
+ !! timestamped trailer line, then closes the profile file. CESMTIMERS
+ !! (non-coupled) builds: writes the perf_mod timing file, requesting
+ !! per-process output from one rank near the middle of the computational
+ !! domain, then finalises the timing library.
+ subroutine profile_close(prof)
+ implicit none
+ type(profile_type), intent(in) :: prof !< structure storing profile definitions
+#if (defined PROFILING && ! defined CCSMCOUPLED && ! defined CESMTIMERS)
+ ! local variables
+ character(len=8) :: date
+ character(len=10) :: time
+ real(dp) :: t
+
+ call cpu_time(t)
+ call date_and_time (date, time)
+ write(prof%profile_unit,*) '# total elapse cpu time: ',t-prof%start_time
+ write(unit=prof%profile_unit,fmt="(a,a4,'-',a2,'-',a2,' ',a2,':',a2,':',a6)") '# Finished profile on ',&
+ date(1:4),date(5:6),date(7:8),time(1:2),time(3:4),time(5:10)
+ close(prof%profile_unit)
+#endif
+
+#if (! defined CCSMCOUPLED && defined CESMTIMERS)
+ ! NOTE(review): this declaration follows the executable statements above,
+ ! which is legal only because the two #if regions are mutually exclusive
+ ! (the first requires CESMTIMERS undefined, this one requires it defined).
+ integer :: ewrank, nsrank, nstasks
+
+ ! Sample performance data from process in middle of
+ ! computational domain
+ ! this_rank, ProcsEW and tasks are presumably supplied by the parallel
+ ! module - TODO confirm.
+ ewrank = mod(this_rank,ProcsEW)
+ nsrank = this_rank/ProcsEW
+ nstasks = tasks/ProcsEW
+ if ((ewrank .eq. (ProcsEW+1)/2) .and. (nsrank .eq. (nstasks+1)/2)) then
+ call t_prf('cism_timing', num_outpe=1, global_stats=.true., &
+ output_thispe=.true.)
+ else
+ call t_prf('cism_timing', num_outpe=1, global_stats=.true.)
+ endif
+
+ call t_finalizef ()
+#endif
+ end subroutine profile_close
+
+#if (! defined CCSMCOUPLED && ! defined CESMTIMERS)
+ ! Do-nothing fallbacks matching the perf_mod timing API signatures, compiled
+ ! only when neither the coupled build nor the CESM timers are enabled.
+ ! NOTE(review): the calls inside this module are themselves compiled out in
+ ! that configuration, so these presumably exist for external callers that
+ ! invoke the timing API unconditionally - confirm against callers.
+
+ !> no-op stand-in for perf_mod's t_startf; event and handle are ignored.
+ subroutine t_startf(event, handle)
+ integer,parameter :: SHR_KIND_I8 = selected_int_kind (13) ! 8 byte integer
+ character(len=*), intent(in) :: event
+ integer(shr_kind_i8), optional :: handle
+ return
+ end subroutine t_startf
+
+ !> no-op stand-in for perf_mod's t_stopf; event and handle are ignored.
+ subroutine t_stopf(event, handle)
+ integer,parameter :: SHR_KIND_I8 = selected_int_kind (13) ! 8 byte integer
+ character(len=*), intent(in) :: event
+ integer(shr_kind_i8), optional :: handle
+ return
+ end subroutine t_stopf
+
+ !> no-op stand-in for perf_mod's t_adj_detailf; the adjustment is ignored.
+ subroutine t_adj_detailf(detail_adjustment)
+ integer, intent(in) :: detail_adjustment
+ return
+ end subroutine t_adj_detailf
+#endif
+
+end module profile
diff --git a/components/cism/glimmer-cism/libglimmer/writestats.c b/components/cism/glimmer-cism/libglimmer/writestats.c
new file mode 100644
index 0000000000..9bf384cf91
--- /dev/null
+++ b/components/cism/glimmer-cism/libglimmer/writestats.c
@@ -0,0 +1,129 @@
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ + +
+ + writestats.c - part of the CISM ice model +
+ + +
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ Copyright (C) 2009, 2010
+ CISM contributors - see AUTHORS file for list of contributors
+
+ This file is part of CISM.
+
+ CISM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or (at
+ your option) any later version.
+
+ CISM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with CISM. If not, see <http://www.gnu.org/licenses/>.
+
+ CISM is hosted on BerliOS.de:
+ https://developer.berlios.de/projects/glimmer-cism/
+
+ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+
+#include "writestats.h"
+#include "config.inc"
+
+/* NOTE(review): the header names were lost in extraction (bare "#include"
+ lines); the set below is a reconstruction - verify against upstream. */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>