diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 99dcf64150..8ca241ca36 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -2,7 +2,7 @@
name: Bug report
about: Fix something that's not working
title: 'Bugfix: '
-labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: bug'
+labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: bug'
assignees: ''
---
@@ -49,7 +49,7 @@ Describe the steps to reproduce the behavior:
### Projects and Milestone ###
- [ ] Select **Organization** level **Project** for support of the current coordinated release
-- [ ] Select **Repository** level **Project** for development toward the next official release or add **alert: NEED PROJECT ASSIGNMENT** label
+- [ ] Select **Repository** level **Project** for development toward the next official release or add **alert: NEED CYCLE ASSIGNMENT** label
- [ ] Select **Milestone** as the next bugfix version
## Define Related Issue(s) ##
diff --git a/.github/ISSUE_TEMPLATE/enhancement_request.md b/.github/ISSUE_TEMPLATE/enhancement_request.md
index 0b82c047c5..df95cc1587 100644
--- a/.github/ISSUE_TEMPLATE/enhancement_request.md
+++ b/.github/ISSUE_TEMPLATE/enhancement_request.md
@@ -2,7 +2,7 @@
name: Enhancement request
about: Improve something that it's currently doing
title: ''
-labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: enhancement'
+labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: enhancement'
assignees: ''
---
@@ -38,7 +38,7 @@ Consider breaking the enhancement down into sub-issues.
- [ ] Select **requestor(s)**
### Projects and Milestone ###
-- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED PROJECT ASSIGNMENT** label
+- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label
- [ ] Select **Milestone** as the next official version or **Future Versions**
## Define Related Issue(s) ##
diff --git a/.github/ISSUE_TEMPLATE/new_feature_request.md b/.github/ISSUE_TEMPLATE/new_feature_request.md
index 4b1ae69aff..c2e0179c28 100644
--- a/.github/ISSUE_TEMPLATE/new_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/new_feature_request.md
@@ -2,7 +2,7 @@
name: New feature request
about: Make it do something new
title: ''
-labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: new feature'
+labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: new feature'
assignees: ''
---
@@ -42,7 +42,7 @@ Consider breaking the new feature down into sub-issues.
- [ ] Select **requestor(s)**
### Projects and Milestone ###
-- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED PROJECT ASSIGNMENT** label
+- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label
- [ ] Select **Milestone** as the next official version or **Future Versions**
## Define Related Issue(s) ##
diff --git a/.github/ISSUE_TEMPLATE/sub-issue.md b/.github/ISSUE_TEMPLATE/sub-issue.md
index 77bf2b2844..855e27d83d 100644
--- a/.github/ISSUE_TEMPLATE/sub-issue.md
+++ b/.github/ISSUE_TEMPLATE/sub-issue.md
@@ -2,7 +2,7 @@
name: Sub-Issue
about: Break an issue down into smaller parts
title: ''
-labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: sub-issue'
+labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: sub-issue'
assignees: ''
---
@@ -28,5 +28,5 @@ This is a sub-issue of #*List the parent issue number here*.
- [ ] Select **requestor(s)**
### Projects and Milestone ###
-- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED PROJECT ASSIGNMENT** label
+- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label
- [ ] Select **Milestone** as the next official version or **Future Versions**
diff --git a/.github/ISSUE_TEMPLATE/task.md b/.github/ISSUE_TEMPLATE/task.md
index 561012a7f6..f175cdb678 100644
--- a/.github/ISSUE_TEMPLATE/task.md
+++ b/.github/ISSUE_TEMPLATE/task.md
@@ -2,7 +2,7 @@
name: Task
about: Describe something that needs to be done
title: ''
-labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: task'
+labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: task'
assignees: ''
---
@@ -38,7 +38,7 @@ Consider breaking the task down into sub-issues.
- [ ] Select **requestor(s)**
### Projects and Milestone ###
-- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED PROJECT ASSIGNMENT** label
+- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label
- [ ] Select **Milestone** as the next official version or **Future Versions**
## Define Related Issue(s) ##
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index d270ba6404..c147a4ef79 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -14,7 +14,12 @@ on:
- 'bugfix_**'
paths-ignore:
- 'docs/**'
-
+ - '.github/pull_request_template.md'
+ - '.github/ISSUE_TEMPLATE/**'
+ - '.github/labels/**'
+ - '**/README.md'
+ - '**/LICENSE.md'
+
pull_request:
types: [opened, reopened, synchronize]
branches:
@@ -22,7 +27,12 @@ on:
- 'main_v**'
paths-ignore:
- 'docs/**'
-
+ - '.github/pull_request_template.md'
+ - '.github/ISSUE_TEMPLATE/**'
+ - '.github/labels/**'
+ - '**/README.md'
+ - '**/LICENSE.md'
+
workflow_dispatch:
inputs:
force_tests:
diff --git a/.github/workflows/update_truth.yml b/.github/workflows/update_truth.yml
new file mode 100644
index 0000000000..a5cc330cab
--- /dev/null
+++ b/.github/workflows/update_truth.yml
@@ -0,0 +1,59 @@
+name: Create Truth Data Update Pull Request
+
+on:
+ workflow_dispatch:
+ inputs:
+ pull_requests:
+ description: 'Pull request(s) that warranted update, e.g. "#123" or "#123 and dtcenter/MET#123"'
+ required: true
+ change_summary:
+ description: 'Summary of changes to truth data'
+ required: true
+
+jobs:
+ update_truth:
+ name: "Update or create truth reference branch"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check if branch is develop or main_vX.Y
+ run: |
+ branch_name=${GITHUB_REF#refs/heads/}
+ echo "branch_name=$branch_name" >> $GITHUB_ENV
+ if [[ $branch_name == "develop" ]] || \
+ [[ $branch_name =~ ^main_v[0-9]+\.[0-9]+$ ]]; then
+ echo Branch is valid - $branch_name
+ exit 0
+ fi
+ echo ERROR: Branch is $branch_name - must be develop or match main_vX.Y
+ exit 1
+ - uses: actions/checkout@v3
+ name: Checkout repository
+ with:
+ fetch-depth: 0
+ token: ${{ secrets.METPLUS_BOT_TOKEN }}
+ - name: Resolve conflicts between branch and branch-ref
+ run: |
+ branch_name=${{ env.branch_name }}
+ cd ${GITHUB_WORKSPACE}
+ if [[ -z "$(git ls-remote --heads origin ${branch_name}-ref)" ]]; then
+ echo ERROR: ${branch_name}-ref does not exist
+ exit 1
+ fi
+
+ echo ${branch_name}-ref does exist -- update it
+ git config --global user.name "metplus-bot"
+ git config --global user.email "97135045+metplus-bot@users.noreply.github.com"
+ echo git checkout ${branch_name}
+ git checkout ${branch_name}
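+          # Merge with strategy "ours": keep this branch's content
+          # unchanged while recording ${branch_name}-ref as merged in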
+ echo git merge -s ours origin/${branch_name}-ref
+ git merge -s ours origin/${branch_name}-ref
+ echo git push origin ${branch_name}
+ git push origin ${branch_name}
+
+ - name: Create Pull Request
+ run: gh pr create --base $BASE --body "$BODY" --title "$TITLE"
+ env:
+ GH_TOKEN: ${{ github.token }}
+ BASE: ${{ env.branch_name }}-ref
+      BODY: |
+        ${{ github.event.inputs.change_summary }}
+        Created by @${{ github.actor }}
+ TITLE: Update ${{ env.branch_name }}-ref after ${{ github.event.inputs.pull_requests }}
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index b08da35999..cb3aab55c7 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -9,10 +9,14 @@ version: 2
#formats: all
formats: [pdf]
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.10"
+
# Optionally set the version of Python and requirements required to build your
# docs
python:
- version: 3.8
install:
- requirements: docs/requirements.txt
diff --git a/Makefile.am b/Makefile.am
index f3c2b7a83a..a1e75367dd 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -18,7 +18,7 @@
ACLOCAL_AMFLAGS = -I m4
-SUBDIRS = data src scripts/Rscripts scripts/python scripts/utility
+SUBDIRS = data src scripts/Rscripts scripts/python
if ENABLE_DEVELOPMENT
SUBDIRS += internal/test_util
diff --git a/Makefile.in b/Makefile.in
index 8236187480..aa3a1d74ff 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -158,7 +158,7 @@ ETAGS = etags
CTAGS = ctags
CSCOPE = cscope
DIST_SUBDIRS = data src scripts/Rscripts scripts/python \
- scripts/utility internal/test_util
+ internal/test_util
am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/config.h.in INSTALL \
compile config.guess config.sub depcomp install-sh missing \
ylwrap
@@ -350,8 +350,7 @@ top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
ACLOCAL_AMFLAGS = -I m4
-SUBDIRS = data src scripts/Rscripts scripts/python scripts/utility \
- $(am__append_1)
+SUBDIRS = data src scripts/Rscripts scripts/python $(am__append_1)
MAINTAINERCLEANFILES = \
Makefile.in \
aclocal.m4 \
diff --git a/configure b/configure
index d47929b214..12f9307754 100755
--- a/configure
+++ b/configure
@@ -6051,6 +6051,9 @@ fi
CPPFLAGS=$CPPFLAGS' -DMET_BASE="\"$(pkgdatadir)\""'
+# Add -std=c++11 to CXXFLAGS
+CXXFLAGS=$CXXFLAGS' -std=c++11'
+
# Define other variables for the makefiles
FC_LIBS=-lgfortran
@@ -8874,7 +8877,7 @@ done
# Create configured files
-ac_config_files="$ac_config_files Makefile scripts/Rscripts/Makefile scripts/Rscripts/include/Makefile scripts/python/Makefile scripts/utility/Makefile data/Makefile data/climo/Makefile data/climo/seeps/Makefile data/colortables/Makefile data/colortables/NCL_colortables/Makefile data/config/Makefile data/map/Makefile data/map/admin_by_country/Makefile data/poly/Makefile data/poly/HMT_masks/Makefile data/poly/NCEP_masks/Makefile data/wrappers/Makefile data/ps/Makefile data/table_files/Makefile data/tc_data/Makefile src/Makefile src/basic/Makefile src/basic/enum_to_string/Makefile src/basic/vx_cal/Makefile src/basic/vx_config/Makefile src/basic/vx_log/Makefile src/basic/vx_math/Makefile src/basic/vx_util/Makefile src/basic/vx_util_math/Makefile src/libcode/Makefile src/libcode/vx_afm/Makefile src/libcode/vx_analysis_util/Makefile src/libcode/vx_color/Makefile src/libcode/vx_data2d/Makefile src/libcode/vx_data2d_factory/Makefile src/libcode/vx_data2d_grib/Makefile src/libcode/vx_data2d_grib2/Makefile src/libcode/vx_data2d_nc_met/Makefile src/libcode/vx_data2d_nc_pinterp/Makefile src/libcode/vx_data2d_nccf/Makefile src/libcode/vx_geodesy/Makefile src/libcode/vx_gis/Makefile src/libcode/vx_gnomon/Makefile src/libcode/vx_grid/Makefile src/libcode/vx_gsl_prob/Makefile src/libcode/vx_nav/Makefile src/libcode/vx_solar/Makefile src/libcode/vx_nc_obs/Makefile src/libcode/vx_nc_util/Makefile src/libcode/vx_pb_util/Makefile src/libcode/vx_plot_util/Makefile src/libcode/vx_ps/Makefile src/libcode/vx_pxm/Makefile src/libcode/vx_render/Makefile src/libcode/vx_shapedata/Makefile src/libcode/vx_stat_out/Makefile src/libcode/vx_statistics/Makefile src/libcode/vx_time_series/Makefile src/libcode/vx_physics/Makefile src/libcode/vx_series_data/Makefile src/libcode/vx_regrid/Makefile src/libcode/vx_tc_util/Makefile src/libcode/vx_summary/Makefile src/libcode/vx_python3_utils/Makefile src/libcode/vx_data2d_python/Makefile src/libcode/vx_bool_calc/Makefile src/libcode/vx_pointdata_python/Makefile src/libcode/vx_seeps/Makefile src/tools/Makefile src/tools/core/Makefile src/tools/core/ensemble_stat/Makefile src/tools/core/grid_stat/Makefile src/tools/core/mode/Makefile src/tools/core/mode_analysis/Makefile src/tools/core/pcp_combine/Makefile src/tools/core/point_stat/Makefile src/tools/core/series_analysis/Makefile src/tools/core/stat_analysis/Makefile src/tools/core/wavelet_stat/Makefile src/tools/other/Makefile src/tools/other/ascii2nc/Makefile src/tools/other/lidar2nc/Makefile src/tools/other/gen_ens_prod/Makefile src/tools/other/gen_vx_mask/Makefile src/tools/other/gis_utils/Makefile src/tools/other/ioda2nc/Makefile src/tools/other/madis2nc/Makefile src/tools/other/mode_graphics/Makefile src/tools/other/modis_regrid/Makefile src/tools/other/pb2nc/Makefile src/tools/other/plot_data_plane/Makefile src/tools/other/plot_point_obs/Makefile src/tools/other/wwmca_tool/Makefile src/tools/other/gsi_tools/Makefile src/tools/other/regrid_data_plane/Makefile src/tools/other/point2grid/Makefile src/tools/other/shift_data_plane/Makefile src/tools/other/mode_time_domain/Makefile src/tools/other/grid_diag/Makefile src/tools/tc_utils/Makefile src/tools/tc_utils/tc_dland/Makefile src/tools/tc_utils/tc_pairs/Makefile src/tools/tc_utils/tc_stat/Makefile src/tools/tc_utils/tc_gen/Makefile src/tools/tc_utils/rmw_analysis/Makefile src/tools/tc_utils/tc_rmw/Makefile"
+ac_config_files="$ac_config_files Makefile scripts/Rscripts/Makefile scripts/Rscripts/include/Makefile scripts/python/Makefile scripts/python/examples/Makefile scripts/python/met/Makefile scripts/python/pyembed/Makefile scripts/python/utility/Makefile data/Makefile data/climo/Makefile data/climo/seeps/Makefile data/colortables/Makefile data/colortables/NCL_colortables/Makefile data/config/Makefile data/map/Makefile data/map/admin_by_country/Makefile data/poly/Makefile data/poly/HMT_masks/Makefile data/poly/NCEP_masks/Makefile data/ps/Makefile data/table_files/Makefile data/tc_data/Makefile src/Makefile src/basic/Makefile src/basic/enum_to_string/Makefile src/basic/vx_cal/Makefile src/basic/vx_config/Makefile src/basic/vx_log/Makefile src/basic/vx_math/Makefile src/basic/vx_util/Makefile src/basic/vx_util_math/Makefile src/libcode/Makefile src/libcode/vx_afm/Makefile src/libcode/vx_analysis_util/Makefile src/libcode/vx_color/Makefile src/libcode/vx_data2d/Makefile src/libcode/vx_data2d_factory/Makefile src/libcode/vx_data2d_grib/Makefile src/libcode/vx_data2d_grib2/Makefile src/libcode/vx_data2d_nc_met/Makefile src/libcode/vx_data2d_nc_pinterp/Makefile src/libcode/vx_data2d_nccf/Makefile src/libcode/vx_geodesy/Makefile src/libcode/vx_gis/Makefile src/libcode/vx_gnomon/Makefile src/libcode/vx_grid/Makefile src/libcode/vx_gsl_prob/Makefile src/libcode/vx_nav/Makefile src/libcode/vx_solar/Makefile src/libcode/vx_nc_obs/Makefile src/libcode/vx_nc_util/Makefile src/libcode/vx_pb_util/Makefile src/libcode/vx_plot_util/Makefile src/libcode/vx_ps/Makefile src/libcode/vx_pxm/Makefile src/libcode/vx_render/Makefile src/libcode/vx_shapedata/Makefile src/libcode/vx_stat_out/Makefile src/libcode/vx_statistics/Makefile src/libcode/vx_time_series/Makefile src/libcode/vx_physics/Makefile src/libcode/vx_series_data/Makefile src/libcode/vx_regrid/Makefile src/libcode/vx_tc_util/Makefile src/libcode/vx_summary/Makefile src/libcode/vx_python3_utils/Makefile src/libcode/vx_data2d_python/Makefile src/libcode/vx_bool_calc/Makefile src/libcode/vx_pointdata_python/Makefile src/libcode/vx_seeps/Makefile src/tools/Makefile src/tools/core/Makefile src/tools/core/ensemble_stat/Makefile src/tools/core/grid_stat/Makefile src/tools/core/mode/Makefile src/tools/core/mode_analysis/Makefile src/tools/core/pcp_combine/Makefile src/tools/core/point_stat/Makefile src/tools/core/series_analysis/Makefile src/tools/core/stat_analysis/Makefile src/tools/core/wavelet_stat/Makefile src/tools/other/Makefile src/tools/other/ascii2nc/Makefile src/tools/other/lidar2nc/Makefile src/tools/other/gen_ens_prod/Makefile src/tools/other/gen_vx_mask/Makefile src/tools/other/gis_utils/Makefile src/tools/other/ioda2nc/Makefile src/tools/other/madis2nc/Makefile src/tools/other/mode_graphics/Makefile src/tools/other/modis_regrid/Makefile src/tools/other/pb2nc/Makefile src/tools/other/plot_data_plane/Makefile src/tools/other/plot_point_obs/Makefile src/tools/other/wwmca_tool/Makefile src/tools/other/gsi_tools/Makefile src/tools/other/regrid_data_plane/Makefile src/tools/other/point2grid/Makefile src/tools/other/shift_data_plane/Makefile src/tools/other/mode_time_domain/Makefile src/tools/other/grid_diag/Makefile src/tools/tc_utils/Makefile src/tools/tc_utils/tc_dland/Makefile src/tools/tc_utils/tc_pairs/Makefile src/tools/tc_utils/tc_stat/Makefile src/tools/tc_utils/tc_gen/Makefile src/tools/tc_utils/rmw_analysis/Makefile src/tools/tc_utils/tc_rmw/Makefile"
if test -n "$MET_DEVELOPMENT"; then
@@ -9762,7 +9765,10 @@ do
"scripts/Rscripts/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Rscripts/Makefile" ;;
"scripts/Rscripts/include/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Rscripts/include/Makefile" ;;
"scripts/python/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/Makefile" ;;
- "scripts/utility/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/utility/Makefile" ;;
+ "scripts/python/examples/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/examples/Makefile" ;;
+ "scripts/python/met/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/met/Makefile" ;;
+ "scripts/python/pyembed/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/pyembed/Makefile" ;;
+ "scripts/python/utility/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/utility/Makefile" ;;
"data/Makefile") CONFIG_FILES="$CONFIG_FILES data/Makefile" ;;
"data/climo/Makefile") CONFIG_FILES="$CONFIG_FILES data/climo/Makefile" ;;
"data/climo/seeps/Makefile") CONFIG_FILES="$CONFIG_FILES data/climo/seeps/Makefile" ;;
@@ -9774,7 +9780,6 @@ do
"data/poly/Makefile") CONFIG_FILES="$CONFIG_FILES data/poly/Makefile" ;;
"data/poly/HMT_masks/Makefile") CONFIG_FILES="$CONFIG_FILES data/poly/HMT_masks/Makefile" ;;
"data/poly/NCEP_masks/Makefile") CONFIG_FILES="$CONFIG_FILES data/poly/NCEP_masks/Makefile" ;;
- "data/wrappers/Makefile") CONFIG_FILES="$CONFIG_FILES data/wrappers/Makefile" ;;
"data/ps/Makefile") CONFIG_FILES="$CONFIG_FILES data/ps/Makefile" ;;
"data/table_files/Makefile") CONFIG_FILES="$CONFIG_FILES data/table_files/Makefile" ;;
"data/tc_data/Makefile") CONFIG_FILES="$CONFIG_FILES data/tc_data/Makefile" ;;
diff --git a/configure.ac b/configure.ac
index 60cc07bcd7..e54e5ea3b3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1086,6 +1086,9 @@ AM_CONDITIONAL([ENABLE_DEVELOPMENT], [test -n "$MET_DEVELOPMENT"])
CPPFLAGS=$CPPFLAGS' -DMET_BASE="\"$(pkgdatadir)\""'
+# Add -std=c++11 to CXXFLAGS
+CXXFLAGS=$CXXFLAGS' -std=c++11'
+
# Define other variables for the makefiles
AC_SUBST(FC_LIBS, [-lgfortran])
@@ -1204,7 +1207,10 @@ AC_CONFIG_FILES([Makefile
scripts/Rscripts/Makefile
scripts/Rscripts/include/Makefile
scripts/python/Makefile
- scripts/utility/Makefile
+ scripts/python/examples/Makefile
+ scripts/python/met/Makefile
+ scripts/python/pyembed/Makefile
+ scripts/python/utility/Makefile
data/Makefile
data/climo/Makefile
data/climo/seeps/Makefile
@@ -1216,7 +1222,6 @@ AC_CONFIG_FILES([Makefile
data/poly/Makefile
data/poly/HMT_masks/Makefile
data/poly/NCEP_masks/Makefile
- data/wrappers/Makefile
data/ps/Makefile
data/table_files/Makefile
data/tc_data/Makefile
diff --git a/data/Makefile.am b/data/Makefile.am
index ac1483ba59..6cebff2c79 100644
--- a/data/Makefile.am
+++ b/data/Makefile.am
@@ -24,8 +24,7 @@ SUBDIRS = \
poly \
ps \
table_files \
- tc_data \
- wrappers
+ tc_data
topdir = $(pkgdatadir)
diff --git a/data/Makefile.in b/data/Makefile.in
index acf549d2de..2bf69df44f 100644
--- a/data/Makefile.in
+++ b/data/Makefile.in
@@ -362,8 +362,7 @@ SUBDIRS = \
poly \
ps \
table_files \
- tc_data \
- wrappers
+ tc_data
topdir = $(pkgdatadir)
top_DATA = \
diff --git a/data/wrappers/read_tmp_dataplane.py b/data/wrappers/read_tmp_dataplane.py
deleted file mode 100644
index 98bbe728d8..0000000000
--- a/data/wrappers/read_tmp_dataplane.py
+++ /dev/null
@@ -1,37 +0,0 @@
-########################################################################
-#
-# Reads temporary file into memory.
-#
-# usage: /path/to/python read_tmp_dataplane.py dataplane.tmp
-#
-########################################################################
-
-import sys
-import numpy as np
-import netCDF4 as nc
-
-met_info = {}
-netcdf_filename = sys.argv[1]
-
-# read NetCDF file
-ds = nc.Dataset(netcdf_filename, 'r')
-met_data = ds['met_data'][:]
-met_attrs = {}
-
-# grid is defined as a dictionary or string
-grid = {}
-for attr, attr_val in ds.__dict__.items():
- if 'grid.' in attr:
- grid_attr = attr.split('.')[1]
- grid[grid_attr] = attr_val
- else:
- met_attrs[attr] = attr_val
-
-if grid:
- met_attrs['grid'] = grid
-
-met_attrs['name'] = met_attrs['name_str']
-del met_attrs['name_str']
-met_info['met_data'] = met_data
-met_info['attrs'] = met_attrs
-
diff --git a/data/wrappers/read_tmp_point_nc.py b/data/wrappers/read_tmp_point_nc.py
deleted file mode 100644
index 0ef8eefc3a..0000000000
--- a/data/wrappers/read_tmp_point_nc.py
+++ /dev/null
@@ -1,26 +0,0 @@
-########################################################################
-#
-# Reads temporary point obs. file into memory.
-#
-# usage: /path/to/python read_tmp_point_nc.py tmp_output_filename
-#
-########################################################################
-
-import os
-import sys
-
-# add share/met/python directory to system path to find met_point_obs
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
- os.pardir, 'python')))
-from met_point_obs import met_point_obs
-from met_point_obs_nc import nc_point_obs
-
-netcdf_filename = sys.argv[1]
-
-# read NetCDF file
-print('{p} reading{f}'.format(p=met_point_obs.get_prompt(), f=netcdf_filename))
-point_obs_data = nc_point_obs()
-point_obs_data.read_data(netcdf_filename)
-
-met_point_data = point_obs_data.get_point_data()
-met_point_data['met_point_data'] = point_obs_data
diff --git a/data/wrappers/write_tmp_dataplane.py b/data/wrappers/write_tmp_dataplane.py
deleted file mode 100644
index 476d2348c3..0000000000
--- a/data/wrappers/write_tmp_dataplane.py
+++ /dev/null
@@ -1,75 +0,0 @@
-########################################################################
-#
-# Adapted from a script provided by George McCabe
-# Adapted by Randy Bullock
-#
-# usage: /path/to/python write_tmp_dataplane.py \
-# tmp_output_filename .py
-#
-########################################################################
-
-import os
-import sys
-import importlib.util
-import netCDF4 as nc
-
-print("Python Script:\t" + repr(sys.argv[0]))
-print("User Command:\t" + repr(' '.join(sys.argv[2:])))
-print("Temporary File:\t" + repr(sys.argv[1]))
-
-netcdf_filename = sys.argv[1]
-pyembed_module_name = sys.argv[2]
-sys.argv = sys.argv[2:]
-
-# add share/met/python directory to system path to find met_point_obs
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
- os.pardir, 'python')))
-
-# append user script dir to system path
-pyembed_dir, pyembed_file = os.path.split(pyembed_module_name)
-if pyembed_dir:
- sys.path.insert(0, pyembed_dir)
-
-if not pyembed_module_name.endswith('.py'):
- pyembed_module_name += '.py'
-
-user_base = os.path.basename(pyembed_module_name).replace('.py','')
-
-spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name)
-met_in = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(met_in)
-
-met_info = {'met_data': met_in.met_data}
-if hasattr(met_in.met_data, 'attrs') and met_in.met_data.attrs:
- attrs = met_in.met_data.attrs
-else:
- attrs = met_in.attrs
-met_info['attrs'] = attrs
-
-# determine fill value
-try:
- fill = met_data.get_fill_value()
-except:
- fill = -9999.
-
-# write NetCDF file
-ds = nc.Dataset(netcdf_filename, 'w')
-
-# create dimensions and variable
-nx, ny = met_in.met_data.shape
-ds.createDimension('x', nx)
-ds.createDimension('y', ny)
-dp = ds.createVariable('met_data', met_in.met_data.dtype, ('x', 'y'), fill_value=fill)
-dp[:] = met_in.met_data
-
-# append attributes
-for attr, attr_val in met_info['attrs'].items():
- if attr == 'name':
- setattr(ds, 'name_str', attr_val)
- elif type(attr_val) == dict:
- for key in attr_val:
- setattr(ds, attr + '.' + key, attr_val[key])
- else:
- setattr(ds, attr, attr_val)
-
-ds.close()
diff --git a/data/wrappers/write_tmp_mpr.py b/data/wrappers/write_tmp_mpr.py
deleted file mode 100644
index 3eee0379f5..0000000000
--- a/data/wrappers/write_tmp_mpr.py
+++ /dev/null
@@ -1,43 +0,0 @@
-########################################################################
-#
-# Adapted from a script provided by George McCabe
-# Adapted by Randy Bullock
-#
-# usage: /path/to/python write_tmp_mpr.py \
-# tmp_output_filename .py
-#
-########################################################################
-
-import os
-import sys
-import importlib.util
-
-print("Python Script:\t" + repr(sys.argv[0]))
-print("User Command:\t" + repr(' '.join(sys.argv[2:])))
-print("Temporary File:\t" + repr(sys.argv[1]))
-
-tmp_filename = sys.argv[1]
-pyembed_module_name = sys.argv[2]
-sys.argv = sys.argv[2:]
-
-# add share/met/python directory to system path to find met_point_obs
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
- os.pardir, 'python')))
-
-# append user script dir to system path
-pyembed_dir, pyembed_file = os.path.split(pyembed_module_name)
-if pyembed_dir:
- sys.path.insert(0, pyembed_dir)
-
-if not pyembed_module_name.endswith('.py'):
- pyembed_module_name += '.py'
-
-user_base = os.path.basename(pyembed_module_name).replace('.py','')
-
-spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name)
-met_in = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(met_in)
-
-f = open(tmp_filename, 'w')
-for line in met_in.mpr_data:
- f.write(str(line) + '\n')
diff --git a/data/wrappers/write_tmp_point.py b/data/wrappers/write_tmp_point.py
deleted file mode 100644
index 916fca5549..0000000000
--- a/data/wrappers/write_tmp_point.py
+++ /dev/null
@@ -1,43 +0,0 @@
-########################################################################
-#
-# Adapted from a script provided by George McCabe
-# Adapted by Randy Bullock
-#
-# usage: /path/to/python write_tmp_point.py \
-# tmp_output_filename .py
-#
-########################################################################
-
-import os
-import sys
-import importlib.util
-
-print("Python Script:\t" + repr(sys.argv[0]))
-print("User Command:\t" + repr(' '.join(sys.argv[2:])))
-print("Temporary File:\t" + repr(sys.argv[1]))
-
-tmp_filename = sys.argv[1]
-pyembed_module_name = sys.argv[2]
-sys.argv = sys.argv[2:]
-
-# add share/met/python directory to system path to find met_point_obs
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
- os.pardir, 'python')))
-
-# append user script dir to system path
-pyembed_dir, pyembed_file = os.path.split(pyembed_module_name)
-if pyembed_dir:
- sys.path.insert(0, pyembed_dir)
-
-if not pyembed_module_name.endswith('.py'):
- pyembed_module_name += '.py'
-
-user_base = os.path.basename(pyembed_module_name).replace('.py','')
-
-spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name)
-met_in = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(met_in)
-
-f = open(tmp_filename, 'w')
-for line in met_in.point_data:
- f.write(str(line) + '\n')
diff --git a/data/wrappers/write_tmp_point_nc.py b/data/wrappers/write_tmp_point_nc.py
deleted file mode 100644
index 063a2e98cc..0000000000
--- a/data/wrappers/write_tmp_point_nc.py
+++ /dev/null
@@ -1,55 +0,0 @@
-########################################################################
-#
-# Adapted from a script provided by George McCabe
-# Adapted by Howard Soh
-#
-# usage: /path/to/python write_tmp_point_nc.py \
-# tmp_output_filename .py
-#
-########################################################################
-
-import os
-import sys
-import importlib.util
-
-# add share/met/python directory to system path to find met_point_obs
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
- os.pardir, 'python')))
-
-from met_point_obs import met_point_obs
-from met_point_obs_nc import nc_point_obs
-
-PROMPT = met_point_obs.get_prompt()
-print("{p} Python Script:\t".format(p=PROMPT) + repr(sys.argv[0]))
-print("{p} User Command:\t".format(p=PROMPT) + repr(' '.join(sys.argv[2:])))
-print("{p} Temporary File:\t".format(p=PROMPT) + repr(sys.argv[1]))
-
-tmp_filename = sys.argv[1]
-pyembed_module_name = sys.argv[2]
-sys.argv = sys.argv[2:]
-
-# append user script dir to system path
-pyembed_dir, pyembed_file = os.path.split(pyembed_module_name)
-if pyembed_dir:
- sys.path.insert(0, pyembed_dir)
-
-if not pyembed_module_name.endswith('.py'):
- pyembed_module_name += '.py'
-
-user_base = os.path.basename(pyembed_module_name).replace('.py','')
-
-spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name)
-met_in = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(met_in)
-
-if hasattr(met_in, 'point_obs_data'):
- met_in.point_obs_data.save_ncfile(tmp_filename)
-else:
- if hasattr(met_in.met_point_data, 'point_obs_data'):
- met_in.met_point_data['point_obs_data'].save_ncfile(tmp_filename)
- else:
- tmp_point_obs = nc_point_obs()
- tmp_point_obs.put_data(met_in.met_point_data)
- tmp_point_obs.save_ncfile(tmp_filename)
-
-#print('{p} writing {f}'.format(p=PROMPT, f=tmp_filename))
diff --git a/docs/Users_Guide/appendixA.rst b/docs/Users_Guide/appendixA.rst
index 6fe28f380f..099498402c 100644
--- a/docs/Users_Guide/appendixA.rst
+++ b/docs/Users_Guide/appendixA.rst
@@ -12,240 +12,252 @@ File-IO
Q. How do I improve the speed of MET tools using Gen-Vx-Mask?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ .. dropdown:: Answer
+
+ The main reason to run gen_vx_mask is to make the MET
+ statistics tools (e.g. point_stat, grid_stat, or ensemble_stat) run
+ faster. The verification masking regions in those tools can be specified
+ as Lat/Lon polyline files or the NetCDF output of gen_vx_mask. However,
+ determining which grid points are inside/outside a polyline region can be
+ slow if the polyline contains many points or the grid is dense. Running
+ gen_vx_mask once to create a binary mask is much more efficient than
+ recomputing the mask when each MET statistics tool is run. If the polyline
+   only contains a small number of points or the grid is sparse, running
+ gen_vx_mask first would only save a second or two.
-A.
-The main reason to run gen_vx_mask is to make the MET
-statistics tools (e.g. point_stat, grid_stat, or ensemble_stat) run
-faster. The verification masking regions in those tools can be specified
-as Lat/Lon polyline files or the NetCDF output of gen_vx_mask. However,
-determining which grid points are inside/outside a polyline region can be
-slow if the polyline contains many points or the grid is dense. Running
-gen_vx_mask once to create a binary mask is much more efficient than
-recomputing the mask when each MET statistics tool is run. If the polyline
-only contains a small number of points or the grid is sparse running
-gen_vx_mask first would only save a second or two.
-
+
Q. How do I use map_data?
-^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-The MET repository includes several map data files. Users can modify which
-map datasets are included in the plots created by modifying the
-configuration files for those tools. The default map datasets are defined
-by the map_data dictionary in the ConfigMapData file.
+ .. dropdown:: Answer
+
+   The MET repository includes several map data files. Users can modify
+   which map datasets are included in the plots by modifying the
+   configuration files for those tools. The default map datasets are defined
+ by the map_data dictionary in the ConfigMapData file.
-.. code-block:: none
+ .. code-block:: none
- map_data = {
+ map_data = {
- line_color = [ 25, 25, 25 ]; // rgb triple values, 0-255
- line_width = 0.5;
- line_dash = "";
+ line_color = [ 25, 25, 25 ]; // rgb triple values, 0-255
+ line_width = 0.5;
+ line_dash = "";
- source = [
- { file_name = "MET_BASE/map/country_data"; },
- { file_name = "MET_BASE/map/usa_state_data"; },
- { file_name = "MET_BASE/map/major_lakes_data"; }
- ];
- }
+ source = [
+ { file_name = "MET_BASE/map/country_data"; },
+ { file_name = "MET_BASE/map/usa_state_data"; },
+ { file_name = "MET_BASE/map/major_lakes_data"; }
+ ];
+ }
-Users can modify the ConfigMapData contents prior to running 'make install'.
-This will change the default map data for all of the MET tools which plots.
-Alternatively, users can copy/paste/modify the map_data dictionary into the
-configuration file for a MET tool. For example, you could add map_data to
-the end of the MODE configuration file to customize plots created by MODE.
+ Users can modify the ConfigMapData contents prior to running
+ 'make install'.
+   This will change the default map data for all of the MET tools that create plots.
+ Alternatively, users can copy/paste/modify the map_data dictionary into the
+ configuration file for a MET tool. For example, you could add map_data to
+ the end of the MODE configuration file to customize plots created by MODE.
-Here is an example of running plot_data_plane and specifying the map_data
-in the configuration string on the command line:
+ Here is an example of running plot_data_plane and specifying the map_data
+ in the configuration string on the command line:
-.. code-block:: none
-
- plot_data_plane
- sample.grib china_tmp_2m_admin.ps \
- 'name="TMP"; level="Z2"; \
- map_data = { source = [ { file_name = \
- "${MET_BUILD_BASE}/data/map/admin_by_country/admin_China_data"; } \
- ]; }'
+ .. code-block:: none
+
+      plot_data_plane \
+ sample.grib china_tmp_2m_admin.ps \
+ 'name="TMP"; level="Z2"; \
+ map_data = { source = [ { file_name = \
+ "${MET_BUILD_BASE}/data/map/admin_by_country/admin_China_data"; } \
+ ]; }'
Q. How can I understand the number of matched pairs?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Statistics are computed on matched forecast/observation pairs data.
-For example, if the dimension of the grid is 37x37 up to
-1369 matched pairs are possible. However, if the forecast or
-observation contains bad data at a point, that matched pair would
-not be included in the calculations. There are a number of reasons that
-observations could be rejected - mismatches in station id, variable names,
-valid times, bad values, data off the grid, etc.
-For example, if the forecast field contains missing data around the
-edge of the domain, then that is a reason there may be 992 matched pairs
-instead of 1369. Users can use the ncview tool to look at an example
-netCDF file or run their files through plot_data_plane to help identify
-any potential issues.
-
-One common support question is "Why am I getting 0 matched pairs from
-Point-Stat?". As mentioned above, there are many reasons why point
-observations can be excluded from your analysis. If running point_stat with
-at least verbosity level 2 (-v 2, the default value), zero matched pairs
-will result in the following type of log messages to be printed:
-
-.. code-block:: none
-
- DEBUG 2: Processing TMP/Z2 versus TMP/Z2, for observation type ADPSFC, over region FULL, for interpolation method UW_MEAN(1), using 0 pairs.
- DEBUG 2: Number of matched pairs = 0
- DEBUG 2: Observations processed = 1166
- DEBUG 2: Rejected: station id = 0
- DEBUG 2: Rejected: obs var name = 1166
- DEBUG 2: Rejected: valid time = 0
- DEBUG 2: Rejected: bad obs value = 0
- DEBUG 2: Rejected: off the grid = 0
- DEBUG 2: Rejected: topography = 0
- DEBUG 2: Rejected: level mismatch = 0
- DEBUG 2: Rejected: quality marker = 0
- DEBUG 2: Rejected: message type = 0
- DEBUG 2: Rejected: masking region = 0
- DEBUG 2: Rejected: bad fcst value = 0
- DEBUG 2: Rejected: bad climo mean = 0
- DEBUG 2: Rejected: bad climo stdev = 0
- DEBUG 2: Rejected: mpr filter = 0
- DEBUG 2: Rejected: duplicates = 0
-
-This list of the rejection reason counts above matches the order in
-which the filtering logic is applied in the code. In this example,
-none of the point observations match the variable name requested
-in the configuration file. So all of the 1166 observations are rejected
-for the same reason.
+ .. dropdown:: Answer
+
+   Statistics are computed on matched forecast/observation pairs.
+ For example, if the dimension of the grid is 37x37 up to
+ 1369 matched pairs are possible. However, if the forecast or
+ observation contains bad data at a point, that matched pair would
+ not be included in the calculations. There are a number of reasons that
+ observations could be rejected - mismatches in station id, variable names,
+ valid times, bad values, data off the grid, etc.
+ For example, if the forecast field contains missing data around the
+ edge of the domain, then that is a reason there may be 992 matched pairs
+ instead of 1369. Users can use the ncview tool to look at an example
+ netCDF file or run their files through plot_data_plane to help identify
+ any potential issues.
+
+ One common support question is "Why am I getting 0 matched pairs from
+ Point-Stat?". As mentioned above, there are many reasons why point
+ observations can be excluded from your analysis. If running point_stat with
+ at least verbosity level 2 (-v 2, the default value), zero matched pairs
+   will result in the following type of log messages being printed:
+
+ .. code-block:: none
+
+ DEBUG 2: Processing TMP/Z2 versus TMP/Z2, for observation type ADPSFC, over region FULL, for interpolation method UW_MEAN(1), using 0 pairs.
+ DEBUG 2: Number of matched pairs = 0
+ DEBUG 2: Observations processed = 1166
+ DEBUG 2: Rejected: station id = 0
+ DEBUG 2: Rejected: obs var name = 1166
+ DEBUG 2: Rejected: valid time = 0
+ DEBUG 2: Rejected: bad obs value = 0
+ DEBUG 2: Rejected: off the grid = 0
+ DEBUG 2: Rejected: topography = 0
+ DEBUG 2: Rejected: level mismatch = 0
+ DEBUG 2: Rejected: quality marker = 0
+ DEBUG 2: Rejected: message type = 0
+ DEBUG 2: Rejected: masking region = 0
+ DEBUG 2: Rejected: bad fcst value = 0
+ DEBUG 2: Rejected: bad climo mean = 0
+ DEBUG 2: Rejected: bad climo stdev = 0
+ DEBUG 2: Rejected: mpr filter = 0
+ DEBUG 2: Rejected: duplicates = 0
+
+ This list of the rejection reason counts above matches the order in
+ which the filtering logic is applied in the code. In this example,
+ none of the point observations match the variable name requested
+ in the configuration file. So all of the 1166 observations are rejected
+ for the same reason.
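+
+   When triaging these logs by hand gets tedious, the rejection counts can
+   be tallied with a short script. This is just an illustrative sketch,
+   not a MET utility, and the log file name is a placeholder:
+
+   .. code-block:: python
+
+      import re
+
+      # Sum each "Rejected: <reason> = <count>" line from a -v 2 log
+      counts = {}
+      with open('point_stat.log') as f:
+          for line in f:
+              m = re.search(r'Rejected: (.+?)\s*=\s*(\d+)', line)
+              if m:
+                  reason, n = m.group(1).strip(), int(m.group(2))
+                  counts[reason] = counts.get(reason, 0) + n
+
+      # Report the non-zero rejection reasons, largest first
+      for reason, n in sorted(counts.items(), key=lambda kv: -kv[1]):
+          if n > 0:
+              print(f'{reason:20s} {n}')
+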
Q. What types of NetCDF files can MET read?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-There are three flavors of NetCDF that MET can read directly.
+ .. dropdown:: Answer
+
+ There are three flavors of NetCDF that MET can read directly.
-1. Gridded NetCDF output from one of the MET tools
+ 1. Gridded NetCDF output from one of the MET tools
-2. Output from the WRF model that has been post-processed using the wrf_interp utility
+ 2. Output from the WRF model that has been post-processed using
+ the wrf_interp utility
-3. NetCDF data following the `climate-forecast (CF) convention
-   <https://cfconventions.org/>`_
+   3. NetCDF data following the `climate-forecast (CF) convention
+      <https://cfconventions.org/>`_
-Lastly, users can write python scripts to pass data that's gridded to the
-MET tools in memory. If the data doesn't fall into one of those categories,
-then it's not a gridded dataset that MET can handle directly. Satellite data,
-in general, will not be gridded. Typically it contains a dense mesh of data at
-lat/lon points, but typically those lat/lon points are not evenly spaced onto
-a regular grid.
+   Lastly, users can write python scripts to pass gridded data to the
+   MET tools in memory. If the data doesn't fall into one of those categories,
+   then it's not a gridded dataset that MET can handle directly.
+   Satellite data, in general, will not be gridded. Typically it
+   contains a dense mesh of data at lat/lon points, but those
+   points are not evenly spaced onto a regular grid.
-While MET's point2grid tool does support some satellite data inputs, it is
-limited. Using python embedding is another option for handling new datasets
-not supported natively by MET.
+ While MET's point2grid tool does support some satellite data inputs, it is
+ limited. Using python embedding is another option for handling new datasets
+ not supported natively by MET.
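+
+   As a sketch of that in-memory option, a python embedding script for a
+   gridded field defines a 2D data plane named met_data plus a dictionary
+   of attributes named attrs. All of the values below are placeholders,
+   and a named grid is only one of the accepted ways to fill the 'grid'
+   entry:
+
+   .. code-block:: python
+
+      import numpy as np
+
+      # 2D field handed to the MET tool, e.g. decoded from a custom format
+      met_data = np.random.rand(129, 185)
+
+      attrs = {
+         'valid': '20230801_120000', 'init': '20230801_000000',
+         'lead': '120000', 'accum': '000000',
+         'name': 'CUSTOM', 'long_name': 'custom field',
+         'level': 'SFC', 'units': 'none',
+         'grid': 'G212'   # named grid or a grid specification string
+      }
+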
Q. How do I choose a time slice in a NetCDF file?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-When processing NetCDF files, the level information needs to be
-specified to tell MET which 2D slice of data to use. The index is selected from
-a value when it starts with "@" for vertical level (pressure or height)
-and time. The actual time, @YYYYMMDD_HHMM, is allowed instead of selecting
-the time index.
-
-Let's use plot_data_plane as an example:
-
-.. code-block:: none
-
- plot_data_plane \
- MERGE_20161201_20170228.nc \
- obs.ps \
- 'name="APCP"; level="(5,*,*)";'
-
- plot_data_plane \
- gtg_obs_forecast.20130730.i00.f00.nc \
- altitude_20000.ps \
- 'name = "edr"; level = "(@20130730_0000,@20000,*,*)";'
-
-Assuming that the first array is the time, this will select the 6-th
-time slice of the APCP data and plot it since these indices are 0-based.
+ .. dropdown:: Answer
+
+ When processing NetCDF files, the level information needs to be
+ specified to tell MET which 2D slice of data to use.
+      An index that starts with "@" selects by value rather than by
+      position, for both the vertical level (pressure or height) and the
+      time dimension. The actual time, @YYYYMMDD_HHMM, is allowed instead
+      of selecting the time index.
+
+ Let's use plot_data_plane as an example:
+
+ .. code-block:: none
+
+ plot_data_plane \
+ MERGE_20161201_20170228.nc \
+ obs.ps \
+ 'name="APCP"; level="(5,*,*)";'
+
+ plot_data_plane \
+ gtg_obs_forecast.20130730.i00.f00.nc \
+ altitude_20000.ps \
+ 'name = "edr"; level = "(@20130730_0000,@20000,*,*)";'
+
+      Assuming that the first dimension is time, the first command will
+      select the 6th time slice of the APCP data and plot it, since these
+      indices are 0-based.
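+
+      If it is unclear which dimension is which, the file can be inspected
+      first. Here is a minimal sketch using the netCDF4 Python package and
+      the file and variable names from the example above:
+
+      .. code-block:: python
+
+         import netCDF4 as nc
+
+         ds = nc.Dataset('MERGE_20161201_20170228.nc', 'r')
+         print(ds['APCP'].dimensions)   # e.g. ('time', 'lat', 'lon')
+         apcp = ds['APCP'][5, :, :]     # the 6th time slice, 0-based
+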
Q. How do I use the UNIX time conversion?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Regarding the timing information in the NetCDF variable attributes:
+ .. dropdown:: Answer
+
+ Regarding the timing information in the NetCDF variable attributes:
-.. code-block:: none
-
- APCP_24:init_time_ut = 1306886400 ;
-
-“ut” stands for UNIX time, which is the number of seconds
-since Jan 1, 1970. It is a convenient way of storing timing
-information since it is easy to add/subtract. The UNIX date command
-can be used to convert back/forth between unix time and time strings:
+ .. code-block:: none
-To convert unix time to ymd_hms date:
+ APCP_24:init_time_ut = 1306886400 ;
-.. code-block:: none
-
- date -ud '1970-01-01 UTC '1306886400' seconds' +%Y%m%d_%H%M%S 20110601_000000
-
-To convert ymd_hms to unix date:
-
-.. code-block:: none
-
- date -ud ''2011-06-01' UTC '00:00:00'' +%s 1306886400
-
-Regarding TRMM data, it may be easier to work with the binary data and
-use the trmm2nc.R script described on this
-`page `_
-under observation datasets.
-
-Follow the TRMM binary links to either the 3 or 24-hour accumulations,
-save the files, and run them through that script. That is faster
-and easier than trying to get an ASCII dump. That Rscript can also
-subset the TRMM data if needed. Look for the section of it titled
-"Output domain specification" and define the lat/lon's that needs
-to be included in the output.
-
-Q. Does MET use a fixed-width output format for its ASCII output files?
+ “ut” stands for UNIX time, which is the number of seconds
+ since Jan 1, 1970. It is a convenient way of storing timing
+ information since it is easy to add/subtract. The UNIX date command
+ can be used to convert back/forth between unix time and time strings:
+
+ To convert unix time to ymd_hms date:
+
+ .. code-block:: none
+
+      date -ud '1970-01-01 UTC '1306886400' seconds' +%Y%m%d_%H%M%S
+      20110601_000000
+
+ To convert ymd_hms to unix date:
+
+ .. code-block:: none
+
+      date -ud ''2011-06-01' UTC '00:00:00'' +%s
+      1306886400
+
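+   The same conversions can be scripted in Python, which sidesteps the
+   shell-quoting pitfalls of the date command. A small sketch using only
+   the standard library:
+
+   .. code-block:: python
+
+      from datetime import datetime, timezone
+
+      # unix time -> ymd_hms, mirroring the date commands above
+      dt = datetime.fromtimestamp(1306886400, tz=timezone.utc)
+      print(dt.strftime('%Y%m%d_%H%M%S'))   # 20110601_000000
+
+      # ymd_hms -> unix time
+      dt = datetime.strptime('20110601_000000', '%Y%m%d_%H%M%S')
+      print(int(dt.replace(tzinfo=timezone.utc).timestamp()))   # 1306886400
+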
+ Regarding TRMM data, it may be easier to work with the binary data and
+ use the trmm2nc.R script described on this
+ `page `_
+ under observation datasets.
+
+ Follow the TRMM binary links to either the 3 or 24-hour accumulations,
+ save the files, and run them through that script. That is faster
+ and easier than trying to get an ASCII dump. That Rscript can also
+ subset the TRMM data if needed. Look for the section of it titled
+ "Output domain specification" and define the lat/lon's that needs
+ to be included in the output.
+
+Q. Does MET use a fixed-width output format for its ASCII output files?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-MET does not use the Fortran-like fixed width format in its
-ASCII output file. Instead, the column widths are adjusted for each
-run to insert at least one space between adjacent columns. The header
-columns of the MET output contain user-defined strings which may be
-of arbitrary length. For example, columns such as MODEL, OBTYPE, and
-DESC may be set by the user to any string value. Additionally, the
-amount of precision written is also configurable. The
-"output_precision" config file entry can be changed from its default
-value of 5 decimal places to up to 12 decimal places, which would also
-impact the column widths of the output.
-
-Due to these issues, it is not possible to select a reasonable fixed
-width for each column ahead of time. The AsciiTable class in MET does
-a lot of work to line up the output columns, to make sure there is
-at least one space between them.
-
-If a fixed-width format is needed, the easiest option would be
-writing a script to post-process the MET output into the fixed-width
-format that is needed or that the code expects.
+ .. dropdown:: Answer
+
+ MET does not use the Fortran-like fixed width format in its
+ ASCII output file. Instead, the column widths are adjusted for each
+ run to insert at least one space between adjacent columns. The header
+ columns of the MET output contain user-defined strings which may be
+ of arbitrary length. For example, columns such as MODEL, OBTYPE, and
+ DESC may be set by the user to any string value. Additionally, the
+ amount of precision written is also configurable. The
+ "output_precision" config file entry can be changed from its default
+ value of 5 decimal places to up to 12 decimal places, which would also
+ impact the column widths of the output.
+
+ Due to these issues, it is not possible to select a reasonable fixed
+ width for each column ahead of time. The AsciiTable class in MET does
+ a lot of work to line up the output columns, to make sure there is
+ at least one space between them.
+
+ If a fixed-width format is needed, the easiest option would be
+ writing a script to post-process the MET output into the fixed-width
+ format that is needed or that the code expects.
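+
+   As a sketch of such post-processing, the whitespace-delimited columns
+   can be padded to a fixed width in a few lines of Python. The file names
+   and the 20-character width are placeholders:
+
+   .. code-block:: python
+
+      # Pad every column of a MET ASCII output file to a fixed width
+      with open('point_stat_output.stat') as f:
+          rows = [line.split() for line in f if line.strip()]
+
+      with open('fixed_width.stat', 'w') as f:
+          for row in rows:
+              f.write(''.join(col.ljust(20) for col in row) + '\n')
+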
Q. Do the ASCII output files created by MET use scientific notation?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-By default, the ASCII output files created by MET make use of
-scientific notation when appropriate. The formatting of the
-numbers that the AsciiTable class writes is handled by a call
-to printf. The "%g" formatting option can result in
-scientific notation:
-http://www.cplusplus.com/reference/cstdio/printf/
+ .. dropdown:: Answer
+
+ By default, the ASCII output files created by MET make use of
+ scientific notation when appropriate. The formatting of the
+ numbers that the AsciiTable class writes is handled by a call
+ to printf. The "%g" formatting option can result in
+ scientific notation:
+ http://www.cplusplus.com/reference/cstdio/printf/
-It has been recommended that a configuration option be added to
-MET to disable the use of scientific notation. That enhancement
-is planned for a future release.
+ It has been recommended that a configuration option be added to
+ MET to disable the use of scientific notation. That enhancement
+ is planned for a future release.
Gen-Vx-Mask
-----------
@@ -253,67 +265,69 @@ Gen-Vx-Mask
Q. I have a list of stations to use for verification. I also have a poly region defined. If I specify both of these should the result be a union of them?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-These settings are defined in the "mask" section of the Point-Stat
-configuration file. You can define masking regions in one of 3 ways,
-as a "grid", a "poly" line file, or a "sid" list of station ID's.
+ .. dropdown:: Answer
+
+ These settings are defined in the "mask" section of the Point-Stat
+ configuration file. You can define masking regions in one of 3 ways,
+ as a "grid", a "poly" line file, or a "sid" list of station ID's.
-If you specify one entry for "poly" and one entry for "sid", you
-should see output for those two different masks. Note that each of
-these settings is an array of values, as indicated by the square
-brackets "[]" in the default config file. If you specify 5 grids,
-3 poly's, and 2 SID lists, you'd get output for those 10 separate
-masking regions. Point-Stat does not compute unions or intersections
-of masking regions. Instead, they are each processed separately.
+ If you specify one entry for "poly" and one entry for "sid", you
+ should see output for those two different masks. Note that each of
+ these settings is an array of values, as indicated by the square
+ brackets "[]" in the default config file. If you specify 5 grids,
+   3 polys, and 2 SID lists, you'd get output for those 10 separate
+ masking regions. Point-Stat does not compute unions or intersections
+ of masking regions. Instead, they are each processed separately.
-Is it true that you really want to use a polyline to define an area
-and then use a SID list to capture additional points outside of
-that polyline?
+ Is it true that you really want to use a polyline to define an area
+ and then use a SID list to capture additional points outside of
+ that polyline?
-If so, your options are:
+ If so, your options are:
-1. Define one single SID list which include all the points currently
- inside the polyline as well as the extra ones outside.
+   1. Define one single SID list which includes all the points currently
+ inside the polyline as well as the extra ones outside.
-2. Continue verifying using one polyline and one SID list and
- write partial sums and contingency table counts.
+ 2. Continue verifying using one polyline and one SID list and
+ write partial sums and contingency table counts.
-Then aggregate the results together by running a Stat-Analysis job.
+ Then aggregate the results together by running a Stat-Analysis job.
Q. How do I define a masking region with a GFS file?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Grab a sample GFS file:
+ .. dropdown:: Answer
+
+ Grab a sample GFS file:
+
+ .. code-block:: none
-.. code-block:: none
-
- wget
- http://www.ftp.ncep.noaa.gov/data/nccf/com/gfs/prod/gfs/2016102512/gfs.t12z.pgrb2.0p50.f000
-
-Use the MET regrid_data_plane tool to put some data on a
-lat/lon grid over Europe:
+      wget \
+ http://www.ftp.ncep.noaa.gov/data/nccf/com/gfs/prod/gfs/2016102512/gfs.t12z.pgrb2.0p50.f000
-.. code-block:: none
+ Use the MET regrid_data_plane tool to put some data on a
+ lat/lon grid over Europe:
- regrid_data_plane gfs.t12z.pgrb2.0p50.f000 \
- 'latlon 100 100 25 0 0.5 0.5' gfs_euro.nc -field 'name="TMP"; level="Z2";'
+ .. code-block:: none
-Run the MET gen_vx_mask tool to apply your polyline to the European domain:
+ regrid_data_plane gfs.t12z.pgrb2.0p50.f000 \
+ 'latlon 100 100 25 0 0.5 0.5' gfs_euro.nc -field 'name="TMP"; level="Z2";'
-.. code-block:: none
+ Run the MET gen_vx_mask tool to apply your polyline to the European domain:
- gen_vx_mask gfs_euro.nc POLAND.poly POLAND_mask.nc
+ .. code-block:: none
-Run the MET plot_data_plane tool to display the resulting mask field:
+ gen_vx_mask gfs_euro.nc POLAND.poly POLAND_mask.nc
-.. code-block:: none
-
- plot_data_plane POLAND_mask.nc POLAND_mask.ps 'name="POLAND"; level="(*,*)";'
+ Run the MET plot_data_plane tool to display the resulting mask field:
-In this example, the mask is in roughly the right spot, but there
-are obvious problems with the latitude and longitude values used
-to define that mask for Poland.
+ .. code-block:: none
+
+ plot_data_plane POLAND_mask.nc POLAND_mask.ps 'name="POLAND"; level="(*,*)";'
+
+ In this example, the mask is in roughly the right spot, but there
+ are obvious problems with the latitude and longitude values used
+ to define that mask for Poland.
Grid-Stat
---------
@@ -321,277 +335,286 @@ Grid-Stat
Q. How do I define a complex masking region?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-A user can define intersections and unions of multiple fields to define masks.
-Prior to running Grid-Stat, the user can run the Gen-VX-Mask tool one or
-more times to define a more complex masking area by thresholding multiple
-fields.
+ .. dropdown:: Answer
+
+ A user can define intersections and unions of multiple fields to
+ define masks.
+ Prior to running Grid-Stat, the user can run the Gen-VX-Mask tool one or
+ more times to define a more complex masking area by thresholding multiple
+ fields.
-For example, using a forecast GRIB file (fcst.grb) which contains 2 records,
-one for 2-m temperature and a second for 6-hr accumulated precip. The only
-grid points that are desired are grid points below freezing with non-zero
-precip. The user should run Gen-Vx-Mask twice - once to define the
-temperature mask and a second time to intersect that with the precip mask:
+   For example, suppose a forecast GRIB file (fcst.grb) contains
+   2 records, one for 2-m temperature and a second for 6-hr
+   accumulated precip, and the only
+   grid points that are desired are those below freezing with non-zero
+   precip. The user should run Gen-Vx-Mask twice - once to define the
+   temperature mask and a second time to intersect that with the precip mask:
-.. code-block:: none
+ .. code-block:: none
- gen_vx_mask fcst.grb fcst.grb tmp_mask.nc \
- -type data \
- -mask_field 'name="TMP"; level="Z2"' -thresh le273
- gen_vx_mask tmp_mask.nc fcst.grb tmp_and_precip_mask.nc \
- -type data \
- -input_field 'name="TMP_Z2"; level="(*,*)";' \
- -mask_field 'name="APCP"; level="A6";' -thresh gt0 \
- -intersection -name "FREEZING_PRECIP"
+ gen_vx_mask fcst.grb fcst.grb tmp_mask.nc \
+ -type data \
+ -mask_field 'name="TMP"; level="Z2"' -thresh le273
+ gen_vx_mask tmp_mask.nc fcst.grb tmp_and_precip_mask.nc \
+ -type data \
+ -input_field 'name="TMP_Z2"; level="(*,*)";' \
+ -mask_field 'name="APCP"; level="A6";' -thresh gt0 \
+ -intersection -name "FREEZING_PRECIP"
-The first one is pretty straight-forward.
+   The first call is pretty straightforward.
-1. The input field (fcst.grb) defines the domain for the mask.
+ 1. The input field (fcst.grb) defines the domain for the mask.
-2. Since we're doing data masking and the data we want lives in
- fcst.grb, we pass it in again as the mask_file.
+ 2. Since we're doing data masking and the data we want lives in
+ fcst.grb, we pass it in again as the mask_file.
-3. Lastly "-mask_field" specifies the data we want from the mask file
- and "-thresh" specifies the event threshold.
+ 3. Lastly "-mask_field" specifies the data we want from the mask file
+ and "-thresh" specifies the event threshold.
-The second call is a bit tricky.
+ The second call is a bit tricky.
-1. Do data masking (-type data)
+ 1. Do data masking (-type data)
-2. Read the NetCDF variable named "TMP_Z2" from the input file (tmp_mask.nc)
+ 2. Read the NetCDF variable named "TMP_Z2" from the input file
+ (tmp_mask.nc)
-3. Define the mask by reading 6-hour precip from the mask file
- (fcst.grb) and looking for values > 0 (-mask_field)
+ 3. Define the mask by reading 6-hour precip from the mask file
+ (fcst.grb) and looking for values > 0 (-mask_field)
-4. Apply intersection logic when combining the "input" value with
- the "mask" value (-intersection).
+ 4. Apply intersection logic when combining the "input" value with
+ the "mask" value (-intersection).
-5. Name the output NetCDF variable as "FREEZING_PRECIP" (-name).
- This is totally optional, but convenient.
+ 5. Name the output NetCDF variable as "FREEZING_PRECIP" (-name).
+ This is totally optional, but convenient.
-A user can write a script with multiple calls to Gen-Vx-Mask to
-apply complex masking logic and then pass the output mask file
-to Grid-Stat in its configuration file.
+ A user can write a script with multiple calls to Gen-Vx-Mask to
+ apply complex masking logic and then pass the output mask file
+ to Grid-Stat in its configuration file.
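+
+ For reference, here is a minimal sketch (an illustration, not part of
+ the example above) of how that mask file might be listed in the
+ Grid-Stat "mask" section:
+
+ .. code-block:: none
+
+ mask = {
+    grid = [];
+    poly = [ "tmp_and_precip_mask.nc" ];
+ }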
Q. How do I use neighborhood methods to compute fraction skill score?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-A common application of fraction skill score (FSS) is comparing forecast
-and observed thunderstorms. When computing FSS, first threshold the fields
-to define events and non-events. Then look at successively larger and
-larger areas around each grid point to see how the forecast event frequency
-compares to the observed event frequency.
-
-Applying this method to rainfall (and monsoons) is also reasonable.
-Keep in mind that Grid-Stat is the tool that computes FSS. Grid-Stat will
-need to be run once for each evaluation time. As an example, evaluating
-once per day, run Grid-Stat 122 times for the 122 days of a monsoon season.
-This will result in 122 FSS values. These can be viewed as a time series,
-or the Stat-Analysis tool could be used to aggregate them together into
-a single FSS value, like this:
-
-.. code-block:: none
-
- stat_analysis -job aggregate -line_type NBRCNT \
- -lookin out/grid_stat
-
-Be sure to pick thresholds (e.g. for the thunderstorms and monsoons)
-that capture the "events" that are of interest in studying.
-
-Also be aware that MET uses the "vld_thresh" setting in the configuration
-file to decide how to handle data along the edge of the domain. Let us say
-it is computing a fractional coverage field using a 5x5 neighborhood
-and it is at the edge of the domain. 15 points contain valid data and
-10 points are outside the domain. Grid-Stat computes the valid data ratio
-as 15/25 = 0.6. Then it applies the valid data threshold. Suppose
-vld_thresh = 0.5. Since 0.6 > 0.5 MET will compute a fractional coverage
-value for that point using the 15 valid data points. Next suppose
-vld_thresh = 1.0. Since 0.6 is less than 1.0, MET will just skip that
-point by setting it to bad data.
-
-Setting vld_thresh = 1.0 will ensure that FSS will only be computed at
-points where all NxN values contain valid data. Setting it to 0.5 only
-requires half of them.
+ .. dropdown:: Answer
+
+ A common application of fraction skill score (FSS) is comparing forecast
+ and observed thunderstorms. When computing FSS, first threshold the fields
+ to define events and non-events. Then look at successively larger and
+ larger areas around each grid point to see how the forecast event frequency
+ compares to the observed event frequency.
+
+ Applying this method to rainfall (and monsoons) is also reasonable.
+ Keep in mind that Grid-Stat is the tool that computes FSS. Grid-Stat will
+ need to be run once for each evaluation time. As an example, evaluating
+ once per day, run Grid-Stat 122 times for the 122 days of a monsoon season.
+ This will result in 122 FSS values. These can be viewed as a time series,
+ or the Stat-Analysis tool could be used to aggregate them together into
+ a single FSS value, like this:
+
+ .. code-block:: none
+
+ stat_analysis -job aggregate -line_type NBRCNT \
+ -lookin out/grid_stat
+
+ Be sure to pick thresholds (e.g. for the thunderstorms and monsoons)
+ that capture the "events" that are of interest to the study.
+
+ Also be aware that MET uses the "vld_thresh" setting in the configuration
+ file to decide how to handle data along the edge of the domain. Let us say
+ it is computing a fractional coverage field using a 5x5 neighborhood
+ and it is at the edge of the domain. 15 points contain valid data and
+ 10 points are outside the domain. Grid-Stat computes the valid data ratio
+ as 15/25 = 0.6. Then it applies the valid data threshold. Suppose
+ vld_thresh = 0.5. Since 0.6 > 0.5, MET will compute a fractional coverage
+ value for that point using the 15 valid data points. Next suppose
+ vld_thresh = 1.0. Since 0.6 is less than 1.0, MET will just skip that
+ point by setting it to bad data.
+
+ Setting vld_thresh = 1.0 will ensure that FSS will only be computed at
+ points where all NxN values contain valid data. Setting it to 0.5 only
+ requires half of them.
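+
+ For reference, these neighborhood settings live in the "nbrhd" section
+ of the Grid-Stat config file. A minimal sketch of the 5x5 example
+ above, requiring all 25 points to contain valid data:
+
+ .. code-block:: none
+
+ nbrhd = {
+    width      = [ 5 ];
+    cov_thresh = [ >=0.5 ];
+    vld_thresh = 1.0;
+ }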
Q. Is there an example of verifying forecast probabilities?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-There is an example of verifying probabilities in the test scripts
-included with the MET release. Take a look in:
-
-.. code-block:: none
-
- ${MET_BUILD_BASE}/scripts/config/GridStatConfig_POP_12
-
-The config file should look something like this:
-
-.. code-block:: none
-
- fcst = {
- wind_thresh = [ NA ];
- field = [
- {
- name = "LCDC";
- level = [ "L0" ];
- prob = TRUE;
- cat_thresh = [ >=0.0, >=0.1, >=0.2, >=0.3, >=0.4, >=0.5, >=0.6, >=0.7, >=0.8, >=0.9];
- }
- ];
- };
+ .. dropdown:: Answer
- obs = {
- wind_thresh = [ NA ];
- field = [
- {
- name = "WIND";
- level = [ "Z2" ];
- cat_thresh = [ >=34 ];
- }
- ];
- };
-
-The PROB flag is set to TRUE to tell grid_stat to process this as
-probability data. The cat_thresh is set to partition the probability
-values between 0 and 1. Note that if the probability data contains
-values from 0 to 100, MET automatically divides by 100 to rescale to
-the 0 to 1 range.
+ There is an example of verifying probabilities in the test scripts
+ included with the MET release. Take a look in:
+
+ .. code-block:: none
+
+ ${MET_BUILD_BASE}/scripts/config/GridStatConfig_POP_12
+
+ The config file should look something like this:
+
+ .. code-block:: none
+
+ fcst = {
+ wind_thresh = [ NA ];
+ field = [
+ {
+ name = "LCDC";
+ level = [ "L0" ];
+ prob = TRUE;
+ cat_thresh = [ >=0.0, >=0.1, >=0.2, >=0.3, >=0.4, >=0.5, >=0.6, >=0.7, >=0.8, >=0.9];
+ }
+ ];
+ };
+
+ obs = {
+ wind_thresh = [ NA ];
+ field = [
+ {
+ name = "WIND";
+ level = [ "Z2" ];
+ cat_thresh = [ >=34 ];
+ }
+ ];
+ };
+
+ The PROB flag is set to TRUE to tell grid_stat to process this as
+ probability data. The cat_thresh is set to partition the probability
+ values between 0 and 1. Note that if the probability data contains
+ values from 0 to 100, MET automatically divides by 100 to rescale to
+ the 0 to 1 range.
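+
+ To run this example, pass that config file to Grid-Stat along with
+ gridded forecast and observation files. A sketch, using hypothetical
+ file names:
+
+ .. code-block:: none
+
+ grid_stat pop_fcst.grb2 obs_analysis.grb2 \
+ GridStatConfig_POP_12 -outdir out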
Q. What is an example of using Grid-Stat with regridding and masking turned on?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Run Grid-Stat using the following commands and the attached config file
-
-.. code-block:: none
-
- mkdir out
- grid_stat \
- gfs_4_20160220_0000_012.grb2 \
- ST4.2016022012.06h \
- GridStatConfig \
- -outdir out
-
-Note the following two sections of the Grid-Stat config file:
-
-.. code-block:: none
-
- regrid = {
- to_grid = OBS;
- vld_thresh = 0.5;
- method = BUDGET;
- width = 2;
- }
-
-This tells Grid-Stat to do verification on the "observation" grid.
-Grid-Stat reads the GFS and Stage4 data and then automatically regrids
-the GFS data to the Stage4 domain using budget interpolation.
-Use "FCST" to verify the forecast domain. And use either a named
-grid or a grid specification string to regrid both the forecast and
-observation to a common grid. For example, to_grid = "G212"; will
-regrid both to NCEP Grid 212 before comparing them.
-
-.. code-block:: none
-
- mask = { grid = [ "FULL" ];
- poly = [ "MET_BASE/poly/CONUS.poly" ]; }
+ .. dropdown:: Answer
-This will compute statistics over the FULL model domain as well
-as the CONUS masking area.
+ Run Grid-Stat using the following commands and a config file
+ containing the sections shown below:
+
+ .. code-block:: none
+
+ mkdir out
+ grid_stat \
+ gfs_4_20160220_0000_012.grb2 \
+ ST4.2016022012.06h \
+ GridStatConfig \
+ -outdir out
+
+ Note the following two sections of the Grid-Stat config file:
+
+ .. code-block:: none
-To demonstrate that Grid-Stat worked as expected, run the following
-commands to plot its NetCDF matched pairs output file:
+ regrid = {
+ to_grid = OBS;
+ vld_thresh = 0.5;
+ method = BUDGET;
+ width = 2;
+ }
-.. code-block:: none
-
- plot_data_plane \
- out/grid_stat_120000L_20160220_120000V_pairs.nc \
- out/DIFF_APCP_06_A06_APCP_06_A06_CONUS.ps \
- 'name="DIFF_APCP_06_A06_APCP_06_A06_CONUS"; level="(*,*)";'
+ This tells Grid-Stat to do verification on the "observation" grid.
+ Grid-Stat reads the GFS and Stage4 data and then automatically regrids
+ the GFS data to the Stage4 domain using budget interpolation.
+ Use "FCST" to verify the forecast domain. And use either a named
+ grid or a grid specification string to regrid both the forecast and
+ observation to a common grid. For example, to_grid = "G212"; will
+ regrid both to NCEP Grid 212 before comparing them.
-Examine the resulting plot of that difference field.
+ .. code-block:: none
-Lastly, there is another option for defining that masking region.
-Rather than passing the ascii CONUS.poly file to grid_stat, run the
-gen_vx_mask tool and pass the NetCDF output of that tool to grid_stat.
-The advantage to gen_vx_mask is that it will make grid_stat run a
-bit faster. It can be used to construct much more complex masking areas.
+ mask = { grid = [ "FULL" ];
+ poly = [ "MET_BASE/poly/CONUS.poly" ]; }
+
+ This will compute statistics over the FULL model domain as well
+ as the CONUS masking area.
+
+ To demonstrate that Grid-Stat worked as expected, run the following
+ command to plot its NetCDF matched pairs output file:
+
+ .. code-block:: none
+
+ plot_data_plane \
+ out/grid_stat_120000L_20160220_120000V_pairs.nc \
+ out/DIFF_APCP_06_A06_APCP_06_A06_CONUS.ps \
+ 'name="DIFF_APCP_06_A06_APCP_06_A06_CONUS"; level="(*,*)";'
+
+ Examine the resulting plot of that difference field.
+
+ Lastly, there is another option for defining that masking region.
+ Rather than passing the ASCII CONUS.poly file to grid_stat, run the
+ gen_vx_mask tool and pass the NetCDF output of that tool to grid_stat.
+ The advantage of gen_vx_mask is that it makes grid_stat run a
+ bit faster, and it can be used to construct much more complex masking areas.
Q. How do I use one mask for the forecast field and a different mask for the observation field?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-You can't define different
-masks for the forecast and observation fields in MET tools. MET only lets you
-define a single mask (a masking grid or polyline) and then you choose
-whether you want to apply it to the FCST, OBS, or BOTH of them.
-
-Nonetheless, there is a way you can accomplish this logic using the
-gen_vx_mask tool. You run it once to pre-process the forecast field
-and a second time to pre-process the observation field. And then pass
-those output files to your desired MET tool.
-
-Below is an example using sample data that is included with the MET
-release tarball. To illustrate, this command will read 3-hour
-precip and 2-meter temperature, and resets the precip at any grid
-point where the temperature is less than 290 K to a value of 0:
-
-.. code-block:: none
-
- gen_vx_mask \
- data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 \
- data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 \
- APCP_03_where_2m_TMPge290.nc \
- -type data \
- -input_field 'name="APCP"; level="A3";' \
- -mask_field 'name="TMP"; level="Z2";' \
- -thresh 'lt290&&ne-9999' -v 4 -value 0
+ .. dropdown:: Answer
-So this is a bit confusing. Here's what is happening:
+ You can't define different
+ masks for the forecast and observation fields in MET tools.
+ MET only lets you
+ define a single mask (a masking grid or polyline) and then you choose
+ whether you want to apply it to the FCST, OBS, or BOTH of them.
-* The first argument is the input file which defines the grid.
+ Nonetheless, there is a way you can accomplish this logic using the
+ gen_vx_mask tool. You run it once to pre-process the forecast field
+ and a second time to pre-process the observation field. And then pass
+ those output files to your desired MET tool.
-* The second argument is used to define the masking region and
- since I'm reading data from the same input file, I've listed
- that file twice.
+ Below is an example using sample data that is included with the MET
+ release tarball. To illustrate, this command will read 3-hour
+ precip and 2-meter temperature, and resets the precip at any grid
+ point where the temperature is less than 290 K to a value of 0:
-* The third argument is the output file name.
+ .. code-block:: none
-* The type of masking is "data" masking where we read a 2D field of
- data and apply a threshold.
+ gen_vx_mask \
+ data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 \
+ data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 \
+ APCP_03_where_2m_TMPge290.nc \
+ -type data \
+ -input_field 'name="APCP"; level="A3";' \
+ -mask_field 'name="TMP"; level="Z2";' \
+ -thresh 'lt290&&ne-9999' -v 4 -value 0
-* By default, gen_vx_mask initializes each grid point to a value
- of 0. Specifying "-input_field" tells it to initialize each grid
- point to the value of that field (in my example 3-hour precip).
-
-* The "-mask_field" option defines the data field that should be
- thresholded.
+ So this is a bit confusing. Here's what is happening:
-* The "-thresh" option defines the threshold to be applied.
-
-* The "-value" option tells it what "mask" value to write to the
- output, and I've chosen 0.
+ * The first argument is the input file which defines the grid.
-The example threshold is less than 290 and not -9999 (which is MET's
-internal missing data value). So any grid point where the 2 meter
-temperature is less than 290 K and is not bad data will be replaced
-by a value of 0.
+ * The second argument is used to define the masking region and
+ since I'm reading data from the same input file, I've listed
+ that file twice.
-To more easily demonstrate this, I changed to using "-value 10" and ran
-the output through plot_data_plane:
+ * The third argument is the output file name.
-.. code-block:: none
-
- plot_data_plane \
- APCP_03_where_2m_TMPge290.nc \
- APCP_03_where_2m_TMPge290.ps \
- 'name="data_mask"; level="(*,*)";'
+ * The type of masking is "data" masking where we read a 2D field of
+ data and apply a threshold.
+
+ * By default, gen_vx_mask initializes each grid point to a value
+ of 0. Specifying "-input_field" tells it to initialize each grid
+ point to the value of that field (in my example 3-hour precip).
+
+ * The "-mask_field" option defines the data field that should be
+ thresholded.
+
+ * The "-thresh" option defines the threshold to be applied.
+
+ * The "-value" option tells it what "mask" value to write to the
+ output, and I've chosen 0.
+
+ The example threshold matches values less than 290 that are not -9999
+ (which is MET's internal missing data value). So any grid point where
+ the 2-meter temperature is less than 290 K and is not bad data will be
+ replaced by a value of 0.
+
+ To more easily demonstrate this, I changed to using "-value 10" and ran
+ the output through plot_data_plane:
+
+ .. code-block:: none
+
+ plot_data_plane \
+ APCP_03_where_2m_TMPge290.nc \
+ APCP_03_where_2m_TMPge290.ps \
+ 'name="data_mask"; level="(*,*)";'
-In the resulting plot, anywhere you see the pink value of 10, that's
-where gen_vx_mask has masked out the grid point.
+ In the resulting plot, anywhere you see the pink value of 10, that's
+ where gen_vx_mask has masked out the grid point.
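+
+ The second run for the observation field follows the same pattern.
+ A sketch, assuming a hypothetical observation file (obs.grb)
+ containing the same APCP and TMP records:
+
+ .. code-block:: none
+
+ gen_vx_mask \
+ obs.grb obs.grb \
+ OBS_APCP_03_where_2m_TMPge290.nc \
+ -type data \
+ -input_field 'name="APCP"; level="A3";' \
+ -mask_field 'name="TMP"; level="Z2";' \
+ -thresh 'lt290&&ne-9999' -v 4 -value 0
+
+ The two output files can then be passed to the desired MET tool in
+ place of the raw forecast and observation files.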
Pcp-Combine
-----------
@@ -599,371 +622,384 @@ Pcp-Combine
Q. How do I add and subtract with Pcp-Combine?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-An example of running the MET pcp_combine tool to put NAM 3-hourly
-precipitation accumulations data into user-desired 3 hour intervals is
-provided below.
+ .. dropdown:: Answer
+
+ An example of running the MET pcp_combine tool to put NAM 3-hourly
+ precipitation accumulation data into user-desired 3-hour intervals is
+ provided below.
-If the user wanted a 0-3 hour accumulation, this is already available
-in the 03 UTC file. Run this file
-through pcp_combine as a pass-through to put it into NetCDF format:
+ If the user wanted a 0-3 hour accumulation, this is already available
+ in the 03 UTC file. Run this file
+ through pcp_combine as a pass-through to put it into NetCDF format:
-.. code-block:: none
-
- pcp_combine -add 03_file.grb 03 APCP_00_03.nc
-
-If the user wanted the 3-6 hour accumulation, they would subtract
-0-6 and 0-3 accumulations:
+ .. code-block:: none
-.. code-block:: none
-
- pcp_combine -subtract 06_file.grb 06 03_file.grb 03 APCP_03_06.nc
+ pcp_combine -add 03_file.grb 03 APCP_00_03.nc
+
+ If the user wanted the 3-6 hour accumulation, they would subtract
+ 0-6 and 0-3 accumulations:
-Similarly, if they wanted the 6-9 hour accumulation, they would
-subtract 0-9 and 0-6 accumulations:
+ .. code-block:: none
-.. code-block:: none
+ pcp_combine -subtract 06_file.grb 06 03_file.grb 03 APCP_03_06.nc
- pcp_combine -subtract 09_file.grb 09 06_file.grb 06 APCP_06_09.nc
+ Similarly, if they wanted the 6-9 hour accumulation, they would
+ subtract 0-9 and 0-6 accumulations:
-And so on.
+ .. code-block:: none
-Run the 0-3 and 12-15 through pcp_combine even though they already have
-the 3-hour accumulation. That way, all of the NAM files will be in the
-same file format, and can use the same configuration file settings for
-the other MET tools (grid_stat, mode, etc.). If the NAM files are a mix
-of GRIB and NetCDF, the logic would need to be a bit more complicated.
+ pcp_combine -subtract 09_file.grb 09 06_file.grb 06 APCP_06_09.nc
+
+ And so on.
+
+ Run the 0-3 and 12-15 through pcp_combine even though they already have
+ the 3-hour accumulation. That way, all of the NAM files will be in the
+ same file format, and can use the same configuration file settings for
+ the other MET tools (grid_stat, mode, etc.). If the NAM files are a mix
+ of GRIB and NetCDF, the logic would need to be a bit more complicated.
Q. How do I combine 12-hour accumulated precipitation from two different initialization times?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-The "-sum" command assumes the same initialization time. Use the "-add"
-option instead.
+ .. dropdown:: Answer
+
+ The "-sum" command assumes the same initialization time. Use the "-add"
+ option instead.
-.. code-block:: none
+ .. code-block:: none
- pcp_combine -add \
- WRFPRS_1997-06-03_APCP_A12.nc 'name="APCP_12"; level="(*,*)";' \
- WRFPRS_d01_1997-06-04_00_APCP_A12.grb 12 \
- Sum.nc
+ pcp_combine -add \
+ WRFPRS_1997-06-03_APCP_A12.nc 'name="APCP_12"; level="(*,*)";' \
+ WRFPRS_d01_1997-06-04_00_APCP_A12.grb 12 \
+ Sum.nc
-For the first file, list the file name followed by a config string
-describing the field to use from the NetCDF file. For the second file,
-list the file name followed by the accumulation interval to use
-(12 for 12 hours). The output file, Sum.nc, will contain the
-combine 12-hour accumulated precipitation.
+ For the first file, list the file name followed by a config string
+ describing the field to use from the NetCDF file. For the second file,
+ list the file name followed by the accumulation interval to use
+ (12 for 12 hours). The output file, Sum.nc, will contain the
+ combined 12-hour accumulated precipitation.
-Here is a small excerpt from the pcp_combine usage statement:
+ Here is a small excerpt from the pcp_combine usage statement:
-Note: For “-add” and "-subtract”, the accumulation intervals may be
-substituted with config file strings. For that first file, we replaced
-the accumulation interval with a config file string.
+ Note: For "-add" and "-subtract", the accumulation intervals may be
+ substituted with config file strings. For that first file, we replaced
+ the accumulation interval with a config file string.
-Here are 3 commands you could use to plot these data files:
+ Here are 3 commands you could use to plot these data files:
-.. code-block:: none
+ .. code-block:: none
- plot_data_plane WRFPRS_1997-06-03_APCP_A12.nc \
- WRFPRS_1997-06-03_APCP_A12.ps 'name="APCP_12"; level="(*,*)";'
+ plot_data_plane WRFPRS_1997-06-03_APCP_A12.nc \
+ WRFPRS_1997-06-03_APCP_A12.ps 'name="APCP_12"; level="(*,*)";'
-.. code-block:: none
+ .. code-block:: none
- plot_data_plane WRFPRS_d01_1997-06-04_00_APCP_A12.grb \
- WRFPRS_d01_1997-06-04_00_APCP_A12.ps 'name="APCP" level="A12";'
+ plot_data_plane WRFPRS_d01_1997-06-04_00_APCP_A12.grb \
+ WRFPRS_d01_1997-06-04_00_APCP_A12.ps 'name="APCP" level="A12";'
-.. code-block:: none
+ .. code-block:: none
- plot_data_plane sum.nc sum.ps 'name="APCP_24"; level="(*,*)";'
+ plot_data_plane Sum.nc Sum.ps 'name="APCP_24"; level="(*,*)";'
Q. How do I correct a precipitation time range?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Typically, accumulated precipitation is stored in GRIB files using an
-accumulation interval with a "time range" indicator value of 4. Here is
-a description of the different time range indicator values and
-meanings: http://www.nco.ncep.noaa.gov/pmb/docs/on388/table5.html
+ .. dropdown:: Answer
+
+ Typically, accumulated precipitation is stored in GRIB files using an
+ accumulation interval with a "time range" indicator value of 4. Here is
+ a description of the different time range indicator values and
+ meanings: http://www.nco.ncep.noaa.gov/pmb/docs/on388/table5.html
-For example, take a look at the APCP in the GRIB files included in the
-MET tar ball:
+ For example, take a look at the APCP in the GRIB files included in the
+ MET tar ball:
-.. code-block:: none
+ .. code-block:: none
- wgrib ${MET_BUILD_BASE}/data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 | grep APCP
- 1:0:d=05080700:APCP:kpds5=61:kpds6=1:kpds7=0:TR=4:P1=0: \
- P2=12:TimeU=1:sfc:0- 12hr acc:NAve=0
- 2:31408:d=05080700:APCP:kpds5=61:kpds6=1:kpds7=0:TR=4: \
- P1=9:P2=12:TimeU=1:sfc:9- 12hr acc:NAve=0
+ wgrib ${MET_BUILD_BASE}/data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 | grep APCP
+ 1:0:d=05080700:APCP:kpds5=61:kpds6=1:kpds7=0:TR=4:P1=0: \
+ P2=12:TimeU=1:sfc:0- 12hr acc:NAve=0
+ 2:31408:d=05080700:APCP:kpds5=61:kpds6=1:kpds7=0:TR=4: \
+ P1=9:P2=12:TimeU=1:sfc:9- 12hr acc:NAve=0
-The "TR=4" indicates that these records contain an accumulation
-between times P1 and P2. In the first record, the precip is accumulated
-between 0 and 12 hours. In the second record, the precip is accumulated
-between 9 and 12 hours.
+ The "TR=4" indicates that these records contain an accumulation
+ between times P1 and P2. In the first record, the precip is accumulated
+ between 0 and 12 hours. In the second record, the precip is accumulated
+ between 9 and 12 hours.
-However, the GRIB data uses a time range indicator of 5, not 4.
+ However, the GRIB data uses a time range indicator of 5, not 4.
-.. code-block:: none
+ .. code-block:: none
- wgrib rmf_gra_2016040600.24 | grep APCP
- 291:28360360:d=16040600:APCP:kpds5=61:kpds6=1:kpds7=0: \
- TR=5:P1=0:P2=24:TimeU=1:sfc:0-24hr diff:NAve=0
+ wgrib rmf_gra_2016040600.24 | grep APCP
+ 291:28360360:d=16040600:APCP:kpds5=61:kpds6=1:kpds7=0: \
+ TR=5:P1=0:P2=24:TimeU=1:sfc:0-24hr diff:NAve=0
-pcp_combine is looking in "rmf_gra_2016040600.24" for a 24 hour
-*accumulation*, but since the time range indicator is no 4, it doesn't
-find a match.
+ pcp_combine is looking in "rmf_gra_2016040600.24" for a 24-hour
+ *accumulation*, but since the time range indicator is not 4, it doesn't
+ find a match.
-If possible switch the time range indicator to 4 on the GRIB files. If
-this is not possible, there is another workaround. Instead of telling
-pcp_combine to look for a particular accumulation interval, give it a
-more complete description of the chosen field to use from each file.
-Here is an example:
+ If possible, switch the time range indicator to 4 on the GRIB files. If
+ this is not possible, there is another workaround. Instead of telling
+ pcp_combine to look for a particular accumulation interval, give it a
+ more complete description of the chosen field to use from each file.
+ Here is an example:
-.. code-block:: none
+ .. code-block:: none
- pcp_combine -add rmf_gra_2016040600.24 'name="APCP"; level="L0-24";' \
- rmf_gra_2016040600_APCP_00_24.nc
-
-The resulting file should have the accumulation listed at 24h rather than 0-24.
+ pcp_combine -add rmf_gra_2016040600.24 'name="APCP"; level="L0-24";' \
+ rmf_gra_2016040600_APCP_00_24.nc
+
+ The resulting file should have the accumulation listed at
+ 24h rather than 0-24.
Q. How do I use Pcp-Combine as a pass-through to simply reformat from GRIB to NetCDF or to change output variable name?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-The pcp_combine tool is typically used to modify the accumulation interval
-of precipitation amounts in model and/or analysis datasets. For example,
-when verifying model output in GRIB format containing runtime accumulations
-of precipitation, run the pcp_combine -subtract option every 6 hours to
-create 6-hourly precipitation amounts. In this example, it is not really
-necessary to run pcp_combine on the 6-hour GRIB forecast file since the
-model output already contains the 0 to 6 hour accumulation. However, the
-output of pcp_combine is typically passed to point_stat, grid_stat, or mode
-for verification. Having the 6-hour forecast in GRIB format and all other
-forecast hours in NetCDF format (output of pcp_combine) makes the logic
-for configuring the other MET tools messy. To make the configuration
-consistent for all forecast hours, one option is to choose to run
-pcp_combine as a pass-through to simply reformat from GRIB to NetCDF.
-Listed below is an example of passing a single record to the
-pcp_combine -add option to do the reformatting:
-
-.. code-block:: none
-
- $MET_BUILD/bin/pcp_combine -add forecast_F06.grb \
- 'name="APCP"; level="A6";' \
- forecast_APCP_06_F06.nc -name APCP_06
-
-Reformatting from GRIB to NetCDF may be done for any other reason the
-user may have. For example, the -name option can be used to define the
-NetCDF output variable name. Presuming this file is then passed to
-another MET tool, the new variable name (CompositeReflectivity) will
-appear in the output of downstream tools:
-
-.. code-block:: none
-
- $MET_BUILD/bin/pcp_combine -add forecast.grb \
- 'name="REFC"; level="L0"; GRIB1_ptv=129; lead_time="120000";' \
- forecast.nc -name CompositeReflectivity
+ .. dropdown:: Answer
+
+ The pcp_combine tool is typically used to modify the accumulation interval
+ of precipitation amounts in model and/or analysis datasets. For example,
+ when verifying model output in GRIB format containing runtime accumulations
+ of precipitation, run the pcp_combine -subtract option every 6 hours to
+ create 6-hourly precipitation amounts. In this example, it is not really
+ necessary to run pcp_combine on the 6-hour GRIB forecast file since the
+ model output already contains the 0 to 6 hour accumulation. However, the
+ output of pcp_combine is typically passed to point_stat, grid_stat, or mode
+ for verification. Having the 6-hour forecast in GRIB format and all other
+ forecast hours in NetCDF format (output of pcp_combine) makes the logic
+ for configuring the other MET tools messy. To make the configuration
+ consistent for all forecast hours, one option is to run
+ pcp_combine as a pass-through to simply reformat from GRIB to NetCDF.
+ Listed below is an example of passing a single record to the
+ pcp_combine -add option to do the reformatting:
+
+ .. code-block:: none
+
+ $MET_BUILD/bin/pcp_combine -add forecast_F06.grb \
+ 'name="APCP"; level="A6";' \
+ forecast_APCP_06_F06.nc -name APCP_06
+
+ Reformatting from GRIB to NetCDF may be done for any other reason the
+ user may have. For example, the -name option can be used to define the
+ NetCDF output variable name. Presuming this file is then passed to
+ another MET tool, the new variable name (CompositeReflectivity) will
+ appear in the output of downstream tools:
+
+ .. code-block:: none
+
+ $MET_BUILD/bin/pcp_combine -add forecast.grb \
+ 'name="REFC"; level="L0"; GRIB1_ptv=129; lead_time="120000";' \
+ forecast.nc -name CompositeReflectivity
Q. How do I use "-pcprx" to run a project faster?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-To run a project faster, the “-pcprx” option may be used to narrow the
-search down to whatever regular expression you provide. Here are a two
-examples:
+ .. dropdown:: Answer
-.. code-block:: none
-
- # Only using Stage IV data (ST4)
- pcp_combine -sum 00000000_000000 06 \
- 20161015_18 12 ST4.2016101518.APCP_12_SUM.nc -pcprx "ST4.*.06h"
+ To run a project faster, the “-pcprx” option may be used to narrow the
+ search down to whatever regular expression you provide. Here are
+ two examples:
+
+ .. code-block:: none
- # Specify that files starting with pgbq[number][number]be used:
- pcp_combine \
- -sum 20160221_18 06 20160222_18 24 \
- gfs_APCP_24_20160221_18_F00_F24.nc \
- -pcpdir /scratch4/BMC/shout/ptmp/Andrew.Kren/pre2016c3_corr/temp \
- -pcprx 'pgbq[0-9][0-9].gfs.2016022118' -v 3
+ # Only using Stage IV data (ST4)
+ pcp_combine -sum 00000000_000000 06 \
+ 20161015_18 12 ST4.2016101518.APCP_12_SUM.nc -pcprx "ST4.*.06h"
+
+ # Specify that files starting with pgbq[number][number] be used:
+ pcp_combine \
+ -sum 20160221_18 06 20160222_18 24 \
+ gfs_APCP_24_20160221_18_F00_F24.nc \
+ -pcpdir /scratch4/BMC/shout/ptmp/Andrew.Kren/pre2016c3_corr/temp \
+ -pcprx 'pgbq[0-9][0-9].gfs.2016022118' -v 3
Q. How do I enter the time format correctly?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Here is an **incorrect example** of running pcp_combine with sub-hourly
-accumulation intervals:
+ .. dropdown:: Answer
+
+ Here is an **incorrect example** of running pcp_combine with sub-hourly
+ accumulation intervals:
-.. code-block:: none
+ .. code-block:: none
- # incorrect example:
- pcp_combine -subtract forecast.grb 0055 \
- forecast2.grb 0005 forecast.nc -field APCP
+ # incorrect example:
+ pcp_combine -subtract forecast.grb 0055 \
+ forecast2.grb 0005 forecast.nc -field APCP
-The time signature is entered incorrectly. Let’s assume that "0055"
-meant 0 hours and 55 minutes and "0005" meant 0 hours and 5 minutes.
+ The time signature is entered incorrectly. Let’s assume that "0055"
+ meant 0 hours and 55 minutes and "0005" meant 0 hours and 5 minutes.
-Looking at the usage statement for pcp_combine (just type pcp_combine with
-no arguments): "accum1" indicates the accumulation interval to be used
-from in_file1 in HH[MMSS] format (required).
+ Looking at the usage statement for pcp_combine (just type pcp_combine with
+ no arguments): "accum1" indicates the accumulation interval to be used
+ from in_file1 in HH[MMSS] format (required).
-The time format listed "HH[MMSS]" means specifying hours or
-hours/minutes/seconds. The incorrect example is using hours/minutes.
+ The time format listed "HH[MMSS]" means specifying hours or
+ hours/minutes/seconds. The incorrect example is using hours/minutes.
-Below is the **correct example**. Add the seconds to the end of the
-time strings, like this:
+ Below is the **correct example**. Add the seconds to the end of the
+ time strings, like this:
-.. code-block:: none
+ .. code-block:: none
- # correct example:
- pcp_combine -subtract forecast.grb 005500 \
- forecast2.grb 000500 forecast.nc -field APCP
+ # correct example:
+ pcp_combine -subtract forecast.grb 005500 \
+ forecast2.grb 000500 forecast.nc -field APCP
Q. How do I use Pcp-Combine when my GRIB data doesn't have the appropriate accumulation interval time range indicator?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Run wgrib on the data files and the output is listed below:
+ .. dropdown:: Answer
+
+ Run wgrib on the data files and the output is listed below:
-.. code-block:: none
+ .. code-block:: none
- 279:503477484:d=15062313:APCP:kpds5=61:kpds6=1:kpds7=0:TR= 10:P1=3:P2=247:TimeU=0:sfc:1015min \
- fcst:NAve=0 \
- 279:507900854:d=15062313:APCP:kpds5=61:kpds6=1:kpds7=0:TR= 10:P1=3:P2=197:TimeU=0:sfc:965min \
- fcst:NAve=0
+ 279:503477484:d=15062313:APCP:kpds5=61:kpds6=1:kpds7=0:TR= 10:P1=3:P2=247:TimeU=0:sfc:1015min \
+ fcst:NAve=0 \
+ 279:507900854:d=15062313:APCP:kpds5=61:kpds6=1:kpds7=0:TR= 10:P1=3:P2=197:TimeU=0:sfc:965min \
+ fcst:NAve=0
-Notice the output which says "TR=10". TR means time range indicator and
-a value of 10 means that the level information contains an instantaneous
-forecast time, not an accumulation interval.
+ Notice the output which says "TR=10". TR means time range indicator and
+ a value of 10 means that the level information contains an instantaneous
+ forecast time, not an accumulation interval.
-Here's a table describing the TR values:
-http://www.nco.ncep.noaa.gov/pmb/docs/on388/table5.html
+ Here's a table describing the TR values:
+ http://www.nco.ncep.noaa.gov/pmb/docs/on388/table5.html
-The default logic for pcp_combine is to look for GRIB code 61 (i.e. APCP)
-defined with an accumulation interval (TR = 4). Since the data doesn't
-meet that criteria, the default logic of pcp_combine won't work. The
-arguments need to be more specific to tell pcp_combine exactly what to do.
+ The default logic for pcp_combine is to look for GRIB code 61 (i.e. APCP)
+ defined with an accumulation interval (TR = 4). Since the data doesn't
+ meet those criteria, the default logic of pcp_combine won't work. The
+ arguments need to be more specific to tell pcp_combine exactly what to do.
-Try the command:
+ Try the command:
-.. code-block:: none
+ .. code-block:: none
- pcp_combine -subtract \
- forecast.grb 'name="APCP"; level="L0"; lead_time="165500";' \
- forecast2.grb 'name="APCP"; level="L0"; lead_time="160500";' \
- forecast.nc -name APCP_A005000
+ pcp_combine -subtract \
+ forecast.grb 'name="APCP"; level="L0"; lead_time="165500";' \
+ forecast2.grb 'name="APCP"; level="L0"; lead_time="160500";' \
+ forecast.nc -name APCP_A005000
-Some things to point out here:
+ Some things to point out here:
-1. Notice in the wgrib output that the forecast times are 1015 min and
- 965 min. In HHMMSS format, that's "165500" and "160500".
+ 1. Notice in the wgrib output that the forecast times are 1015 min and
+ 965 min. In HHMMSS format, that's "165500" and "160500".
-2. An accumulation interval can’t be specified since the data isn't stored
- that way. Instead, use a config file string to describe the data to use.
+ 2. An accumulation interval can’t be specified since the data
+ isn't stored that way. Instead, use a config file string to
+ describe the data to use.
-3. The config file string specifies a "name" (APCP) and "level" string. APCP
- is defined at the surface, so a level value of 0 (L0) was specified.
+ 3. The config file string specifies a "name" (APCP) and "level" string.
+ APCP is defined at the surface, so a level value of 0 (L0) was
+ specified.
-4. Technically, the "lead_time" doesn’t need to be specified at all,
- pcp_combine
- would find the single APCP record in each input GRIB file and use them.
- But just in case, the lead_time option was included to be extra certain to
- get exactly the data that is needed.
+ 4. Technically, the "lead_time" doesn’t need to be specified at all;
+ pcp_combine would find the single APCP record in each input GRIB
+ file and use them. But just in case, the lead_time option was
+ included to be extra certain to get exactly the data that is needed.
-5. The default output variable name pcp_combine would write would be
- "APCP_L0". However, to indicate that its a 50-minute
- "accumulation interval" use a
- different output variable name (APCP_A005000). Any string name is
- possible. Maybe "Precip50Minutes" or "RAIN50". But whatever string is
- chosen will be used in the Grid-Stat, Point-Stat, or MODE config file to
- tell that tool what variable to process.
+ 5. The default output variable name pcp_combine would write is
+ "APCP_L0". However, to indicate that it's a 50-minute
+ "accumulation interval", use a different output variable name
+ (APCP_A005000). Any string name is possible, such as
+ "Precip50Minutes" or "RAIN50". Whatever string is chosen will be
+ used in the Grid-Stat, Point-Stat, or MODE config file to tell
+ that tool what variable to process, as sketched below.
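+
+ For example, here is a minimal sketch of the Grid-Stat "fcst" setting
+ for reading the APCP_A005000 variable back out of the NetCDF output:
+
+ .. code-block:: none
+
+ fcst = {
+    field = [
+       {
+         name  = "APCP_A005000";
+         level = [ "(*,*)" ];
+       }
+    ];
+ }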
Q. How do I use "-sum", "-add", and "-subtract" to achieve the same accumulation interval?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Here is an example of using pcp_combine to put GFS into 24- hour intervals
-for comparison against 24-hourly StageIV precipitation with GFS data
-through the pcp_combine tool. Be aware that the 24-hour StageIV data is
-defined as an accumulation from 12Z on one day to 12Z on the next day:
-http://www.emc.ncep.noaa.gov/mmb/ylin/pcpanl/stage4/
-
-Therefore, only the 24-hour StageIV data can be used to evaluate 12Z to
-12Z accumulations from the model. Alternatively, the 6- hour StageIV
-accumulations could be used to evaluate any 24 hour accumulation from
-the model. For the latter, run the 6-hour StageIV files through pcp_combine
-to generate the desired 24-hour accumulation.
-
-Here is an example. Run pcp_combine to compute 24-hour accumulations for
-GFS. In this example, process the 20150220 00Z initialization of GFS.
-
-.. code-block:: none
-
- pcp_combine \
- -sum 20150220_00 06 20150221_00 24 \
- gfs_APCP_24_20150220_00_F00_F24.nc \
- -pcprx "gfs_4_20150220_00.*grb2" \
- -pcpdir /d1/model_data/20150220
+ .. dropdown:: Answer
-pcp_combine is looking in the */d1/SBU/GFS/model_data/20150220* directory
-at files which match this regular expression "gfs_4_20150220_00.*grb2".
-That directory contains data for 00, 06, 12, and 18 hour initializations,
-but the "-pcprx" option narrows the search down to the 00 hour
-initialization which makes it run faster. It inspects all the matching
-files, looking for 6-hour APCP data to sum up to a 24-hour accumulation
-valid at 20150221_00. This results in a 24-hour accumulation between
-forecast hours 0 and 24.
-
-The following command will compute the 24-hour accumulation between forecast
-hours 12 and 36:
-
-.. code-block:: none
-
- pcp_combine \
- -sum 20150220_00 06 20150221_12 24 \
- gfs_APCP_24_20150220_00_F12_F36.nc \
- -pcprx "gfs_4_20150220_00.*grb2" \
- -pcpdir /d1/model_data/20150220
-
-The "-sum" command is meant to make things easier by searching the
-directory. But instead of using "-sum", another option would be the
-"- add" command. Explicitly list the 4 files that need to be extracted
-from the 6-hour APCP and add them up to 24. In the directory structure,
-the previous "-sum" job could be rewritten with "-add" like this:
-
-.. code-block:: none
-
- pcp_combine -add \
- /d1/model_data/20150220/gfs_4_20150220_0000_018.grb2 06 \
- /d1/model_data/20150220/gfs_4_20150220_0000_024.grb2 06 \
- /d1/model_data/20150220/gfs_4_20150220_0000_030.grb2 06 \
- /d1/model_data/20150220/gfs_4_20150220_0000_036.grb2 06 \
- gfs_APCP_24_20150220_00_F12_F36_add_option.nc
-
-This example explicitly tells pcp_combine which files to read and
-what accumulation interval (6 hours) to extract from them. The resulting
-output should be identical to the output of the "-sum" command.
+ Here is an example of using pcp_combine to put GFS data into 24-hour
+ intervals for comparison against 24-hourly StageIV precipitation.
+ Be aware that the 24-hour StageIV data is
+ defined as an accumulation from 12Z on one day to 12Z on the next day:
+ http://www.emc.ncep.noaa.gov/mmb/ylin/pcpanl/stage4/
+
+ Therefore, only the 24-hour StageIV data can be used to evaluate 12Z to
+ 12Z accumulations from the model. Alternatively, the 6-hour StageIV
+ accumulations could be used to evaluate any 24 hour accumulation from
+ the model. For the latter, run the 6-hour StageIV files through
+ pcp_combine to generate the desired 24-hour accumulation.
+
+ Here is an example. Run pcp_combine to compute 24-hour accumulations for
+ GFS. In this example, process the 20150220 00Z initialization of GFS.
+
+ .. code-block:: none
+
+ pcp_combine \
+ -sum 20150220_00 06 20150221_00 24 \
+ gfs_APCP_24_20150220_00_F00_F24.nc \
+ -pcprx "gfs_4_20150220_00.*grb2" \
+ -pcpdir /d1/model_data/20150220
+
+ pcp_combine is looking in the */d1/model_data/20150220* directory
+ at files which match this regular expression "gfs_4_20150220_00.*grb2".
+ That directory contains data for 00, 06, 12, and 18 hour initializations,
+ but the "-pcprx" option narrows the search down to the 00 hour
+ initialization which makes it run faster. It inspects all the matching
+ files, looking for 6-hour APCP data to sum up to a 24-hour accumulation
+ valid at 20150221_00. This results in a 24-hour accumulation between
+ forecast hours 0 and 24.
+
+ The following command will compute the 24-hour accumulation between
+ forecast hours 12 and 36:
+
+ .. code-block:: none
+
+ pcp_combine \
+ -sum 20150220_00 06 20150221_12 24 \
+ gfs_APCP_24_20150220_00_F12_F36.nc \
+ -pcprx "gfs_4_20150220_00.*grb2" \
+ -pcpdir /d1/model_data/20150220
+
+ The "-sum" command is meant to make things easier by searching the
+ directory. But instead of using "-sum", another option would be the
+ "- add" command. Explicitly list the 4 files that need to be extracted
+ from the 6-hour APCP and add them up to 24. In the directory structure,
+ the previous "-sum" job could be rewritten with "-add" like this:
+
+ .. code-block:: none
+
+ pcp_combine -add \
+ /d1/model_data/20150220/gfs_4_20150220_0000_018.grb2 06 \
+ /d1/model_data/20150220/gfs_4_20150220_0000_024.grb2 06 \
+ /d1/model_data/20150220/gfs_4_20150220_0000_030.grb2 06 \
+ /d1/model_data/20150220/gfs_4_20150220_0000_036.grb2 06 \
+ gfs_APCP_24_20150220_00_F12_F36_add_option.nc
+
+ This example explicitly tells pcp_combine which files to read and
+ what accumulation interval (6 hours) to extract from them. The resulting
+ output should be identical to the output of the "-sum" command.
Q. What is the difference between "-sum" and "-add"?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-The -sum and -add options both do the same thing. It's just that
-'-sum' could find files more quickly with the use of the -pcprx flag.
-This could also be accomplished by using a calling script.
+ .. dropdown:: Answer
+
+ The -sum and -add options both do the same thing. It's just that
+ '-sum' could find files more quickly with the use of the -pcprx flag.
+ This could also be accomplished by using a calling script.
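+
+ A minimal sketch of such a calling script, assuming the hypothetical
+ GFS file names from the previous answer; the loop builds the explicit
+ file list that "-add" needs:
+
+ .. code-block:: none
+
+ #!/bin/sh
+ # Build "file accum" pairs for the four 6-hour accumulations.
+ files=''
+ for lead in 18 24 30 36; do
+    files="$files gfs_4_20150220_0000_0${lead}.grb2 06"
+ done
+ pcp_combine -add $files gfs_APCP_24_F12_F36.nc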
Q. How do I select a specific GRIB record?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-In this example, record 735 needs to be selected.
-
-.. code-block:: none
+ .. dropdown:: Answer
- pcp_combine -add 20160101_i12_f015_HRRR_wrfnat.grb2 \
- 'name="APCP"; level="R735";' \
- -name "APCP_01" HRRR_wrfnat.20160101_i12_f015.nc
+ In this example, record 735 needs to be selected.
+
+ .. code-block:: none
-Instead of having the level as "L0", tell it to use "R735" to select
-grib record 735.
+ pcp_combine -add 20160101_i12_f015_HRRR_wrfnat.grb2 \
+ 'name="APCP"; level="R735";' \
+ -name "APCP_01" HRRR_wrfnat.20160101_i12_f015.nc
+
+ Instead of having the level as "L0", tell it to use "R735" to select
+ GRIB record 735.
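+
+ To find the record number in the first place, list the records with a
+ GRIB inspection utility. For example, using wgrib2 on the same file:
+
+ .. code-block:: none
+
+ wgrib2 20160101_i12_f015_HRRR_wrfnat.grb2 | grep APCP
+
+ The record number is the first field of each output line.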
Plot-Data-Plane
---------------
@@ -971,112 +1007,117 @@ Plot-Data-Plane
Q. How do I inspect Gen-Vx-Mask output?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Check to see if the call to Gen-Vx-Mask actually did create good output
-with Plot-Data-Plane. The following commands assume that the MET executables
-are found in your path.
-
-.. code-block:: none
+ .. dropdown:: Answer
- plot_data_plane \
- out/gen_vx_mask/CONUS_poly.nc \
- out/gen_vx_mask/CONUS_poly.ps \
- 'name="CONUS"; level="(*,*)";'
+ Use Plot-Data-Plane to check whether the call to Gen-Vx-Mask actually
+ created good output. The following commands assume that the MET
+ executables are found in your path.
-View that postscript output file, using something like "gv"
-for ghostview:
+ .. code-block:: none
-.. code-block:: none
-
- gv out/gen_vx_mask/CONUS_poly.ps
+ plot_data_plane \
+ out/gen_vx_mask/CONUS_poly.nc \
+ out/gen_vx_mask/CONUS_poly.ps \
+ 'name="CONUS"; level="(*,*)";'
+
+ View that postscript output file, using something like "gv"
+ for ghostview:
+
+ .. code-block:: none
+
+ gv out/gen_vx_mask/CONUS_poly.ps
-Please review a map of 0's and 1's over the USA to determine if the output
-file is what the user expects. It always a good idea to start with
-plot_data_plane when working with data to make sure MET
-is plotting the data correctly and in the expected location.
+ Please review a map of 0's and 1's over the USA to determine if the output
+ file is what the user expects. It is always a good idea to start with
+ plot_data_plane when working with data to make sure MET
+ is plotting the data correctly and in the expected location.
Q. How do I specify the GRIB version?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-When MET reads Gridded data files, it must determine the type of
-file it's reading. The first thing it checks is the suffix of the file.
-The following are all interpreted as GRIB1: .grib, .grb, and .gb.
-While these mean GRIB2: .grib2, .grb2, and .gb2.
+ .. dropdown:: Answer
+
+ When MET reads gridded data files, it must determine the type of
+ file it's reading. The first thing it checks is the suffix of the file.
+ The following are all interpreted as GRIB1: .grib, .grb, and .gb,
+ while these mean GRIB2: .grib2, .grb2, and .gb2.
-There are 2 choices to control how MET interprets a grib file. Renaming
-the files to use a particular suffix, or keep them
-named and explicitly tell MET to interpret them as GRIB1 or GRIB2 using
-the "file_type" configuration option.
+ There are 2 ways to control how MET interprets a GRIB file: rename
+ the files to use a particular suffix, or keep their current names
+ and explicitly tell MET to interpret them as GRIB1 or GRIB2 using
+ the "file_type" configuration option.
-The examples below use the plot_data_plane tool to plot the data. Set
+ The examples below use the plot_data_plane tool to plot the data. Set
-.. code-block:: none
-
- "file_type = GRIB2;"
+ .. code-block:: none
-To keep the files named this as they are, add "file_type = GRIB2;" to all the
-MET configuration files (i.e. Grid-Stat, MODE, and so on) that you use:
+ "file_type = GRIB2;"
-.. code-block:: none
-
- plot_data_plane \
- test_2.5_prog.grib \
- test_2.5_prog.ps \
- 'name="TSTM"; level="A0"; file_type=GRIB2;' \
- -plot_range 0 100
+ To keep the files named as they are, add "file_type = GRIB2;"
+ to all the MET configuration files (i.e. Grid-Stat, MODE, and so on)
+ that you use:
+
+ .. code-block:: none
+
+ plot_data_plane \
+ test_2.5_prog.grib \
+ test_2.5_prog.ps \
+ 'name="TSTM"; level="A0"; file_type=GRIB2;' \
+ -plot_range 0 100
Q. How do I test the variable naming convention? (Record number example.)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Make sure MET can read GRIB2 data. Plot the data from that GRIB2 file
-by running:
+ .. dropdown:: Answer
+
+ Make sure MET can read GRIB2 data. Plot the data from that GRIB2 file
+ by running:
-.. code-block:: none
+ .. code-block:: none
- plot_data_plane LTIA98_KWBR_201305180600.grb2 tmp_z2.ps 'name="TMP"; level="R2";
+ plot_data_plane LTIA98_KWBR_201305180600.grb2 tmp_z2.ps 'name="TMP"; level="R2";'
-"R2" tells MET to plot record number 2. Record numbers 1 and 2 both
-contain temperature data and 2-meters. Here's some wgrib2 output:
+ "R2" tells MET to plot record number 2. Record numbers 1 and 2 both
+ contain temperature data and 2-meters. Here's some wgrib2 output:
-.. code-block:: none
+ .. code-block:: none
- 1:0:d=2013051806:TMP:2 m above ground:anl:analysis/forecast error 2:3323062:d=2013051806:TMP:2 m above ground:anl:
+ 1:0:d=2013051806:TMP:2 m above ground:anl:analysis/forecast error
+ 2:3323062:d=2013051806:TMP:2 m above ground:anl:
-The GRIB id info has been the same between records 1 and 2.
+ The GRIB id info is the same between records 1 and 2.
Q. How do I compute and verify wind speed?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Here's how to compute and verify wind speed using MET. Good news, MET
-already includes logic for deriving wind speed on the fly. The GRIB
-abbreviation for wind speed is WIND. To request WIND from a GRIB1 or
-GRIB2 file, MET first checks to see if it already exists in the current
-file. If so, it'll use it as is. If not, it'll search for the corresponding
-U and V records and derive wind speed to use on the fly.
-
-In this example the RTMA file is named rtma.grb2 and the UPP file is
-named wrf.grb, please try running the following commands to plot wind speed:
-
-.. code-block:: none
-
- plot_data_plane wrf.grb wrf_wind.ps \
- 'name"WIND"; level="Z10";' -v 3
- plot_data_plane rtma.grb2 rtma_wind.ps \
- 'name"WIND"; level="Z10";' -v 3
+ .. dropdown:: Answer
-In the first call, the log message should be similar to this:
+ Here's how to compute and verify wind speed using MET. Good news: MET
+ already includes logic for deriving wind speed on the fly. The GRIB
+ abbreviation for wind speed is WIND. To request WIND from a GRIB1 or
+ GRIB2 file, MET first checks to see if it already exists in the current
+ file. If so, it'll use it as is. If not, it'll search for the corresponding
+ U and V records and derive wind speed to use on the fly.
+
+ In this example, the RTMA file is named rtma.grb2 and the UPP file is
+ named wrf.grb. Please try running the following commands to
+ plot wind speed:
+
+ .. code-block:: none
+
+ plot_data_plane wrf.grb wrf_wind.ps \
+ 'name="WIND"; level="Z10";' -v 3
+ plot_data_plane rtma.grb2 rtma_wind.ps \
+ 'name="WIND"; level="Z10";' -v 3
-.. code-block:: none
+ In the first call, the log message should be similar to this:
- DEBUG 3: MetGrib1DataFile::data_plane_array() ->
- Attempt to derive winds from U and V components.
+ .. code-block:: none
-In the second one, this won't appear since wind speed already exists
-in the RTMA file.
+ DEBUG 3: MetGrib1DataFile::data_plane_array() ->
+ Attempt to derive winds from U and V components.
+
+ In the second one, this won't appear since wind speed already exists
+ in the RTMA file.
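+
+ To verify wind speed rather than just plot it, request WIND in the
+ Point-Stat or Grid-Stat config file in the same way. A minimal sketch
+ of the "fcst" setting:
+
+ .. code-block:: none
+
+ fcst = {
+    field = [
+       {
+         name  = "WIND";
+         level = [ "Z10" ];
+       }
+    ];
+ }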
Stat-Analysis
-------------
@@ -1084,184 +1125,196 @@ Stat-Analysis
Q. How does '-aggregate_stat' work?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-In Stat-Analysis, there is a "-vx_mask" job filtering option. That option
-reads the VX_MASK column from the input STAT lines and applies string
-matching with the values in that column. Presumably, all of the MPR lines
-will have the value of "FULL" in the VX_MASK column.
-
-Stat-Analysis has the ability to read MPR lines and recompute statistics
-from them using the same library code that the other MET tools use. The
-job command options which begin with "-out" are used to specify settings
-to be applied to the output of that process. For example, the "-fcst_thresh"
-option filters strings from the input "FCST_THRESH" header column. The
-"-out_fcst_thresh" option defines the threshold to be applied to the output
-of Stat-Analysis. So reading MPR lines and applying a threshold to define
-contingency table statistics (CTS) would be done using the
-"-out_fcst_thresh" option.
-
-Stat-Analysis does have the ability to filter MPR lat/lon locations
-using the "-mask_poly" option for a lat/lon polyline and the "-mask_grid"
-option to define a retention grid.
-
-However, there is currently no "-mask_sid" option.
-
-With met-5.2 and later versions, one option is to apply column string
-matching using the "-column_str" option to define the list of station
-ID's you would like to aggregate. That job would look something like this:
-
-.. code-block:: none
+ .. dropdown:: Answer
- stat_analysis -lookin path/to/mpr/directory \
- -job aggregate_stat -line_type MPR -out_line_type CNT \
- -column_str OBS_SID SID1,SID2,SID3,...,SIDN \
- -set_hdr VX_MASK SID_GROUP_NAME \
- -out_stat mpr_to_cnt.stat
-
-Where SID1...SIDN is a comma-separated list of the station id's in the
-group. Notice that a value for the output VX_MASK column using the
-"-set_hdr" option has been specified. Otherwise, this would show a list
-of the unique values found in that column. Presumably, all the input
-VX_MASK columns say "FULL" so that's what the output would say. Use
-"-set_hdr" to explicitly set the output value.
+ In Stat-Analysis, there is a "-vx_mask" job filtering option. That option
+ reads the VX_MASK column from the input STAT lines and applies string
+ matching with the values in that column. Presumably, all of the MPR lines
+ will have the value of "FULL" in the VX_MASK column.
+
+ Stat-Analysis has the ability to read MPR lines and recompute statistics
+ from them using the same library code that the other MET tools use. The
+ job command options which begin with "-out" are used to specify settings
+ to be applied to the output of that process. For example, the
+ "-fcst_thresh" option filters strings from the input "FCST_THRESH"
+ header column. The "-out_fcst_thresh" option defines the threshold
+ to be applied to the output
+ of Stat-Analysis. So reading MPR lines and applying a threshold to define
+ contingency table statistics (CTS) would be done using the
+ "-out_fcst_thresh" option.
+
+ Stat-Analysis does have the ability to filter MPR lat/lon locations
+ using the "-mask_poly" option for a lat/lon polyline and the "-mask_grid"
+ option to define a retention grid.
+
+ However, there is currently no "-mask_sid" option.
+
+ With MET-5.2 and later versions, one option is to apply column string
+ matching using the "-column_str" option to define the list of station
+ IDs you would like to aggregate. That job would look something like this:
+
+ .. code-block:: none
+
+ stat_analysis -lookin path/to/mpr/directory \
+ -job aggregate_stat -line_type MPR -out_line_type CNT \
+ -column_str OBS_SID SID1,SID2,SID3,...,SIDN \
+ -set_hdr VX_MASK SID_GROUP_NAME \
+ -out_stat mpr_to_cnt.stat
+
+ Where SID1...SIDN is a comma-separated list of the station IDs in the
+ group. Notice that a value for the output VX_MASK column using the
+ "-set_hdr" option has been specified. Otherwise, this would show a list
+ of the unique values found in that column. Presumably, all the input
+ VX_MASK columns say "FULL" so that's what the output would say. Use
+ "-set_hdr" to explicitly set the output value.
Q. What is the best way to average the FSS scores within several days or even several months using 'Aggregate to Average Scores'?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Below is the best way to aggregate together the Neighborhood Continuous
-(NBRCNT) lines across multiple days, specifically the fractions skill
-score (FSS). The Stat-Analysis tool is designed to do this. This example
-is for aggregating scores for the accumulated precipitation (APCP) field.
+ .. dropdown:: Answer
+
+ Below is the best way to aggregate the Neighborhood Continuous
+ (NBRCNT) lines across multiple days, specifically the fractions skill
+ score (FSS). The Stat-Analysis tool is designed to do this. This example
+ is for aggregating scores for the accumulated precipitation (APCP) field.
-Run the "aggregate" job type in stat_analysis to do this:
+ Run the "aggregate" job type in stat_analysis to do this:
-.. code-block:: none
+ .. code-block:: none
- stat_analysis -lookin directory/file*_nbrcnt.txt \
- -job aggregate -line_type NBRCNT -by FCST_VAR,FCST_LEAD,FCST_THRESH,INTERP_MTHD,INTERP_PNTS -out_stat agg_nbrcnt.txt
+ stat_analysis -lookin directory/file*_nbrcnt.txt \
+ -job aggregate -line_type NBRCNT \
+ -by FCST_VAR,FCST_LEAD,FCST_THRESH,INTERP_MTHD,INTERP_PNTS \
+ -out_stat agg_nbrcnt.txt
-This job reads all the files that are passed to it on the command line with
-the "-lookin" option. List explicit filenames to read them directly.
-Listing a top-level directory name will search that directory for files
-ending in ".stat".
+ This job reads all the files that are passed to it on the command line with
+ the "-lookin" option. List explicit filenames to read them directly.
+ Listing a top-level directory name will search that directory for files
+ ending in ".stat".
-In this case, the job running is to "aggregate" the "NBRCNT" line type.
+ In this case, the job type is "aggregate" and the line type is "NBRCNT".
-In this case, the "-by" option is being used and lists several header
-columns. Stat-Analysis will run this job separately for each unique
-combination of those header column entries.
+ In this case, the "-by" option is being used and lists several header
+ columns. Stat-Analysis will run this job separately for each unique
+ combination of those header column entries.
-The output is printed to the screen, or use the "-out_stat" option to
-also write the aggregated output to a file named "agg_nbrcnt.txt".
+ The output is printed to the screen, or use the "-out_stat" option to
+ also write the aggregated output to a file named "agg_nbrcnt.txt".
Q. How do I use '-by' to capture unique entries?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Here is a stat-analysis job that could be used to run, read the MPR lines,
-define the probabilistic forecast thresholds, define the single observation
-threshold, and compute a PSTD output line. Using "-by FCST_VAR" tells it
-to run the job separately for each unique entry found in the FCST_VAR column.
-
-.. code-block:: none
+ .. dropdown:: Answer
- stat_analysis \
- -lookin point_stat_model2_120000L_20160501_120000V.stat \
- -job aggregate_stat -line_type MPR -out_line_type PSTD \
- -out_fcst_thresh ge0,ge0.1,ge0.2,ge0.3,ge0.4,ge0.5,ge0.6,ge0.7,ge0.8,ge0.9,ge1.0 \
- -out_obs_thresh eq1.0 \
- -by FCST_VAR \
- -out_stat out_pstd.txt
+ Here is a Stat-Analysis job that reads the MPR lines, defines the
+ probabilistic forecast thresholds and the single observation
+ threshold, and computes a PSTD output line.
+ Using "-by FCST_VAR" tells it to run the job separately for
+ each unique entry found in the FCST_VAR column.
+
+ .. code-block:: none
+
+ stat_analysis \
+ -lookin point_stat_model2_120000L_20160501_120000V.stat \
+ -job aggregate_stat -line_type MPR -out_line_type PSTD \
+ -out_fcst_thresh ge0,ge0.1,ge0.2,ge0.3,ge0.4,ge0.5,ge0.6,ge0.7,ge0.8,ge0.9,ge1.0 \
+ -out_obs_thresh eq1.0 \
+ -by FCST_VAR \
+ -out_stat out_pstd.txt
-The output statistics are written to "out_pstd.txt".
+ The output statistics are written to "out_pstd.txt".
Q. How do I use '-filter' to refine my output?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A.
-Here is an example of running a Stat-Analysis filter job to discard any
-CNT lines (continuous statistics) where the forecast rate and observation
-rate are less than 0.05. This is an alternative way of tossing out those
-cases without having to modify the source code.
-
-.. code-block:: none
-
- stat_analysis \
- -lookin out/grid_stat/grid_stat_120000L_20050807_120000V.stat \
- -job filter -dump_row filter_cts.txt -line_type CTS \
- -column_min BASER 0.05 -column_min FMEAN 0.05
- DEBUG 2: STAT Lines read = 436
- DEBUG 2: STAT Lines retained = 36
- DEBUG 2:
- DEBUG 2: Processing Job 1: -job filter -line_type CTS -column_min BASER
- 0.05 -column_min
- FMEAN 0.05 -dump_row filter_cts.txt
- DEBUG 1: Creating
- STAT output file "filter_cts.txt"
- FILTER: -job filter -line_type
- CTS -column_min
- BASER 0.05 -column_min
- FMEAN 0.05 -dump_row filter_cts.txt
- DEBUG 2: Job 1 used 36 out of 36 STAT lines.
-
-This job reads find 56 CTS lines, but only keeps 36 of them where both
-the BASER and FMEAN columns are at least 0.05.
+ .. dropdown:: Answer
+
+ Here is an example of running a Stat-Analysis filter job to discard any
+ CTS lines (contingency table statistics) where the base rate (BASER) and
+ forecast mean (FMEAN) are less than 0.05. This is an alternative way of
+ tossing out those cases without having to modify the source code.
+
+ .. code-block:: none
+
+ stat_analysis \
+ -lookin out/grid_stat/grid_stat_120000L_20050807_120000V.stat \
+ -job filter -dump_row filter_cts.txt -line_type CTS \
+ -column_min BASER 0.05 -column_min FMEAN 0.05
+ DEBUG 2: STAT Lines read = 436
+ DEBUG 2: STAT Lines retained = 36
+ DEBUG 2:
+ DEBUG 2: Processing Job 1: -job filter -line_type CTS -column_min BASER 0.05 -column_min FMEAN 0.05 -dump_row filter_cts.txt
+ DEBUG 1: Creating STAT output file "filter_cts.txt"
+ FILTER: -job filter -line_type CTS -column_min BASER 0.05 -column_min FMEAN 0.05 -dump_row filter_cts.txt
+ DEBUG 2: Job 1 used 36 out of 36 STAT lines.
+
+ This job reads 436 STAT lines but keeps only the 36 CTS lines for which
+ both the BASER and FMEAN columns are at least 0.05.
Q. How do I use the “-by” flag to stratify results?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Adding "-by FCST_VAR" is a great way to associate a single value,
-of say RMSE, with each of the forecast variables (UGRD,VGRD and WIND).
+ .. dropdown:: Answer
+
+ Adding "-by FCST_VAR" is a great way to associate a single value,
+ of say RMSE, with each of the forecast variables (UGRD,VGRD and WIND).
-Run the following job on the output from Grid-Stat generated when the
-"make test" command is run:
+ Run the following job on the output from Grid-Stat generated when the
+ "make test" command is run:
-.. code-block:: none
-
- stat_analysis -lookin out/grid_stat \
- -job aggregate_stat -line_type SL1L2 -out_line_type CNT \
- -by FCST_VAR,FCST_LEV \
- -out_stat cnt.txt
+ .. code-block:: none
+
+ stat_analysis -lookin out/grid_stat \
+ -job aggregate_stat -line_type SL1L2 -out_line_type CNT \
+ -by FCST_VAR,FCST_LEV \
+ -out_stat cnt.txt
-The resulting cnt.txt file includes separate output for 6 different
-FCST_VAR values at different levels.
+ The resulting cnt.txt file includes separate output for 6 different
+ FCST_VAR values at different levels.
Q. How do I speed up run times?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A.
-By default, Stat-Analysis has two options enabled which slow it down.
-Disabling these two options will create quicker run times:
-1. The computation of rank correlation statistics, Spearman's Rank
- Correlation and Kendall's Tau. Disable them using "-rank_corr_flag FALSE".
+ .. dropdown:: Answer
-2. The computation of bootstrap confidence intervals. Disable them using
- "-n_boot_rep 0".
+ By default, Stat-Analysis has two options enabled which slow it down.
+ Disabling these two options will speed up run times:
-Two more suggestions for faster run times.
+ 1. The computation of rank correlation statistics, Spearman's Rank
+ Correlation and Kendall's Tau. Disable them using
+ "-rank_corr_flag FALSE".
-1. Instead of using "-fcst_var u", use "-by fcst_var". This will compute
- statistics separately for each unique entry found in the FCST_VAR column.
+ 2. The computation of bootstrap confidence intervals. Disable them using
+ "-n_boot_rep 0".
-2. Instead of using "-out" to write the output to a text file, use "-out_stat"
- which will write a full STAT output file, including all the header columns.
- This will create a long list of values in the OBTYPE column. To avoid the
- long, OBTYPE column value, manually set the output using
- "-set_hdr OBTYPE ALL_TYPES". Or set its value to whatever is needed.
+ Here are two more suggestions for faster run times:
-.. code-block:: none
-
- stat_analysis \
- -lookin diag_conv_anl.2015060100.stat \
- -job aggregate_stat -line_type MPR -out_line_type CNT -by FCST_VAR \
- -out_stat diag_conv_anl.2015060100_cnt.txt -set_hdr OBTYPE ALL_TYPES \
- -n_boot_rep 0 -rank_corr_flag FALSE -v 4
+ 1. Instead of using "-fcst_var u", use "-by fcst_var". This will compute
+    statistics separately for each unique entry found in the
+    FCST_VAR column.
+
+ 2. Instead of using "-out" to write the output to a text file, use
+    "-out_stat", which will write a full STAT output file, including
+    all the header columns. This will create a long list of values in
+    the OBTYPE column. To avoid a long OBTYPE column value, manually
+    set the output using "-set_hdr OBTYPE ALL_TYPES", or set its value
+    to whatever is needed.
+
+ .. code-block:: none
-Adding the "-by FCST_VAR" option to compute stats for all variables and
-runs quickly.
+ stat_analysis \
+ -lookin diag_conv_anl.2015060100.stat \
+ -job aggregate_stat -line_type MPR -out_line_type CNT -by FCST_VAR \
+ -out_stat diag_conv_anl.2015060100_cnt.txt -set_hdr OBTYPE ALL_TYPES \
+ -n_boot_rep 0 -rank_corr_flag FALSE -v 4
+
+ Adding the "-by FCST_VAR" option computes stats for all variables,
+ and the job runs quickly.
TC-Stat
-------
@@ -1269,64 +1322,67 @@ TC-Stat
Q. How do I use the “-by” flag to stratify results?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-To perform tropical cyclone evaluations for multiple models use the
-"-by AMODEL" option with the tc_stat tool. Here is an example.
+ .. dropdown:: Answer
+
+ To perform tropical cyclone evaluations for multiple models use the
+ "-by AMODEL" option with the tc_stat tool. Here is an example.
-In this case the tc_stat job looked at the 48 hour lead time for the HWRF
-and H3HW models. Without the “-by AMODEL” option, the output would be
-all grouped together.
+ In this case the tc_stat job looked at the 48-hour lead time for the
+ HWFI and H3WI models. Without the "-by AMODEL" option, the output
+ would be all grouped together.
-.. code-block:: none
+ .. code-block:: none
- tc_stat \
- -lookin d2014_vx_20141117_reset/al/tc_pairs/tc_pairs_H3WI_* \
- -lookin d2014_vx_20141117_reset/al/tc_pairs/tc_pairs_HWFI_* \
- -job summary -lead 480000 -column TRACK -amodel HWFI,H3WI \
- -by AMODEL -out sample.out
+ tc_stat \
+ -lookin d2014_vx_20141117_reset/al/tc_pairs/tc_pairs_H3WI_* \
+ -lookin d2014_vx_20141117_reset/al/tc_pairs/tc_pairs_HWFI_* \
+ -job summary -lead 480000 -column TRACK -amodel HWFI,H3WI \
+ -by AMODEL -out sample.out
-This will result in all 48 hour HWFI and H3WI track forecasts to be
-aggregated (statistics and scores computed) for each model separately.
+ This will result in all 48-hour HWFI and H3WI track forecasts being
+ aggregated (statistics and scores computed) for each model separately.
Q. How do I use rapid intensification verification?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-To get the most output, run something like this:
+ .. dropdown:: Answer
-.. code-block:: none
+ To get the most output, run something like this:
- tc_stat \
- -lookin path/to/tc_pairs/output \
- -job rirw -dump_row test \
- -out_line_type CTC,CTS,MPR
+ .. code-block:: none
-By default, rapid intensification (RI) is defined as a 24-hour exact
-change exceeding 30kts. To define RI differently, modify that definition
-using the ADECK, BDECK, or both using -rirw_time, -rirw_exact,
-and -rirw_thresh options. Set -rirw_window to something larger than 0
-to enable false alarms to be considered hits when they were "close enough"
-in time.
+ tc_stat \
+ -lookin path/to/tc_pairs/output \
+ -job rirw -dump_row test \
+ -out_line_type CTC,CTS,MPR
-.. code-block:: none
+ By default, rapid intensification (RI) is defined as an exact 24-hour
+ change exceeding 30 kts. To define RI differently, apply the
+ -rirw_time, -rirw_exact, and -rirw_thresh options to the ADECK,
+ BDECK, or both. Set -rirw_window to something larger than 0 to enable
+ false alarms to be considered hits when they were "close enough"
+ in time.
- tc_stat \
- -lookin path/to/tc_pairs/output \
- -job rirw -dump_row test \
- -rirw_time 36 -rirw_window 12 \
- -out_line_type CTC,CTS,MPR
+ .. code-block:: none
-To evaluate Rapid Weakening (RW) by setting "-rirw_thresh <=-30".
-To stratify your results by lead time, you could add the "-by LEAD" option.
+ tc_stat \
+ -lookin path/to/tc_pairs/output \
+ -job rirw -dump_row test \
+ -rirw_time 36 -rirw_window 12 \
+ -out_line_type CTC,CTS,MPR
-.. code-block:: none
+ To evaluate Rapid Weakening (RW), set "-rirw_thresh <=-30".
+ To stratify your results by lead time, add the
+ "-by LEAD" option.
- tc_stat \
- -lookin path/to/tc_pairs/output \
- -job rirw -dump_row test \
- -rirw_time 36 -rirw_window 12 \
- -rirw_thresh <=-30 -by LEAD \
- -out_line_type CTC,CTS,MPR
+ .. code-block:: none
+
+ tc_stat \
+ -lookin path/to/tc_pairs/output \
+ -job rirw -dump_row test \
+ -rirw_time 36 -rirw_window 12 \
+ -rirw_thresh <=-30 -by LEAD \
+ -out_line_type CTC,CTS,MPR
Utilities
---------
@@ -1334,131 +1390,136 @@ Utilities
Q. What would be an example of scripting to call MET?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-The following is an example of how to call MET from a bash script
-including passing in variables. This shell script is listed below to run
-Grid-Stat, call Plot-Data-Plane to plot the resulting difference field,
-and call convert to reformat from PostScript to PNG.
-
-.. code-block:: none
-
- #!/bin/sh
- for case in `echo "FCST OBS"`; do
- export TO_GRID=${case}
- grid_stat gfs.t00z.pgrb2.0p25.f000 \
- nam.t00z.conusnest.hiresf00.tm00.grib2 GridStatConfig
- plot_data_plane \
- *TO_GRID_${case}*_pairs.nc TO_GRID_${case}.ps 'name="DIFF_TMP_P500_TMP_P500_FULL"; \
- level="(*,*)";'
- convert -rotate 90 -background white -flatten TO_GRID_${case}.ps
- TO_GRID_${case}.png
- done
-
+ .. dropdown:: Answer
+
+ The following is an example of how to call MET from a bash script,
+ including passing in variables. The shell script below runs
+ Grid-Stat, calls Plot-Data-Plane to plot the resulting difference field,
+ and calls convert to reformat from PostScript to PNG.
+
+ .. code-block:: none
+
+ #!/bin/sh
+ for case in `echo "FCST OBS"`; do
+ export TO_GRID=${case}
+ grid_stat gfs.t00z.pgrb2.0p25.f000 \
+ nam.t00z.conusnest.hiresf00.tm00.grib2 GridStatConfig
+ plot_data_plane \
+ *TO_GRID_${case}*_pairs.nc TO_GRID_${case}.ps 'name="DIFF_TMP_P500_TMP_P500_FULL"; \
+ level="(*,*)";'
+ convert -rotate 90 -background white -flatten TO_GRID_${case}.ps \
+ TO_GRID_${case}.png
+ done
Q. How do I convert TRMM data files?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Here is an example of NetCDF that the MET software is not expecting. Here
-is an option for accessing that same TRMM data, following links from the
-MET website:
-http://dtcenter.org/community-code/model-evaluation-tools-met/input-data
-
-.. code-block:: none
-
- # Pull binary 3-hourly TRMM data file
- wget
- ftp://disc2.nascom.nasa.gov/data/TRMM/Gridded/3B42_V7/201009/3B42.100921.00z.7.
- precipitation.bin
- # Pull Rscript from MET website
- wget http://dtcenter.org/sites/default/files/community-code/met/r-scripts/trmmbin2nc.R
- # Edit that Rscript by setting
- out_lat_ll = -50
- out_lon_ll = 0
- out_lat_ur = 50
- out_lon_ur = 359.75
- # Run the Rscript
- Rscript trmmbin2nc.R 3B42.100921.00z.7.precipitation.bin \
- 3B42.100921.00z.7.precipitation.nc
- # Plot the result
- plot_data_plane 3B42.100921.00z.7.precipitation.nc \
- 3B42.100921.00z.7.precipitation.ps 'name="APCP_03"; level="(*,*)";'
-
-It may be possible that the domain of the data is smaller. Here are some options:
-
-1. In that Rscript, choose different boundaries (i.e. out_lat/lon_ll/ur)
- to specify the tile of data to be selected.
-
-2. As of version 5.1, MET includes support for regridding the data it reads.
- Keep TRMM on it's native domain and use the MET tools to do the regridding.
- For example, the Regrid-Data-Plane" tool reads a NetCDF file, regrids
- the data, and writes a NetCDF file. Alternatively, the "regrid" section
- of the configuration files for the MET tools may be used to do the
- regridding on the fly. For example, run Grid-Stat to compare to the model
- output to TRMM and say
-
-.. code-block:: none
-
- "regrid = { field = FCST;
- ...}"
-
-That tells Grid-Stat to automatically regrid the TRMM observations to
-the model domain.
+ .. dropdown:: Answer
+
+ That is an example of a NetCDF file that the MET software is not
+ expecting. Here is an option for accessing that same TRMM data,
+ following links from the MET website:
+ http://dtcenter.org/community-code/model-evaluation-tools-met/input-data
+
+ .. code-block:: none
+
+ # Pull binary 3-hourly TRMM data file
+ wget ftp://disc2.nascom.nasa.gov/data/TRMM/Gridded/3B42_V7/201009/3B42.100921.00z.7.precipitation.bin
+ # Pull Rscript from MET website
+ wget http://dtcenter.org/sites/default/files/community-code/met/r-scripts/trmmbin2nc.R
+ # Edit that Rscript by setting
+ out_lat_ll = -50
+ out_lon_ll = 0
+ out_lat_ur = 50
+ out_lon_ur = 359.75
+ # Run the Rscript
+ Rscript trmmbin2nc.R 3B42.100921.00z.7.precipitation.bin \
+ 3B42.100921.00z.7.precipitation.nc
+ # Plot the result
+ plot_data_plane 3B42.100921.00z.7.precipitation.nc \
+ 3B42.100921.00z.7.precipitation.ps 'name="APCP_03"; level="(*,*)";'
+
+ It may be that the desired domain of the data is smaller.
+ Here are some options:
+
+ 1. In that Rscript, choose different boundaries (i.e. out_lat/lon_ll/ur)
+ to specify the tile of data to be selected.
+
+ 2. As of version 5.1, MET includes support for regridding the
+    data it reads. Keep TRMM on its native domain and use the
+    MET tools to do the regridding.
+    For example, the Regrid-Data-Plane tool reads a NetCDF file, regrids
+    the data, and writes a NetCDF file. Alternatively, the "regrid" section
+    of the configuration files for the MET tools may be used to do the
+    regridding on the fly. For example, run Grid-Stat to compare
+    the model output to TRMM and say
+
+ .. code-block:: none
+
+ "regrid = { field = FCST;
+ ...}"
+
+ That tells Grid-Stat to automatically regrid the TRMM observations to
+ the model domain.
Q. How do I convert a PostScript to png?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A.
-Use the linux “convert” tool to convert a Plot-Data-Plane PostScript
-file to a png:
-.. code-block:: none
+ .. dropdown:: Answer
+
+ Use the Linux "convert" tool to convert a Plot-Data-Plane PostScript
+ file to a png:
+
+ .. code-block:: none
- convert -rotate 90 -background white plot_dbz.ps plot_dbz.png
+ convert -rotate 90 -background white plot_dbz.ps plot_dbz.png
-To convert a MODE PostScript to png
+ To convert a MODE PostScript file to png:
-.. code-block:: none
+ .. code-block:: none
- convert mode_out.ps mode_out.png
+ convert mode_out.ps mode_out.png
-Will result in all 6-7 pages in the PostScript file be written out to a
-seperate .png with the following naming convention:
+ This will result in all 6-7 pages in the PostScript file being written
+ out to separate .png files with the following naming convention:
-mode_out-0.png, mode_out-1.png, mode_out-2.png, etc.
+ mode_out-0.png, mode_out-1.png, mode_out-2.png, etc.
Q. How do pairwise differences using plot_tcmpr.R work?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-One necessary step in computing pairwise differences is "event equalizing"
-the data. This means extracting a subset of cases that are common to
-both models.
+ .. dropdown:: Answer
+
+ One necessary step in computing pairwise differences is "event equalizing"
+ the data. This means extracting a subset of cases that are common to
+ both models.
-While the tc_stat tool does not compute pairwise differences, it can apply
-the "event_equalization" logic to extract the cases common to two models.
-This is done using the config file "event_equal = TRUE;" option or
-setting "-event_equal true" on the command line.
+ While the tc_stat tool does not compute pairwise differences, it can apply
+ the "event_equalization" logic to extract the cases common to two models.
+ This is done using the config file "event_equal = TRUE;" option or
+ setting "-event_equal true" on the command line.
-Most of the hurricane track analysis and plotting is done using the
-plot_tcmpr.R Rscript. It makes a call to the tc_stat tool to track
-data down to the desired subset, compute pairwise differences if needed,
-and then plot the result.
+ Most of the hurricane track analysis and plotting is done using the
+ plot_tcmpr.R Rscript. It makes a call to the tc_stat tool to filter the
+ track data down to the desired subset, compute pairwise differences if
+ needed, and then plot the result.
-.. code-block:: none
+ .. code-block:: none
- Rscript ${MET_BUILD_BASE}/scripts/Rscripts/plot_tcmpr.R \
- -lookin tc_pairs_output.tcst \
- -filter '-amodel AHWI,GFSI' \
- -series AMODEL AHWI,GFSI,AHWI-GFSI \
- -plot MEAN,BOXPLOT
+ Rscript ${MET_BUILD_BASE}/scripts/Rscripts/plot_tcmpr.R \
+ -lookin tc_pairs_output.tcst \
+ -filter '-amodel AHWI,GFSI' \
+ -series AMODEL AHWI,GFSI,AHWI-GFSI \
+ -plot MEAN,BOXPLOT
-The resulting plots include three series - one for AHWI, one for GFSI,
-and one for their pairwise difference.
+ The resulting plots include three series - one for AHWI, one for GFSI,
+ and one for their pairwise difference.
-It's a bit cumbersome to understand all the options available, but this may
-be really useful. If nothing else, it could be adapted to dump out the
-pairwise differences that are needed.
+ It's a bit cumbersome to understand all the options available, but this may
+ be really useful. If nothing else, it could be adapted to dump out the
+ pairwise differences that are needed.
Miscellaneous
@@ -1466,266 +1527,281 @@ Miscellaneous
Q. Regrid-Data-Plane - How do I define a LatLon grid?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A.
-Here is an example of the NetCDF variable attributes that MET uses to
-define a LatLon grid:
-.. code-block:: none
+ .. dropdown:: Answer
+
+ Here is an example of the NetCDF variable attributes that MET uses to
+ define a LatLon grid:
+
+ .. code-block:: none
- :Projection = "LatLon" ;
- :lat_ll = "25.063000 degrees_north" ;
- :lon_ll = "-124.938000 degrees_east" ;
- :delta_lat = "0.125000 degrees" ;
- :delta_lon = "0.125000 degrees" ;
- :Nlat = "224 grid_points" ;
- :Nlon = "464 grid_points" ;
+ :Projection = "LatLon" ;
+ :lat_ll = "25.063000 degrees_north" ;
+ :lon_ll = "-124.938000 degrees_east" ;
+ :delta_lat = "0.125000 degrees" ;
+ :delta_lon = "0.125000 degrees" ;
+ :Nlat = "224 grid_points" ;
+ :Nlon = "464 grid_points" ;
-This can be created by running the Regrid-Data-Plane" tool to regrid
-some GFS data to a LatLon grid:
+ This can be created by running the Regrid-Data-Plane tool to regrid
+ some GFS data to a LatLon grid:
-.. code-block:: none
+ .. code-block:: none
- regrid_data_plane \
- gfs_2012040900_F012.grib G110 \
- gfs_g110.nc -field 'name="TMP"; level="Z2";'
+ regrid_data_plane \
+ gfs_2012040900_F012.grib G110 \
+ gfs_g110.nc -field 'name="TMP"; level="Z2";'
-Use ncdump to look at the attributes. As an exercise, try defining
-these global attributes (and removing the other projection-related ones)
-and then try again.
+ Use ncdump to look at the attributes. As an exercise, try defining
+ these global attributes (and removing the other projection-related ones)
+ and then try again.
Q. Pre-processing - How do I use wgrib2 and pcp_combine to regrid and reformat NetCDF files?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-If you are extracting only one or two fields from a file, using MET's
-Regrid-Data-Plane can be used to generate a Lat-Lon projection. If
-regridding all fields, the wgrib2 utility may be more useful. Here's an
-example of using wgrib2 and pcp_combine to generate NetCDF files
-MET can read:
+ .. dropdown:: Answer
+
+ If you are extracting only one or two fields from a file, MET's
+ Regrid-Data-Plane tool can be used to generate a Lat-Lon projection. If
+ regridding all fields, the wgrib2 utility may be more useful. Here's an
+ example of using wgrib2 and pcp_combine to generate NetCDF files
+ MET can read:
-.. code-block:: none
+ .. code-block:: none
- wgrib2 gfsrain06.grb -new_grid latlon 112:131:0.1 \
- 25:121:0.1 gfsrain06_regrid.grb2
-
-And then run that GRIB2 file through pcp_combine using the "-add" option
-with only one file provided:
+ wgrib2 gfsrain06.grb -new_grid latlon 112:131:0.1 \
+ 25:121:0.1 gfsrain06_regrid.grb2
-.. code-block:: none
+ And then run that GRIB2 file through pcp_combine using the "-add" option
+ with only one file provided:
- pcp_combine -add gfsrain06_regrid.grb2 'name="APCP"; \
- level="A6";' gfsrain06_regrid.nc
+ .. code-block:: none
-Then the output NetCDF file does not have this problem:
+ pcp_combine -add gfsrain06_regrid.grb2 'name="APCP"; \
+ level="A6";' gfsrain06_regrid.nc
-.. code-block:: none
+ Then the output NetCDF file does not have this problem:
- ncdump -h 2a_wgrib2_regrid.nc | grep "_ll"
- :lat_ll = "25.000000 degrees_north" ;
- :lon_ll = "112.000000 degrees_east" ;
+ .. code-block:: none
+
+ ncdump -h 2a_wgrib2_regrid.nc | grep "_ll"
+ :lat_ll = "25.000000 degrees_north" ;
+ :lon_ll = "112.000000 degrees_east" ;
Q. TC-Pairs - How do I get rid of WARNING: TrackInfo Using Specify Model Suffix?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Below is a command example to run:
-
-.. code-block:: none
-
- tc_pairs \
- -adeck aep142014.h4hw.dat \
- -bdeck bep142014.dat \
- -config TCPairsConfig_v5.0 \
- -out tc_pairs_v5.0_patch \
- -log tc_pairs_v5.0_patch.log \
- -v 3
-
-Below is a warning message:
-
-.. code-block:: none
-
- WARNING: TrackInfo::add(const ATCFLine &) ->
- skipping ATCFLine since the valid time is not
- increasing (20140801_000000 < 20140806_060000):
- WARNING: AL, 03, 2014080100, 03, H4HW, 000,
- 120N, 547W, 38, 1009, XX, 34, NEQ, 0084, 0000,
- 0000, 0083, -99, -99, 59, 0, 0, , 0, , 0, 0,
-
-As a sanity check, the MET-TC code makes sure that the valid time of
-the track data doesn't go backwards in time. This warning states that
-this is
-occurring. The very likely reason for this is that the data being used
-are probably passing tc_pairs duplicate track data.
-
-Using grep, notice that the same track data shows up in
-"aal032014.h4hw.dat" and "aal032014_hfip_d2014_BERTHA.dat". Try this:
-
-.. code-block:: none
-
- grep H4HW aal*.dat | grep 2014080100 | grep ", 000,"
- aal032014.h4hw.dat:AL, 03, 2014080100, 03, H4HW, 000,
- 120N, 547W, 38, 1009, XX, 34, NEQ, 0084,
- 0000, 0000, 0083, -99, -99, 59, 0, 0, ,
- 0, , 0, 0, , , , , 0, 0, 0, 0, THERMO PARAMS,
- -9999, -9999, -9999, Y, 10, DT, -999
- aal032014_hfip_d2014_BERTHA.dat:AL, 03, 2014080100,
- 03, H4HW, 000, 120N, 547W, 38, 1009, XX, 34, NEQ,
- 0084, 0000, 0000, 0083, -99, -99, 59, 0, 0, , 0, , 0,
- 0, , , , , 0, 0, 0, 0, THERMOPARAMS, -9999 ,-9999 ,
- -9999 ,Y ,10 ,DT ,-999
-
-Those 2 lines are nearly identical, except for the spelling of
-"THERMO PARAMS" with a space vs "THERMOPARAMS" with no space.
-
-Passing tc_pairs duplicate track data results in this sort of warning.
-The DTC had the same sort of problem when setting up a real-time
-verification system. The same track data was making its way into
-multiple ATCF files.
-
-If this really is duplicate track data, work on the logic for where/how
-to store the track data. However, if the H4HW data in the first file
-actually differs from that in the second file, there is another option.
-You can specify a model suffix to be used for each ADECK source, as in
-this example (suffix=_EXP):
-
-.. code-block:: none
-
- tc_pairs \
- -adeck aal032014.h4hw.dat suffix=_EXP \
- -adeck aal032014_hfip_d2014_BERTHA.dat \
- -bdeck bal032014.dat \
- -config TCPairsConfig_match \
- -out tc_pairs_v5.0_patch \
- -log tc_pairs_v5.0_patch.log -v 3
-
-Any model names found in "aal032014.h4hw.dat" will now have _EXP tacked
-onto the end. Note that if a list of model names in the TCPairsConfig file
-needs specifying, include the _EXP variants to get them to show up in
-the output or it won’t show up.
-
-That'll get rid of the warnings because they will be storing the track
-data from the first source using a slightly different model name. This
-feature was added for users who are testing multiple versions of a
-model on the same set of storms. They might be using the same ATCF ID
-in all their output. But this enables them to distinguish the output
-in tc_pairs.
+ .. dropdown:: Answer
+
+ Below is an example command to run:
+
+ .. code-block:: none
+
+ tc_pairs \
+ -adeck aep142014.h4hw.dat \
+ -bdeck bep142014.dat \
+ -config TCPairsConfig_v5.0 \
+ -out tc_pairs_v5.0_patch \
+ -log tc_pairs_v5.0_patch.log \
+ -v 3
+
+ Below is a warning message:
+
+ .. code-block:: none
+
+ WARNING: TrackInfo::add(const ATCFLine &) ->
+ skipping ATCFLine since the valid time is not
+ increasing (20140801_000000 < 20140806_060000):
+ WARNING: AL, 03, 2014080100, 03, H4HW, 000,
+ 120N, 547W, 38, 1009, XX, 34, NEQ, 0084, 0000,
+ 0000, 0083, -99, -99, 59, 0, 0, , 0, , 0, 0,
+
+ As a sanity check, the MET-TC code makes sure that the valid time of
+ the track data doesn't go backwards in time. This warning states that
+ this is occurring. The most likely reason is that duplicate track data
+ are being passed to tc_pairs.
+
+ Using grep, notice that the same track data shows up in
+ "aal032014.h4hw.dat" and "aal032014_hfip_d2014_BERTHA.dat". Try this:
+
+ .. code-block:: none
+
+ grep H4HW aal*.dat | grep 2014080100 | grep ", 000,"
+ aal032014.h4hw.dat:AL, 03, 2014080100, 03, H4HW, 000,
+ 120N, 547W, 38, 1009, XX, 34, NEQ, 0084,
+ 0000, 0000, 0083, -99, -99, 59, 0, 0, ,
+ 0, , 0, 0, , , , , 0, 0, 0, 0, THERMO PARAMS,
+ -9999, -9999, -9999, Y, 10, DT, -999
+ aal032014_hfip_d2014_BERTHA.dat:AL, 03, 2014080100,
+ 03, H4HW, 000, 120N, 547W, 38, 1009, XX, 34, NEQ,
+ 0084, 0000, 0000, 0083, -99, -99, 59, 0, 0, , 0, , 0,
+ 0, , , , , 0, 0, 0, 0, THERMOPARAMS, -9999 ,-9999 ,
+ -9999 ,Y ,10 ,DT ,-999
+
+ Those 2 lines are nearly identical, except for the spelling of
+ "THERMO PARAMS" with a space vs "THERMOPARAMS" with no space.
+
+ Passing duplicate track data to tc_pairs results in this sort of warning.
+ The DTC had the same sort of problem when setting up a real-time
+ verification system. The same track data was making its way into
+ multiple ATCF files.
+
+ If this really is duplicate track data, work on the logic for where/how
+ to store the track data. However, if the H4HW data in the first file
+ actually differs from that in the second file, there is another option.
+ You can specify a model suffix to be used for each ADECK source, as in
+ this example (suffix=_EXP):
+
+ .. code-block:: none
+
+ tc_pairs \
+ -adeck aal032014.h4hw.dat suffix=_EXP \
+ -adeck aal032014_hfip_d2014_BERTHA.dat \
+ -bdeck bal032014.dat \
+ -config TCPairsConfig_match \
+ -out tc_pairs_v5.0_patch \
+ -log tc_pairs_v5.0_patch.log -v 3
+
+ Any model names found in "aal032014.h4hw.dat" will now have _EXP tacked
+ onto the end. Note that if a list of model names is specified in the
+ TCPairsConfig file, include the _EXP variants; otherwise they won't
+ show up in the output.
+
+ That'll get rid of the warnings because the track data from the first
+ source will be stored using a slightly different model name. This
+ feature was added for users who are testing multiple versions of a
+ model on the same set of storms. They might be using the same ATCF ID
+ in all their output, but this enables them to distinguish the output
+ in tc_pairs.
Q. Why is the grid upside down?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-The user provides a gridded data file to MET and it runs without error,
-but the data is packed upside down.
-
-Try using the "file_type" entry. The "file_type" entry specifies the
-input file type (e.g. GRIB1, GRIB2, NETCDF_MET, NETCDF_PINT, NETCDF_NCCF)
-rather than letting the code determine it itself. For valid file_type
-values, see "File types" in the *data/config/ConfigConstants* file. This
-entry should be defined within the "fcst" or "obs" dictionaries.
-Sometimes, directly specifying the type of file will help MET figure
-out what to properly do with the data.
-
-Another option is to use the Regrid-Data-Plane tool. The Regrid-Data-Plane
-tool may be run to read data from any gridded data file MET supports
-(i.e. GRIB1, GRIB2, and a variety of NetCDF formats), interpolate to a
-user-specified grid, and write the field(s) out in NetCDF format. See the
-Regrid-Data-Plane tool :numref:`regrid-data-plane` in the MET
-User's Guide for more
-detailed information. While the Regrid-Data-Plane tool is useful as a
-stand-alone tool, the capability is also included to automatically regrid
-data in most of the MET tools that handle gridded data. This "regrid"
-entry is a dictionary containing information about how to handle input
-gridded data files. The "regird" entry specifies regridding logic and
-has a "to_grid" entry that can be set to NONE, FCST, OBS, a named grid,
-the path to a gridded data file defining the grid, or an explicit grid
-specification string. See the :ref:`regrid` entry in
-the Configuration File Overview in the MET User's Guide for a more detailed
-description of the configuration file entries that control automated
-regridding.
-
-A single model level can be plotted using the plot_data_plane utility.
-This tool can assist the user by showing the data to be verified to
-ensure that times and locations matchup as expected.
+ .. dropdown:: Answer
+
+ The user provides a gridded data file to MET and it runs without error,
+ but the data is packed upside down.
+
+ Try using the "file_type" entry. The "file_type" entry specifies the
+ input file type (e.g. GRIB1, GRIB2, NETCDF_MET, NETCDF_PINT, NETCDF_NCCF)
+ rather than letting the code determine it itself. For valid file_type
+ values, see "File types" in the *data/config/ConfigConstants* file. This
+ entry should be defined within the "fcst" or "obs" dictionaries.
+ Sometimes, directly specifying the type of file will help MET figure
+ out how to properly handle the data.
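+
+ For example, a minimal sketch of forcing the file type in a MET config
+ file (the variable name and level shown are placeholders):
+
+ .. code-block:: none
+
+    fcst = {
+       file_type = NETCDF_NCCF;   // force the input file type
+       // Hypothetical field entry; substitute your own variable
+       field = [ { name = "TMP"; level = [ "(*,*)" ]; } ];
+    }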
+
+ Another option is to use the Regrid-Data-Plane tool. The Regrid-Data-Plane
+ tool may be run to read data from any gridded data file MET supports
+ (i.e. GRIB1, GRIB2, and a variety of NetCDF formats), interpolate to a
+ user-specified grid, and write the field(s) out in NetCDF format. See the
+ Regrid-Data-Plane tool :numref:`regrid-data-plane` in the MET
+ User's Guide for more
+ detailed information. While the Regrid-Data-Plane tool is useful as a
+ stand-alone tool, the capability is also included to automatically regrid
+ data in most of the MET tools that handle gridded data. This "regrid"
+ entry is a dictionary containing information about how to handle input
+ gridded data files. The "regrid" entry specifies regridding logic and
+ has a "to_grid" entry that can be set to NONE, FCST, OBS, a named grid,
+ the path to a gridded data file defining the grid, or an explicit grid
+ specification string. See the :ref:`regrid` entry in
+ the Configuration File Overview in the MET User's Guide for a more detailed
+ description of the configuration file entries that control automated
+ regridding.
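+
+ As a sketch, a "regrid" dictionary using the entries described above
+ might look like the following (the method and width shown are
+ illustrative choices, not requirements):
+
+ .. code-block:: none
+
+    regrid = {
+       to_grid = FCST;   // or OBS, NONE, a named grid, a file, or a grid spec
+       method  = BILIN;  // illustrative interpolation method
+       width   = 2;
+    }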
+
+ A single model level can be plotted using the plot_data_plane utility.
+ This tool can assist the user by showing the data to be verified to
+ ensure that times and locations match up as expected.
Q. Why was the MET written largely in C++ instead of FORTRAN?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-MET relies upon the object-oriented aspects of C++, particularly in
-using the MODE tool. Due to time and budget constraints, it also makes
-use of a pre-existing forecast verification library that was developed
-at NCAR.
+ .. dropdown:: Answer
+
+ MET relies upon the object-oriented aspects of C++, particularly in
+ using the MODE tool. Due to time and budget constraints, it also makes
+ use of a pre-existing forecast verification library that was developed
+ at NCAR.
Q. How does MET differ from the previously mentioned existing verification packages?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-MET is an actively maintained, evolving software package that is being
-made freely available to the public through controlled version releases.
+ .. dropdown:: Answer
+
+ MET is an actively maintained, evolving software package that is being
+ made freely available to the public through controlled version releases.
Q. Will the MET work on data in native model coordinates?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-No - it will not. In the future, we may add options to allow additional
-model grid coordinate systems.
+ .. dropdown:: Answer
+
+ No, it will not. In the future, we may add options to allow additional
+ model grid coordinate systems.
Q. How do I get help if my questions are not answered in the User's Guide?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-First, look on our
-`MET User's Guide website `_.
-If that doesn't answer your question, create a post in the
-`METplus GitHub Discussions Forum `_.
+ .. dropdown:: Answer
+
+ First, look on our
+ `MET User's Guide website <https://met.readthedocs.io/en/latest/>`_.
+ If that doesn't answer your question, create a post in the
+ `METplus GitHub Discussions Forum <https://github.com/dtcenter/METplus/discussions>`_.
Q. What graphical features does MET provide?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-MET provides some :ref:`plotting and graphics support`. The plotting
-tools, including plot_point_obs, plot_data_plane, and plot_mode_field, can
-help users visualize the data.
-
-MET is intended to be a set of command line tools for evaluating forecast
-quality. So, the development effort is focused on providing the latest,
-state of the art verification approaches, rather than on providing nice
-plotting features. However, the ASCII output statistics of MET may be plotted
-with a wide variety of plotting packages, including R, NCL, IDL, and GNUPlot.
-METViewer is also currently being developed and used by the DTC and NOAA
-It creates basic plots of MET output verification statistics. The types of
-plots include series plots with confidence intervals, box plots, x-y scatter
-plots and histograms.
-
-R is a language and environment for statistical computing and graphics.
-It's a free package that runs on most operating systems and provides nice
-plotting features and a wide array of powerful statistical analysis tools.
-There are sample scripts on the
-`MET website `_
-that you can use and modify to perform the type of analysis you need. If
-you create your own scripts, we encourage you to submit them to us through the
-`METplus GitHub Discussions Forum `_
-so that we can post them for other users.
+ .. dropdown:: Answer
+
+ MET provides some :ref:`plotting and graphics support`.
+ The plotting tools, including plot_point_obs, plot_data_plane, and
+ plot_mode_field, can help users visualize the data.
+
+ MET is intended to be a set of command line tools for evaluating forecast
+ quality. So, the development effort is focused on providing the latest,
+ state-of-the-art verification approaches, rather than on providing nice
+ plotting features. However, the ASCII output statistics of MET may be
+ plotted with a wide variety of plotting packages, including R, NCL, IDL,
+ and GNUPlot. METviewer is also currently being developed and used by
+ the DTC and NOAA. It creates basic plots of MET output verification
+ statistics. The types of plots include series plots with confidence
+ intervals, box plots, x-y scatter plots, and histograms.
+
+ R is a language and environment for statistical computing and graphics.
+ It's a free package that runs on most operating systems and provides nice
+ plotting features and a wide array of powerful statistical analysis tools.
+ There are sample scripts on the
+ `MET website <https://dtcenter.org/community-code/model-evaluation-tools-met>`_
+ that you can use and modify to perform the type of analysis you need. If
+ you create your own scripts, we encourage you to submit them to us
+ through the
+ `METplus GitHub Discussions Forum <https://github.com/dtcenter/METplus/discussions>`_
+ so that we can post them for other users.
Q. How do I find the version of the tool I am using?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-Type the name of the tool followed by **-version**. For example,
-type “pb2nc **-version**”.
+ .. dropdown:: Answer
+
+ Type the name of the tool followed by **--version**. For example,
+ type “pb2nc **--version**”.
Q. What are MET's conventions for latitude, longitude, azimuth and bearing angles?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A.
-MET considers north latitude and east longitude positive. Latitudes
-have range from :math:`-90^\circ` to :math:`+90^\circ`. Longitudes have
-range from :math:`-180^\circ` to :math:`+180^\circ`. Plane angles such
-as azimuths and bearing (example: horizontal wind direction) have
-range :math:`0^\circ` to :math:`360^\circ` and are measured clockwise
-from the north.
+ .. dropdown:: Answer
+
+ MET considers north latitude and east longitude positive. Latitudes
+ range from :math:`-90^\circ` to :math:`+90^\circ`. Longitudes range
+ from :math:`-180^\circ` to :math:`+180^\circ`. Plane angles such
+ as azimuths and bearings (example: horizontal wind direction) range
+ from :math:`0^\circ` to :math:`360^\circ` and are measured clockwise
+ from north.
.. _Troubleshooting:
@@ -1743,142 +1819,157 @@ on other things to check if you are having problems installing or running MET.
MET won't compile
-----------------
-* Have you specified the locations of NetCDF, GNU Scientific Library,
- and BUFRLIB, and optional additional libraries using corresponding
- MET\_ environment variables prior to running configure?
+ .. dropdown:: Troubleshooting Help
-* Have these libraries been compiled and installed using the same set
- of compilers used to build MET?
+ * Have you specified the locations of NetCDF, GNU Scientific Library,
+   and BUFRLIB, and optional additional libraries using corresponding
+   MET\_ environment variables prior to running configure? (See the
+   sketch after this list.)
-* Are you using NetCDF version 3.4 or version 4? Currently, only NetCDF
- version 3.6 can be used with MET.
+ * Have these libraries been compiled and installed using the same set
+ of compilers used to build MET?
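+
+ As a minimal sketch (the install paths shown are placeholders, not
+ required locations), the library locations might be exported before
+ running configure like this:
+
+ .. code-block:: none
+
+    # Placeholder install paths; substitute your own locations
+    export MET_NETCDF=/usr/local/netcdf
+    export MET_GSL=/usr/local/gsl
+    export MET_BUFRLIB=/usr/local/bufrlib
+    ./configure --prefix=/usr/local/met
+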
BUFRLIB Errors during MET installation
--------------------------------------
-.. code-block:: none
+ .. dropdown:: Troubleshooting Help
+
+ .. code-block:: none
- error message: /usr/bin/ld: cannot find -lbufr
- The linker can not find the BUFRLIB library archive file it needs.
+ error message: /usr/bin/ld: cannot find -lbufr
+ The linker can not find the BUFRLIB library archive file it needs.
- export MET_BUFRLIB=/home/username/BUFRLIB_v10.2.3:$MET_BUFRLIB
+ export MET_BUFRLIB=/home/username/BUFRLIB_v11.3.0:$MET_BUFRLIB
-It isn't making it's way into the configuration because BUFRLIB_v10.2.3
-isn't showing up in the output of make. This may indicate the wrong shell
-type. The .bashrc file sets the environment for the Bourne shell, but
-the above error could indicate that the c- shell is being used instead.
+ It isn't making its way into the configuration because BUFRLIB_v11.3.0
+ isn't showing up in the output of make. This may indicate the wrong shell
+ type. The .bashrc file sets the environment for the Bourne shell, but
+ the above error could indicate that the C shell is being used instead.
-Try the following 2 things:
+ Try the following 2 things:
-1. Check to make sure this file exists:
+ 1. Check to make sure this file exists:
- .. code-block:: none
+ .. code-block:: none
- ls /home/username/BUFRLIB_v10.2.3/libbufr.a
+ ls /home/username/BUFRLIB_v11.3.0/libbufr.a
-2. Rerun the MET configure command using the following option on the
- command line:
+ 2. Rerun the MET configure command using the following option on the
+ command line:
- .. code-block:: none
-
- MET_BUFRLIB=/home/username/BUFRLIB_v10.2.3
+ .. code-block:: none
+
+ MET_BUFRLIB=/home/username/BUFRLIB_v11.3.0
-After doing that, please try recompiling MET. If it fails, please submit the following log files: "make_install.log" as well as "config.log" with a new post in the `METplus GitHub Discussions Forum `_.
+ After doing that, please try recompiling MET. If it fails, please
+ submit the following log files: "make_install.log" as well as
+ "config.log" with a new post in the
+ `METplus GitHub Discussions Forum <https://github.com/dtcenter/METplus/discussions>`_.
Command line double quotes
--------------------------
-Single quotes, double quotes, and escape characters can be difficult for
-MET to parse. If there are problems, especially in Python code, try
-breaking the command up like the below example.
+ .. dropdown:: Troubleshooting Help
-.. code-block:: none
+ Single quotes, double quotes, and escape characters can be difficult for
+ MET to parse. If there are problems, especially in Python code, try
+ breaking the command up as in the example below.
- ['regrid_data_plane',
- '/h/data/global/WXQC/data/umm/1701150006',
- 'G003', '/h/data/global/WXQC/data/met/nc_mdl/umm/1701150006', '- field',
- '\'name="HGT"; level="P500";\'', '-v', '6']
+ .. code-block:: none
+
+ ['regrid_data_plane',
+ '/h/data/global/WXQC/data/umm/1701150006',
+ 'G003', '/h/data/global/WXQC/data/met/nc_mdl/umm/1701150006', '-field',
+ '\'name="HGT"; level="P500";\'', '-v', '6']
Environment variable settings
-----------------------------
-In the below incorrect example for many environment variables have both
-the main variable set and the INC and LIB variables set:
+ .. dropdown:: Troubleshooting Help
+
+ In the incorrect example below, many environment variables have both
+ the main variable set and the INC and LIB variables set:
-.. code-block:: none
+ .. code-block:: none
- export MET_GSL=$MET_LIB_DIR/gsl
- export MET_GSLINC=$MET_LIB_DIR/gsl/include/gsl
- export MET_GSLLIB=$MET_LIB_DIR/gsl/lib
-
-**only MET_GSL *OR *MET_GSLINC *AND *MET_GSLLIB need to be set.**
-So, for example, either set:
+ export MET_GSL=$MET_LIB_DIR/gsl
+ export MET_GSLINC=$MET_LIB_DIR/gsl/include/gsl
+ export MET_GSLLIB=$MET_LIB_DIR/gsl/lib
+
+ Only MET_GSL **OR** MET_GSLINC **AND** MET_GSLLIB need to be set.
+ So, for example, either set:
-.. code-block:: none
+ .. code-block:: none
- export MET_GSL=$MET_LIB_DIR/gsl
+ export MET_GSL=$MET_LIB_DIR/gsl
-or set:
+ or set:
-.. code-block:: none
+ .. code-block:: none
- export MET_GSLINC=$MET_LIB_DIR/gsl/include/gsl export MET_GSLLIB=$MET_LIB_DIR/gsl/lib
+ export MET_GSLINC=$MET_LIB_DIR/gsl/include/gsl
+ export MET_GSLLIB=$MET_LIB_DIR/gsl/lib
-Additionally, MET does not use MET_HDF5INC and MET_HDF5LIB.
-It only uses MET_HDF5.
+ Additionally, MET does not use MET_HDF5INC and MET_HDF5LIB.
+ It only uses MET_HDF5.
-Our online tutorial can help figure out what should be set and what the
-value should be:
-https://met.readthedocs.io/en/latest/Users_Guide/installation.html
+ Our online tutorial can help figure out what should be set and what the
+ value should be:
+ https://met.readthedocs.io/en/latest/Users_Guide/installation.html
NetCDF install issues
---------------------
-This example shows a problem with NetCDF in the make_install.log file:
+ .. dropdown:: Troubleshooting Help
+
+ This example shows a problem with NetCDF in the make_install.log file:
-.. code-block:: none
+ .. code-block:: none
- /usr/bin/ld: warning: libnetcdf.so.11,
- needed by /home/zzheng25/metinstall/lib/libnetcdf_c++4.so,
- may conflict with libnetcdf.so.7
+ /usr/bin/ld: warning: libnetcdf.so.11,
+ needed by /home/zzheng25/metinstall/lib/libnetcdf_c++4.so,
+ may conflict with libnetcdf.so.7
-Below are examples of too many MET_NETCDF options:
+ Below are examples of too many MET_NETCDF options:
-.. code-block:: none
+ .. code-block:: none
- MET_NETCDF='/home/username/metinstall/'
- MET_NETCDFINC='/home/username/local/include'
- MET_NETCDFLIB='/home/username/local/lib'
+ MET_NETCDF='/home/username/metinstall/'
+ MET_NETCDFINC='/home/username/local/include'
+ MET_NETCDFLIB='/home/username/local/lib'
-Either MET_NETCDF **OR** MET_NETCDFINC **AND** MET_NETCDFLIB need to be set.
-If the NetCDF include files are in */home/username/local/include* and the
-NetCDF library files are in */home/username/local/lib*, unset the
-MET_NETCDF environment variable, then run "make clean", reconfigure,
-and then run "make install" and "make test" again.
+ Either MET_NETCDF **OR** MET_NETCDFINC **AND** MET_NETCDFLIB
+ need to be set.
+ If the NetCDF include files are in */home/username/local/include* and the
+ NetCDF library files are in */home/username/local/lib*, unset the
+ MET_NETCDF environment variable, then run "make clean", reconfigure,
+ and then run "make install" and "make test" again.
Error while loading shared libraries
------------------------------------
-* Add the lib dir to your LD_LIBRARY_PATH. For example, if you receive
- the following error: "./mode_analysis: error while loading shared
- libraries: libgsl.so.19: cannot open shared object file:
- No such file or directory", you should add the path to the
- gsl lib (for example, */home/user/MET/gsl-2.1/lib*)
- to your LD_LIBRARY_PATH.
+ .. dropdown:: Troubleshooting Help
+
+ * Add the lib dir to your LD_LIBRARY_PATH, as shown below. For example,
+   if you receive the following error: "./mode_analysis: error while
+   loading shared libraries: libgsl.so.19: cannot open shared object
+   file: No such file or directory", you should add the path to the
+   gsl lib (for example, */home/user/MET/gsl-2.1/lib*)
+   to your LD_LIBRARY_PATH.
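+
+ For example, using the example path from the error discussion above:
+
+ .. code-block:: none
+
+    # Prepend the gsl lib directory from the example above
+    export LD_LIBRARY_PATH=/home/user/MET/gsl-2.1/lib:$LD_LIBRARY_PATH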
General troubleshooting
-----------------------
-* For configuration files used, make certain to use empty square brackets
- (e.g. [ ]) to indicate no stratification is desired. Do NOT use empty
- double quotation marks inside square brackets (e.g. [""]).
+ .. dropdown:: Troubleshooting Help
+
+ * In configuration files, make certain to use empty square brackets
+   (e.g. [ ]) to indicate no stratification is desired. Do NOT use empty
+   double quotation marks inside square brackets (e.g. [""]).
-* Have you designated all the required command line arguments?
+ * Have you designated all the required command line arguments?
-* Try rerunning with a higher verbosity level. Increasing the verbosity
- level to 4 or 5 prints much more diagnostic information to the screen.
+ * Try rerunning with a higher verbosity level. Increasing the verbosity
+ level to 4 or 5 prints much more diagnostic information to the screen.
Where to get help
=================
@@ -1890,8 +1981,7 @@ is available through the
How to contribute code
======================
-
+
If you have code you would like to contribute, we will gladly consider
your contribution. Please create a post in the
`METplus GitHub Discussions Forum <https://github.com/dtcenter/METplus/discussions>`_.
-
diff --git a/docs/Users_Guide/appendixB.rst b/docs/Users_Guide/appendixB.rst
index e6f343bca4..ea3120c10e 100644
--- a/docs/Users_Guide/appendixB.rst
+++ b/docs/Users_Guide/appendixB.rst
@@ -25,6 +25,8 @@ The following map projections are currently supported in MET:
* Semi Lat/Lon
+.. _App_B-grid_specification_strings:
+
Grid Specification Strings
==========================
diff --git a/docs/Users_Guide/appendixF.rst b/docs/Users_Guide/appendixF.rst
index c490cc07e3..4a8fe86cae 100644
--- a/docs/Users_Guide/appendixF.rst
+++ b/docs/Users_Guide/appendixF.rst
@@ -7,118 +7,182 @@ Appendix F Python Embedding
Introduction
============
-MET includes the ability to embed Python to a limited degree. Users may use Python scripts and whatever associated Python packages they wish in order to prepare 2D gridded data fields, point observations, and matched pairs as input to the MET tools. We fully expect that this degree of embedding will increase in the future. In addition, plans are in place to extend Python with MET in upcoming releases, allowing users to invoke MET tools directly from their Python script. While MET version 8.0 was built on Python 2.x, MET versions 9.0 and beyond are built on Python 3.6+.
+MET includes the ability to embed Python to a limited degree. Users may use their own Python scripts and any associated Python packages they wish in order to prepare 2D gridded data fields, point observations, and matched pairs as input to the MET tools. We fully expect that this degree of embedding will increase in the future. In addition, plans are in place to extend Python with MET in upcoming releases, allowing users to invoke MET tools directly from their Python script. While MET version 8.0 was built on Python 2.x, MET versions 9.0 and beyond are built on Python 3.6+.
.. _compiling_python_support:
-Compiling Python Support
-========================
+Compiling MET for Python Embedding
+==================================
-In order to use Python embedding, the user's local Python installation must have the C-language Python header files and libraries. Sometimes when Python is installed locally, these header files and libraries are deleted at the end of the installation process, leaving only the binary executable and run-time shared object files. But the Python header files and libraries must be present to compile support in MET for Python embedding. Assuming the requisite Python files are present, and that Python embedding is enabled when building MET (which is done by passing the **--enable-python** option to the **configure** command line), the MET C++ code will use these in the compilation process to link directly to the Python libraries.
+In order to use Python embedding, a local Python installation must be available when compiling the MET software with the following requirements:
-The local Python installation must also support a minimum set of required packages. The MET build includes some python wrapper scripts to facilitate the passing of data in memory as well as the reading and writing of temporary files. The packages required by those wrapper scripts are **sys, os, argparse, importlib, numpy and netCDF4**. While most of these are standard packages and readily available, numpy and netCDF4 may not be. Users are advised to confirm their availability prior to compiling MET with python embedding support.
+1. Python version 3.10.4+
-In addition to the **configure** option mentioned above, three variables, **MET_PYTHON_BIN_EXE**, **MET_PYTHON_CC**, and **MET_PYTHON_LD**, must also be set for the configuration process. These may either be set as environment variables or as command line options to **configure**. These constants are passed as compiler command line options when building MET to enable the compiler to find the requisite Python executable, header files, and libraries in the user's local filesystem. Fortunately, Python provides a way to set these variables properly. This frees the user from the necessity of having any expert knowledge of the compiling and linking process. Along with the **Python** executable, there should be another executable called **python3-config**, whose output can be used to set these environment variables as follows:
+2. C-language Python header files and libraries
-• Set **MET_PYTHON_BIN_EXE** to the full path of the desired python executable.
+3. **NumPy** Python package
-• On the command line, run "**python3-config --cflags**". Set the value of **MET_PYTHON_CC** to the output of that command.
+4. **netCDF4** Python package
-• Again on the command line, run "**python3-config --ldflags**". Set the value of **MET_PYTHON_LD** to the output of that command.
+5. **Pandas** Python package
-Make sure that these are set as environment variables or that you have included them on the command line prior to running **configure**.
+6. **Xarray** Python package
+Users should be aware that in some cases, the C-language Python header files and libraries may be deleted at the end of the Python installation process, and they may need to confirm their availability prior to compiling MET. Once the user has confirmed the above requirements are satisfied, they can compile the MET software for Python embedding by passing the **\-\-enable-python** option to the **configure** script on the command line. This will link the MET C++ code directly to the Python libraries. The **NumPy** and **netCDF4** Python packages are required by the Python scripts included with the MET software that facilitate the passing of data in memory and the reading and writing of temporary files when Python embedding is used.
-MET_PYTHON_EXE
-==============
+In addition to using **\-\-enable-python** with **configure** as mentioned above, the following environment variables must also be set prior to executing **configure**: **MET_PYTHON_BIN_EXE**, **MET_PYTHON_CC**, and **MET_PYTHON_LD**. These may either be set as environment variables or as command line options to **configure**. These environment variables are used when building MET to enable the compiler to find the requisite Python executable, header files, and libraries in the user's local filesystem. Fortunately, Python provides a way to set these variables properly. This frees the user from the necessity of having any expert knowledge of the compiling and linking process. Along with the **Python** executable in the user's local Python installation, there should be another executable called **python3-config**, whose output can be used to set these environment variables as follows:
-When Python embedding support is compiled, MET instantiates the Python interpreter directly. However, for users of highly configurable Conda environments, the Python instance set at compilation time may not be sufficient. Users may want to switch between Conda environments for which different packages are available. MET version 9.0 has been enhanced to address this need.
+• Set **MET_PYTHON_BIN_EXE** to the full path of the desired Python executable.
-The types of Python embedding supported in MET are described below. In all cases, by default, the compiled Python instance is used to execute the Python script. If the packages that script imports are not available for the compiled Python instance, users will encounter a runtime error. In the event of a runtime error, users are advised to set the **MET_PYTHON_EXE** environment variable and rerun. This environment variable should be set to the full path to the version of Python you would like to use. See an example below.
+• On the command line, run "**python3-config \-\-cflags**". Set the value of **MET_PYTHON_CC** to the output of that command.
+
+• Again on the command line, run "**python3-config \-\-ldflags \-\-embed**". Set the value of **MET_PYTHON_LD** to the output of that command.
+
+Make sure that these are set as environment variables or that you have included them on the command line prior to running **configure**.
+
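+For example, a minimal sketch assuming a hypothetical Python installation under */usr/local/python3*:
+
+.. code-block:: none
+   :caption: Sample Python Embedding Configure Settings
+
+   export MET_PYTHON_BIN_EXE=/usr/local/python3/bin/python3
+   export MET_PYTHON_CC=$(python3-config --cflags)
+   export MET_PYTHON_LD=$(python3-config --ldflags --embed)
+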
+If a user attempts to invoke Python embedding with a version of MET that was not compiled with Python, MET will return an ERROR:
+
+.. code-block:: none
+ :caption: MET Errors Without Python Enabled
+
+ ERROR : Met2dDataFileFactory::new_met_2d_data_file() -> Support for Python has not been compiled!
+ ERROR : To run Python scripts, recompile with the --enable-python option.
+
+ - or -
+
+ ERROR : process_point_obs() -> Support for Python has not been compiled!
+ ERROR : To run Python scripts, recompile with the --enable-python option.
+
+Controlling Which Python MET Uses When Running
+==============================================
+
+When MET is compiled with Python embedding support, it uses the Python executable from that installation by default whenever Python embedding is invoked. However, for users of highly configurable Python environments, the Python instance set at compilation time may not be sufficient. Users may want an alternate Python installation if they need additional packages not available in the installation used when compiling MET. In MET versions 9.0 and later, users can run MET with a different Python executable than the one used at compilation time by setting the environment variable **MET_PYTHON_EXE**.
+
+If a user's Python script requires packages that are not available in the Python installation used when compiling the MET software, they will encounter a runtime error when using MET. In this instance, the user will need to point MET to a different Python installation that provides the packages required by their script. It is the responsibility of the user to manage this Python installation, and one popular approach is to use a custom Anaconda (Conda) Python environment. Once a Python installation meeting the user's requirements is available, the user can force MET to use it by setting the **MET_PYTHON_EXE** environment variable to the full path of the Python executable in that installation. For example:
.. code-block:: none
+ :caption: Setting MET_PYTHON_EXE
- export MET_PYTHON_EXE=/usr/local/python3/bin/python3
+ export MET_PYTHON_EXE=/usr/local/python3/bin/python3
-Setting this environment variable triggers slightly different processing logic in MET. Rather than executing the user-specified script with compiled Python instance directly, MET does the following:
+Setting this environment variable triggers slightly different processing logic in MET. When MET uses the Python installation it was compiled with, Python is called directly and data are passed in memory from Python to the MET tools. When the user sets **MET_PYTHON_EXE**, MET instead does the following:
1. Wrap the user's Python script and arguments with a wrapper script (write_tmp_mpr.py, write_tmp_point.py, or write_tmp_dataplane.py) and specify the name of a temporary file to be written.
2. Use a system call to the **MET_PYTHON_EXE** Python instance to execute these commands and write the resulting data objects to a temporary ASCII or NetCDF file.
-3. Use the compiled Python instance to run a wrapper script (read_tmp_ascii.py or read_tmp_dataplane.py) to read data from that temporary file.
+3. Use the Python instance that MET was compiled with to run a wrapper script (read_tmp_ascii.py or read_tmp_dataplane.py) to read data from that temporary file.
-With this approach, users should be able to execute Python scripts in their own custom environments.
+With this approach, users are able to execute Python scripts using their own custom Python installations.
-.. _pyembed-2d-data:
+.. _pyembed-data-structures:
-Python Embedding for 2D data
-============================
+Data Structures Supported by Python Embedding
+=============================================
-We now describe how to write Python scripts so that the MET tools may extract 2D gridded data fields from them. Currently, MET offers two ways to interact with Python scripts: by using NumPy N-dimensional arrays (ndarrays) or by using Xarray DataArrays. The interface to be used (NumPy or Xarray) is specified on the command line (more on this later). The user's scripts can use any Python libraries that are supported by the local Python installation, or any personal or institutional libraries or code that are desired in order to implement the Python script, so long as the data has been loaded into either a NumPy ndarray or an Xarray DataArray by the end of the script. This offers advantages when using data file formats that MET does not directly support. If there is Python code to read the data format, the user can use those tools to read the data, and then copy the data into a NumPy ndarray or an Xarray DataArray. MET can then ingest the data via the Python script. Note that whether a NumPy ndarray or an Xarray DataArray is used, the data should be stored as double precision floating point numbers. Using different data types, such as integers or single precision floating point numbers, will lead to unexpected results in MET.
+Python embedding with MET tools offers support for three different types of data structures:
-**Using NumPy N-dimensional Arrays**
+1. Two-dimensional (2D) gridded dataplanes
-The data must be loaded into a 2D NumPy ndarray named **met_data**. In addition there must be a Python dictionary named **attrs** which contains metadata such as timestamps, grid projection and other information. Here is an example **attrs** dictionary:
+2. Point data conforming to the :ref:`MET 11-column format`
-.. code-block:: none
+3. Matched-pair data conforming to the :ref:`MET MPR Line Type`
- attrs = {
-
- 'valid': '20050807_120000',
- 'init': '20050807_000000',
- 'lead': '120000',
- 'accum': '120000',
-
- 'name': 'Foo',
- 'long_name': 'FooBar',
- 'level': 'Surface',
- 'units': 'None',
-
- # Define 'grid' as a string or a dictionary
-
- 'grid': {
- 'type': 'Lambert Conformal',
- 'hemisphere': 'N',
- 'name': 'FooGrid',
- 'scale_lat_1': 25.0,
- 'scale_lat_2': 25.0,
- 'lat_pin': 12.19,
- 'lon_pin': -135.459,
- 'x_pin': 0.0,
- 'y_pin': 0.0,
- 'lon_orient': -95.0,
- 'd_km': 40.635,
- 'r_km': 6371.2,
- 'nx': 185,
- 'ny': 129,
- }
-
- }
-
-In the **attrs** dictionary, valid time, initialization time, lead time and accumulation time (if any) must be indicated by strings. Valid and initialization times must be given in YYYYMMDD[_HH[MMSS]] format, and lead and accumulation times must be given in HH[MMSS] format, where the square brackets indicate optional elements. The dictionary must also include strings for the name, long_name, level, and units to describe the data. The rest of the **attrs** dictionary gives the grid size and projection information in the same format that is used in the netCDF files written out by the MET tools. Those entries are also listed below. Note that the **grid** entry in the **attrs** dictionary can either be defined as a string or as a dictionary itself.
-
-If specified as a string, **grid** can be defined as follows:
-
-• As a named grid:
+Details for each of these data structures are provided below.
+
+.. note::
+
+ All sample commands and directories listed below are relative to the top level of the MET source code directory.
+
+.. _pyembed-2d-data:
+
+Python Embedding for 2D Gridded Dataplanes
+------------------------------------------
+
+Currently, MET supports two different types of Python objects for two-dimensional gridded dataplanes: NumPy N-dimensional arrays (ndarrays) and Xarray DataArrays. The keyword **PYTHON_NUMPY** is used on the command line when using ndarrays, and **PYTHON_XARRAY** when using Xarray DataArrays. Example commands are included at the end of this section.
+
+Python Script Requirements for 2D Gridded Dataplanes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. The data must be stored in a variable with the name **met_data**
+
+2. The **met_data** variable must be of type **Xarray DataArray** or **NumPy N-D Array**
+
+3. The data inside the **met_data** variable must be **double precision floating point** type
+
+4. A Python dictionary named **attrs** must be defined in the user's script and contain the :ref:`required attributes`
+
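+A minimal sketch of a script meeting these requirements is shown below; the field values and metadata are placeholders only:
+
+.. code-block:: Python
+   :caption: Minimal 2D Dataplane Script (sketch)
+
+   import numpy as np
+
+   # 2D gridded field; MET requires double precision floating point data
+   met_data = np.zeros((129, 185), dtype=np.float64)
+
+   # Required metadata; 'G212' is a named grid, used here as a placeholder
+   # (see the required attributes below)
+   attrs = {
+      'valid': '20050807_120000', 'init': '20050807_000000',
+      'lead': '120000', 'accum': '120000',
+      'name': 'Foo', 'long_name': 'FooBar',
+      'level': 'Surface', 'units': 'None',
+      'grid': 'G212',
+   }
+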
+.. _pyembed-2d-attrs:
+
+Required Attributes for 2D Gridded Dataplanes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The **attrs** dictionary must contain the following information:
+
+.. list-table:: 2D Dataplane Attributes
+ :widths: 5 5 10
+ :header-rows: 1
+
+ * - key
+ - description
+ - data type/format
+ * - valid
+ - valid time
+ - string (YYYYMMDD_HHMMSS)
+ * - init
+ - initialization time
+ - string (YYYYMMDD_HHMMSS)
+ * - lead
+ - forecast lead
+ - string (HHMMSS)
+ * - accum
+ - accumulation interval
+ - string (HHMMSS)
+ * - name
+ - variable name
+ - string
+ * - long_name
+ - variable long name
+ - string
+ * - level
+ - variable level
+ - string
+ * - units
+ - variable units
+ - string
+ * - grid
+ - grid information
+ - string or dict
+
+.. note::
+
+   Oftentimes, Xarray DataArray objects come with their own set of attributes available as a property. To avoid conflicts with the attributes
+   required by MET, it is advised to strip these attributes and rely on the **attrs** dictionary defined in your script.
+
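+For example, a minimal sketch of stripping inherited attributes from a hypothetical DataArray before handing it to MET:
+
+.. code-block:: Python
+   :caption: Stripping Inherited DataArray Attributes (sketch)
+
+   import numpy as np
+   import xarray as xr
+
+   # Hypothetical 2D field wrapped in an Xarray DataArray
+   met_data = xr.DataArray(np.zeros((129, 185), dtype=np.float64))
+
+   # Discard attributes carried over from the data source and rely on
+   # the attrs dictionary defined in the script instead
+   met_data.attrs = {}
+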
+The **grid** entry in the **attrs** dictionary must contain the grid size and projection information in the same format that is used in the netCDF files written out by the MET tools. The value of this entry can be either a string or another dictionary. Examples of the **grid** entry defined as a string are:
+
+• Using a named grid supported by MET:
.. code-block:: none
+ :caption: Named Grid
- 'grid': 'G212'
+ 'grid': 'G212'
• As a grid specification string, as described in :ref:`appendixB`:
.. code-block:: none
+ :caption: Grid Specification String
- 'grid': 'lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N'
+ 'grid': 'lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N'
• As the path to an existing gridded data file:
.. code-block:: none
+ :caption: Grid From File
- 'grid': '/path/to/sample_data.grib'
+ 'grid': '/path/to/sample_data.grib'
-When specified as a dictionary, the contents of the **grid** dictionary vary based on the grid **type** string. The entries for the supported grid types are described below:
+When specified as a dictionary, the contents of the **grid** entry vary based upon the grid **type**. The required elements for supported grid types are:
• **Lambert Conformal** grid dictionary entries:
@@ -188,103 +252,331 @@ When specified as a dictionary, the contents of the **grid** dictionary vary bas
Additional information about supported grids can be found in :ref:`appendixB`.
-**Using Xarray DataArrays**
+Finally, an example **attrs** dictionary is shown below:
-To use Xarray DataArrays, a similar procedure to the NumPy case is followed. The Xarray DataArray can be represented as a NumPy N-dimensional array (ndarray) via the **values** property of the DataArray, and an **attrs** property that contains a dictionary of attributes. The user must name the Xarray DataArray to be **met_data**. When one of the MET tools runs the Python script, it will look for an Xarray DataArray named **met_data**, and will retrieve the data and metadata from the **values** and **attrs** properties, respectively, of the Xarray DataArray. The Xarray DataArray **attrs** dictionary is populated in the same way as for the NumPy interface (please see :ref:`pyembed-2d-data` for requirements of each entry in the **attrs** dictionary). The **values** NumPy ndarray property of the Xarray DataArray is also populated in the same way as the NumPy case.
+.. code-block:: none
+ :caption: Sample Attrs Dictionary
+
+ attrs = {
+
+ 'valid': '20050807_120000',
+ 'init': '20050807_000000',
+ 'lead': '120000',
+ 'accum': '120000',
+
+ 'name': 'Foo',
+ 'long_name': 'FooBar',
+ 'level': 'Surface',
+ 'units': 'None',
+
+ # Define 'grid' as a string or a dictionary
+
+ 'grid': {
+ 'type': 'Lambert Conformal',
+ 'hemisphere': 'N',
+ 'name': 'FooGrid',
+ 'scale_lat_1': 25.0,
+ 'scale_lat_2': 25.0,
+ 'lat_pin': 12.19,
+ 'lon_pin': -135.459,
+ 'x_pin': 0.0,
+ 'y_pin': 0.0,
+ 'lon_orient': -95.0,
+ 'd_km': 40.635,
+ 'r_km': 6371.2,
+ 'nx': 185,
+ 'ny': 129,
+ }
+ }
+
+Running Python Embedding for 2D Gridded Dataplanes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+On the command line for any MET tool that will obtain its data from a Python script rather than directly from a data file, the user should specify either **PYTHON_NUMPY** or **PYTHON_XARRAY** wherever a (forecast or observation) data file would normally be given. Then, in the **name** entry of the config file dictionaries for the forecast or observation data (typically used to specify the field name from the input data file), the user should list the **full path** to the Python script to be run followed by any command line arguments for that script. Note that for tools like MODE that take two data files, it is entirely possible to use **PYTHON_NUMPY** for one file and **PYTHON_XARRAY** for the other.
+
+Listed below is an example of running the Plot-Data-Plane tool to call a Python script for data that is included with the MET release tarball. Assuming the MET executables are in your path, this example may be run from the top-level MET source code directory:
-.. note::
- Currently, MET does not support Xarray Dataset structures. If you have a Dataset in Xarray, you can create a DataArray of a single variable using:
+.. code-block:: none
+ :caption: plot_data_plane Python Embedding
- met_data = xr.DataArray(ds.varname,attrs=ds.attrs)
+ plot_data_plane PYTHON_NUMPY fcst.ps \
+ 'name="scripts/python/examples/read_ascii_numpy.py data/python/fcst.txt FCST";' \
+ -title "Python enabled plot_data_plane"
+
+The first argument for the Plot-Data-Plane tool is the gridded data file to be read. When calling a Python script that stores a two-dimensional gridded dataplane in a NumPy N-D array object, set this argument to the constant string **PYTHON_NUMPY**. The second argument is the name of the output PostScript file to be written. The third argument is a string describing the data to be plotted. When calling a Python script, set **name** to the full path of the Python script to be run along with any command line arguments for that script. Lastly, the **-title** option is used to add a title to the plot. Note that any print statements included in the Python script will be printed to the screen. The above example results in the following log messages:
- | ds = your Dataset name
- | varname = variable name in the Dataset you'd like to use in MET
+.. code-block:: none
+
+ DEBUG 1: Opening data file: PYTHON_NUMPY
+ Input File: 'data/python/fcst.txt'
+ Data Name : 'FCST'
+ Data Shape: (129, 185)
+ Data Type: dtype('float64')
+ Attributes: {'name': 'FCST', 'long_name': 'FCST_word',
+ 'level': 'Surface', 'units': 'None',
+ 'init': '20050807_000000', 'valid': '20050807_120000',
+                 'lead': '120000', 'accum': '120000',
+ 'grid': {...} }
+ DEBUG 1: Creating postscript file: fcst.ps
-__________________
+Special Case for Ensemble-Stat, Series-Analysis, MTD, and Gen-Ens-Prod
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-It remains to discuss command lines and config files. Two methods for specifying the Python command and input file name are supported.
+The Ensemble-Stat, Series-Analysis, MTD, and Gen-Ens-Prod tools all have the ability to read multiple input files. Because of this feature, a different approach to Python embedding is required. A typical use of these tools is to provide a list of files on the command line. For example:
-**Python Embedding Option 1:**
+.. code-block::
+ :caption: Gen-Ens-Prod Command Line
-On the command line for any of the MET tools which will be obtaining its data from a Python script rather than directly from a data file, the user should specify either PYTHON_NUMPY or PYTHON_XARRAY wherever a (forecast or observation) data file name would normally be given. Then in the **name** entry of the config file dictionaries for the forecast or observation data, the user should list the Python script to be run followed by any command line arguments for that script. Note that for tools like MODE that take two data files, it would be entirely possible to use the NumPy interface for one file and the Xarray interface for the other.
+ gen_ens_prod ens1.nc ens2.nc ens3.nc ens4.nc -out ens_prod.nc -config GenEnsProd_config
-___________________
+In this case, a user is passing four ensemble members to Gen-Ens-Prod to be evaluated, and each member is in a separate file. If a user wishes to use Python embedding to process the ensemble input files, then the exact same command is used; however, special modifications inside the GenEnsProd_config file are needed. In the config file dictionary, the user must set the **file_type** entry to either **PYTHON_NUMPY** or **PYTHON_XARRAY** to activate Python embedding for these tools. Then, in the **name** entry of the config file dictionaries for the forecast or observation data, the user must list the **full path** to the Python script to be run, replacing the name of the input gridded data file with the constant string **MET_PYTHON_INPUT_ARG**. When looping over all of the input files, the MET tools will replace that constant **MET_PYTHON_INPUT_ARG** with the path to the input file currently being processed and, optionally, any command line arguments for the Python script. Here is what this looks like in the GenEnsProd_config file for the above example:
-Listed below is an example of running the Plot-Data-Plane tool to call a Python script for data that is included with the MET release tarball. Assuming the MET executables are in your path, this example may be run from the top-level MET source code directory.
+.. code-block::
+ :caption: Gen-Ens-Prod MET_PYTHON_INPUT_ARG Config
-.. code-block:: none
+ file_type = PYTHON_NUMPY;
+ field = [ { name = "gen_ens_prod_pyembed.py MET_PYTHON_INPUT_ARG"; } ];
- plot_data_plane PYTHON_NUMPY fcst.ps \
- 'name="scripts/python/read_ascii_numpy.py data/python/fcst.txt FCST";' \
- -title "Python enabled plot_data_plane"
-
-The first argument for the Plot-Data-Plane tool is the gridded data file to be read. When calling a NumPy Python script, set this to the constant string PYTHON_NUMPY. The second argument is the name of the output PostScript file to be written. The third argument is a string describing the data to be plotted. When calling a Python script, set **name** to the Python script to be run along with command line arguments. Lastly, the **-title** option is used to add a title to the plot. Note that any print statements included in the Python script will be printed to the screen. The above example results in the following log messages.
+In the event the user requires command line arguments to their Python script, they must be included alongside the file names separated by a delimiter. For example, the above Gen-Ens-Prod command with command line arguments for Python would look like:
-.. code-block:: none
-
- DEBUG 1: Opening data file: PYTHON_NUMPY
- Input File: 'data/python/fcst.txt'
- Data Name : 'FCST'
- Data Shape: (129, 185)
- Data Type: dtype('float64')
- Attributes: {'name': 'FCST', 'long_name': 'FCST_word',
- 'level': 'Surface', 'units': 'None',
- 'init': '20050807_000000', 'valid': '20050807_120000',
- 'lead': '120000', 'accum': '120000'
- 'grid': {...} }
- DEBUG 1: Creating postscript file: fcst.ps
+.. code-block::
+ :caption: Gen-Ens-Prod Command Line with Python Args
+
+   gen_ens_prod ens1.nc,arg1,arg2 ens2.nc,arg1,arg2 ens3.nc,arg1,arg2 ens4.nc,arg1,arg2 \
+ -out ens_prod.nc -config GenEnsProd_config
+
+In this case, the user's Python script will receive "ens1.nc,arg1,arg2" as a single command line argument for each execution of the Python script (i.e., once per file). The user must parse this argument inside their Python script to obtain **arg1** and **arg2** as separate arguments; a parsing sketch is shown after the file list example below. The list of input files, and optionally any command line arguments, can also be written to a single file called **file_list** that is substituted for the file names and command line arguments. For example:
+
+.. code-block::
+ :caption: Gen-Ens-Prod File List
-**Python Embedding Option 2 using MET_PYTHON_INPUT_ARG:**
+ echo "ens1.nc,arg1,arg2 ens2.nc,arg1,arg2 ens3.nc,arg1,arg2 ens4.nc,arg1,arg2" > file_list
+ gen_ens_prod file_list -out ens_prod.nc -config GenEnsProd_config
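+
+A minimal sketch of parsing such a delimited argument inside the user's Python script, assuming two extra arguments as in the example above:
+
+.. code-block:: Python
+   :caption: Parsing a Delimited Input Argument (sketch)
+
+   import sys
+
+   # MET passes e.g. "ens1.nc,arg1,arg2" as a single command line argument
+   input_file, arg1, arg2 = sys.argv[1].split(",")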
-The second option was added to support the use of Python embedding in tools which read multiple input files. Option 1 reads a single field of data from a single source, whereas tools like Ensemble-Stat, Series-Analysis, and MTD read data from multiple input files. While option 2 can be used in any of the MET tools, it is required for Python embedding in Ensemble-Stat, Series-Analysis, and MTD.
+Finally, the above tools do not require data files to be present on a local disk. If the user wishes, their Python script can obtain data from other sources based solely upon the command line arguments passed to it. For example:
-On the command line for any of the MET tools, specify the path to the input gridded data file(s) as the usage statement for the tool indicates. Do **not** substitute in PYTHON_NUMPY or PYTHON_XARRAY on the command line. In the config file dictionary set the **file_type** entry to either PYTHON_NUMPY or PYTHON_XARRAY to activate the Python embedding logic. Then, in the **name** entry of the config file dictionaries for the forecast or observation data, list the Python script to be run followed by any command line arguments for that script. However, in the Python command, replace the name of the input gridded data file with the constant string MET_PYTHON_INPUT_ARG. When looping over multiple input files, the MET tools will replace that constant **MET_PYTHON_INPUT_ARG** with the path to the file currently being processed. The example plot_data_plane command listed below yields the same result as the example shown above, but using the option 2 logic instead.
+.. code-block::
+ :caption: Gen-Ens-Prod Python Args Only
-The Ensemble-Stat, Series-Analysis, and MTD tools support the use of file lists on the command line, as do some other MET tools. Typically, the ASCII file list contains a list of files which actually exist on your machine and should be read as input. For Python embedding, these tools loop over the ASCII file list entries, set MET_PYTHON_INPUT_ARG to that string, and execute the Python script. This only allows a single command line argument to be passed to the Python script. However multiple arguments may be concatenated together using some delimiter, and the Python script can be defined to parse arguments using that delimiter. When file lists are constructed in this way, the entries will likely not be files which actually exist on your machine. In this case, users should place the constant string "file_list" on the first line of their ASCII file lists. This will ensure that the MET tools will parse the file list properly.
+   gen_ens_prod 20230101,0 20230102,0 20230103,0 -out ens_prod.nc -config GenEnsProd_config
+
+In the above command, each of the arguments "20230101,0", "20230102,0", and "20230103,0" is provided to the user's Python script in a separate call. Inside the Python script, these arguments can be used to construct a filename, a query to a data server, or some other mechanism for obtaining the desired data, which the script then formats the way MET expects, as sketched below.
+
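+As a sketch, such a script might construct a local file path or remote query from each argument; the naming scheme here is purely hypothetical:
+
+.. code-block:: Python
+   :caption: Constructing Input from Python Args Only (sketch)
+
+   import sys
+
+   # e.g. sys.argv[1] == "20230101,0"
+   date, member = sys.argv[1].split(",")
+
+   # Hypothetical naming scheme mapping the arguments to an input file
+   input_file = f"/path/to/data/ens_{date}_m{member}.nc"
+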
+Examples of Python Embedding for 2D Gridded Dataplanes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Grid-Stat with Python embedding for forecast and observations**
.. code-block:: none
-
- plot_data_plane data/python/fcst.txt fcst.ps \
- 'name="scripts/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG FCST"; \
- file_type=PYTHON_NUMPY;' \
- -title "Python enabled plot_data_plane"
+ :caption: GridStat Command with Dual Python Embedding
+
+ grid_stat 'PYTHON_NUMPY' 'PYTHON_NUMPY' GridStat_config -outdir /path/to/output
+
+.. code-block:: none
+ :caption: GridStat Config with Dual Python Embedding
+
+ fcst = {
+ field = [
+ {
+ name = "/path/to/fcst/python/script.py python_arg1 python_arg2";
+ }
+ ];
+ }
+
+ obs = {
+ field = [
+ {
+ name = "/path/to/obs/python/script.py python_arg1 python_arg2";
+ }
+ ];
+ }
.. _pyembed-point-obs-data:
Python Embedding for Point Observations
-=======================================
-
-The ASCII2NC tool supports the "-format python" option. With this option, point observations may be passed as input. An example of this is shown below:
+---------------------------------------
+
+MET also supports point observation data supplied in the :ref:`MET 11-column format`.
+
+Python Script Requirements for Point Observations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. The data must be stored in a variable with the name **point_data**
+
+2. The **point_data** variable must be a Python list representation of a NumPy N-D Array created from a Pandas DataFrame
+
+3. The **point_data** variable must have data in each of the 11 columns required for the MET tools even if it is NA
+
+To provide the data that MET expects for point observations, the user is encouraged to consider, when designing their Python script, how to map their observations into the MET 11-column format. The user can then populate their observations into a Pandas DataFrame with the following column names and dtypes:
+
+.. list-table:: Point Observation DataFrame Columns and Dtypes
+ :widths: 5 5 10
+ :header-rows: 1
+
+ * - column name
+ - data type (dtype)
+ - description
+ * - typ
+ - string
+ - Message Type
+ * - sid
+ - string
+ - Station ID
+ * - vld
+ - string
+ - Valid Time (YYYYMMDD_HHMMSS)
+ * - lat
+ - numeric
+ - Latitude (Degrees North)
+ * - lon
+ - numeric
+ - Longitude (Degrees East)
+ * - elv
+ - numeric
+ - Elevation (MSL)
+ * - var
+ - string
+ - Variable name (or GRIB code)
+ * - lvl
+ - numeric
+ - Level
+ * - hgt
+ - numeric
+ - Height (MSL or AGL)
+ * - qc
+ - string
+ - QC string
+ * - obs
+ - numeric
+ - Observation Value
+
+To create the variable for MET, use the **.values** property of the Pandas DataFrame and the **.tolist()** method of the NumPy N-D Array. For example:
+
+.. code-block:: Python
+ :caption: Convert Pandas DataFrame to MET variable
+
+   import pandas as pd
+
+   # Pandas DataFrame holding the point observations
+   my_dataframe = pd.DataFrame()
+
+   # Convert to the list-of-lists representation MET expects
+   point_data = my_dataframe.values.tolist()
+
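+Putting it together, a hedged end-to-end sketch containing a single hypothetical surface temperature observation:
+
+.. code-block:: Python
+   :caption: Building Point Observation Data (sketch)
+
+   import pandas as pd
+
+   # One observation in the MET 11-column format (all values are placeholders)
+   point_obs = [
+       ['ADPSFC', 'KDEN', '20230101_120000', 39.85, -104.66, 1650.0,
+        'TMP', 1013.25, 2.0, 'NA', 285.5],
+   ]
+
+   my_dataframe = pd.DataFrame(point_obs,
+                               columns=['typ', 'sid', 'vld', 'lat', 'lon', 'elv',
+                                        'var', 'lvl', 'hgt', 'qc', 'obs'])
+
+   # Convert to the list representation MET expects
+   point_data = my_dataframe.values.tolist()
+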
+Running Python Embedding for Point Observations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Point2Grid, Plot-Point-Obs, Ensemble-Stat, and Point-Stat tools support Python embedding for point observations. Python embedding for these tools can be invoked directly on the command line by replacing the input MET NetCDF point observation file name with the **full path** to the Python script and any arguments. The Python command must begin with the prefix **PYTHON_NUMPY=**. The full command should be enclosed in quotes to prevent embedded whitespace from causing parsing errors. An example of this is shown below for Plot-Point-Obs:
.. code-block:: none
+ :caption: plot_point_obs with Python Embedding
- ascii2nc -format python \
- "MET_BASE/python/read_ascii_point.py sample_ascii_obs.txt" \
- sample_ascii_obs_python.nc
+ plot_point_obs \
+ "PYTHON_NUMPY=scripts/python/examples/read_ascii_point.py data/sample_obs/ascii/sample_ascii_obs.txt" \
+ output_image.ps
-The Point2Grid, Plot-Point-Obs, Ensemble-Stat, and Point-Stat tools also process point observations. They support Python embedding of point observations directly on the command line by replacing the input MET NetCDF point observation file name with the Python command to be run. The Python command must begin with the prefix 'PYTHON_NUMPY=' and be followed by the path to the User's Python script and any arguments. The full command should be enclosed in single quotes to prevent embedded whitespace from causing parsing errors. An example of this is shown below:
+The ASCII2NC tool also supports Python embedding; however, invoking it varies slightly from the other MET tools. For ASCII2NC, Python embedding is used by providing the "-format python" option on the command line. With this option, point observations may be passed as input. An example of this is shown below:
.. code-block:: none
+ :caption: ascii2nc with Python Embedding
+
+ ascii2nc -format python \
+ "scripts/python/examples/read_ascii_point.py data/sample_obs/ascii/sample_ascii_obs.txt" \
+ sample_ascii_obs_python.nc
+
+Both of the above examples use the **read_ascii_point.py** example script which is included with the MET code. It reads ASCII data in MET's 11-column point observation format and stores it in a Pandas DataFrame to be read by the MET tools using Python embedding for point data. The **read_ascii_point.py** example script can be found in:
- plot_point_obs \
- "PYTHON_NUMPY=MET_BASE/python/read_ascii_point.py sample_ascii_obs.txt" \
- output_image.ps
+• MET installation directory in *scripts/python/examples*.
-Both of the above examples use the **read_ascii_point.py** sample script which is included with the MET code. It reads ASCII data in MET's 11-column point observation format and stores it in a Pandas DataFrame to be read by the MET tools using Python embedding for point data. The **read_ascii_point.py** sample script can be found in:
+• `MET GitHub repository `_ in *scripts/python/examples*.
-• MET installation directory in *MET_BASE/python*.
+Examples of Python Embedding for Point Observations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-• `MET GitHub repository `_ in *met/scripts/python*.
+**Point-Stat with Python embedding for forecast and observations**
+
+.. code-block:: none
+ :caption: PointStat Command with Dual Python Embedding
+
+ point_stat 'PYTHON_NUMPY' 'PYTHON_NUMPY=/path/to/obs/python/script.py python_arg1 python_arg2' PointStat_config -outdir /path/to/output
+
+.. code-block:: none
+ :caption: PointStat Config with Dual Python Embedding
+
+ fcst = {
+ field = [
+ {
+ name = "/path/to/fcst/python/script.py python_arg1 python_arg2";
+ }
+ ];
+ }
.. _pyembed-mpr-data:
-Python Embedding for MPR data
-=============================
+Python Embedding for MPR Data
+-----------------------------
+
+The MET Stat-Analysis tool also supports Python embedding. By using the command line option **-lookin python**, Stat-Analysis can read matched pair (MPR) data formatted in the MET MPR line-type format via Python.
+
+.. note::
+
+ This functionality assumes you are passing only the MPR line type information, and not other statistical line types. Sometimes users configure MET tools to write the MPR line type to the STAT file (along with all other line types). The example below will not work for those files, but rather only files from MET tools containing just the MPR line type information, or optionally, data in another format that the user adapts to the MPR line type format.
+
+Python Script Requirements for MPR Data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. The data must be stored in a variable with the name **mpr_data**
+
+2. The **mpr_data** variable must be a Python list representation of a NumPy N-D Array created from a Pandas DataFrame
+
+3. The **mpr_data** variable must have data in **exactly** 36 columns, corresponding to the combination of the :ref:`common STAT output` and the :ref:`MPR line type output`.
+
+If a user does not have an existing MPR line type file created by the MET tools, they will need to map their data into the 36 columns expected by Stat-Analysis for the MPR line type data. If a user already has MPR line type files, the most direct way for a user to read MPR line type data is to model their Python script after the sample **read_ascii_mpr.py** script. Sample code is included here for convenience:
+
+.. code-block:: Python
+ :caption: Reading MPR line types with Pandas
+
+   import pandas as pd
+
+   # Open the MPR line type file; input_mpr_file holds the path to that file
+   mpr_dataframe = pd.read_csv(input_mpr_file,\
+ header=None,\
+ delim_whitespace=True,\
+ keep_default_na=False,\
+ skiprows=1,\
+ usecols=range(1,36),\
+ dtype=str)
+
+ # Convert to the variable MET expects
+ mpr_data = mpr_dataframe.values.tolist()
+
+Running Python Embedding for MPR Data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The Stat-Analysis tool supports the "-lookin python" option. With this option, matched pair (MPR) data may be passed as input. An example of this is provided in :numref:`StA-pyembed`. That example uses the **read_ascii_mpr.py** sample script which is included with the MET code. It reads MPR data and stores it in a Pandas dataframe to be read by the Stat-Analysis tool with Python.
+Stat-Analysis can be run using the **-lookin python** command line option:
+
+.. code-block:: none
+ :caption: Stat-Analysis with Python Embedding of MPR Data
+
+ stat_analysis \
+ -lookin python scripts/python/examples/read_ascii_mpr.py point_stat_mpr.txt \
+ -job aggregate_stat -line_type MPR -out_line_type CNT \
+ -by FCST_VAR,FCST_LEV
+
+In this example, rather than passing the MPR output lines from Point-Stat directly into Stat-Analysis (which is the typical approach), the **read_ascii_mpr.py** Python embedding script reads that file and passes the data to Stat-Analysis. The aggregate_stat job is defined on the command line and CNT statistics are derived from the MPR input data. Separate CNT statistics are computed for each unique combination of FCST_VAR and FCST_LEV present in the input.
The **read_ascii_mpr.py** sample script can be found in:
-• MET installation directory in *MET_BASE/python*.
+• MET installation directory in *scripts/python/examples*.
+
+• `MET GitHub repository `_ in *MET/scripts/python/examples*.
+
+MET Python Package
+==================
+
+MET comes with a Python package that provides core functionality for the Python embedding capability. In rare cases, advanced users may find the classes and functions included with this Python package useful.
+
+To utilize the MET Python package **standalone** when NOT using it with Python embedding, users must add the following to their **PYTHONPATH** environment variable:
+
+.. code-block::
+ :caption: MET Python Module PYTHONPATH
+
+ export PYTHONPATH={MET_INSTALL_DIR}/share/met/python
-• `MET GitHub repository `_ in *met/scripts/python*.
+where {MET_INSTALL_DIR} is the top level directory where MET is installed, for example **/usr/local/met**.
diff --git a/docs/Users_Guide/config_options.rst b/docs/Users_Guide/config_options.rst
index 93a7a96229..3239becefc 100644
--- a/docs/Users_Guide/config_options.rst
+++ b/docs/Users_Guide/config_options.rst
@@ -1541,6 +1541,8 @@ Point-Stat and Ensemble-Stat, the reference time is the forecast valid time.
end = 5400;
}
+.. _config_options-mask:
+
mask
^^^^
@@ -1562,14 +1564,26 @@ in the following ways:
* The "poly" entry contains a comma-separated list of files that define
verification masking regions. These masking regions may be specified in
- two ways: as a lat/lon polygon or using a gridded data file such as the
- NetCDF output of the Gen-Vx-Mask tool.
+ two ways: in an ASCII file containing lat/lon points defining the mask polygon,
+ or using a gridded data file such as the NetCDF output of the Gen-Vx-Mask tool.
+ Some details for each of these options are described below:
+
+ * If providing an ASCII file containing the lat/lon points defining the mask
+ polygon, the file must contain a name for the region followed by the latitude
+ (degrees north) and longitude (degrees east) for each vertex of the polygon.
+ The values are separated by whitespace (e.g. spaces or newlines), and the
+ first and last polygon points are connected.
+ The general form is "poly_name lat1 lon1 lat2 lon2... latn lonn".
+ Here is an example of a rectangle consisting of 4 points:
+
+ .. code-block:: none
+ :caption: ASCII Rectangle Polygon Mask
- * An ASCII file containing a lat/lon polygon.
- Latitude in degrees north and longitude in degrees east.
- The first and last polygon points are connected.
- For example, "MET_BASE/poly/EAST.poly" which consists of n points:
- "poly_name lat1 lon1 lat2 lon2... latn lonn"
+ RECTANGLE
+ 25 -120
+ 55 -120
+ 55 -70
+ 25 -70
Several masking polygons used by NCEP are predefined in the
installed *share/met/poly* directory. Creating a new polygon is as
@@ -1582,7 +1596,8 @@ in the following ways:
observation point falls within the polygon defined is done in x/y
grid space.
- * The NetCDF output of the gen_vx_mask tool.
+ * The NetCDF output of the gen_vx_mask tool. Please see :numref:`masking`
+ for more details.
* Any gridded data file that MET can read may be used to define a
verification masking region. Users must specify a description of the
@@ -1591,7 +1606,7 @@ in the following ways:
applied, any grid point where the resulting field is 0, the mask is
turned off. Any grid point where it is non-zero, the mask is turned
on.
- For example, "sample.grib {name = \"TMP\"; level = \"Z2\";} >273"
+ For example, "sample.grib {name = \"TMP\"; level = \"Z2\";} >273"
* The "sid" entry is an array of strings which define groups of
observation station ID's over which to compute statistics. Each entry
diff --git a/docs/Users_Guide/installation.rst b/docs/Users_Guide/installation.rst
index 9db4d6993f..a198fb8fab 100644
--- a/docs/Users_Guide/installation.rst
+++ b/docs/Users_Guide/installation.rst
@@ -21,6 +21,8 @@ Programming Languages
The MET package, including MET-TC, is written primarily in C/C++ in order to be compatible with an extensive verification code base in C/C++ already in existence. In addition, the object-based MODE and MODE-TD verification tools rely heavily on the object-oriented aspects of C++. Knowledge of C/C++ is not necessary to use the MET package. The MET package has been designed to be highly configurable through the use of ASCII configuration files, enabling a great deal of flexibility without the need for source code modifications.
+With the release of MET-11.1.0, C++11 is now the minimum required version of the C++ programming language standard.
+
NCEP's BUFRLIB is written entirely in Fortran. The portion of MET that handles the interface to the BUFRLIB for reading PrepBUFR point observation files is also written in Fortran.
The MET package is intended to be a tool for the modeling community to use and adapt. As users make upgrades and improvements to the tools, they are encouraged to offer those upgrades to the broader community by offering feedback to the developers.
@@ -122,7 +124,7 @@ MET Directory Structure
The top-level MET directory consists of Makefiles, configuration files, and several subdirectories. The top-level Makefile and configuration files control how the entire toolkit is built. Instructions for using these files to build MET can be found in :numref:`Install_Building-the-MET`.
-When MET has been successfully built and installed, the installation directory contains two subdirectories. The *bin/* directory contains executables for each module of MET as well as several plotting utilities. The *share/met/* directory contains many subdirectories with data required at runtime and a subdirectory of sample R scripts utilities. The *colortables/*, *map/*, and *ps/* subdirectories contain data used in creating PostScript plots for several MET tools. The *poly/* subdirectory contains predefined lat/lon polyline regions for use in selecting regions over which to verify. The polylines defined correspond to verification regions used by NCEP as described in :numref:`Appendix B, Section %s `. The *config/* directory contains default configuration files for the MET tools. The *python/* subdirectory contains sample scripts used in Python embedding (:numref:`Appendix F, Section %s `). The *table_files/* and *tc_data/* subdirectories contain GRIB table definitions and tropical cyclone data, respectively. The *Rscripts/* subdirectory contains a handful of plotting graphic utilities for MET-TC. These are the same Rscripts that reside under the top-level MET *scripts/Rscripts* directory, other than it is the installed location. The *wrappers/* subdirectory contains code used in Python embedding (:numref:`Appendix F, Section %s `).
+When MET has been successfully built and installed, the installation directory contains two subdirectories. The *bin/* directory contains executables for each module of MET as well as several plotting utilities. The *share/met/* directory contains many subdirectories with data required at runtime and a subdirectory of sample R script utilities. The *colortables/*, *map/*, and *ps/* subdirectories contain data used in creating PostScript plots for several MET tools. The *poly/* subdirectory contains predefined lat/lon polyline regions for use in selecting regions over which to verify. The polylines defined correspond to verification regions used by NCEP as described in :numref:`Appendix B, Section %s `. The *config/* directory contains default configuration files for the MET tools. The *python/* subdirectory contains Python scripts. The *python/examples* subdirectory contains sample scripts used in Python embedding (:numref:`Appendix F, Section %s `). The *python/pyembed/* subdirectory contains code used in Python embedding (:numref:`Appendix F, Section %s `). The *table_files/* and *tc_data/* subdirectories contain GRIB table definitions and tropical cyclone data, respectively. The *Rscripts/* subdirectory contains a handful of plotting graphic utilities for MET-TC. These are the same Rscripts that reside under the top-level MET *scripts/Rscripts* directory, except that this is their installed location.
The *data/* directory contains several configuration and static data files used by MET. The *sample_fcst/* and *sample_obs/* subdirectories contain sample data used by the test scripts provided in the *scripts/* directory.
@@ -209,7 +211,7 @@ The following environment variables should also be set:
MET_PYTHON_CC='-I/usr/include/python3.6'
MET_PYTHON_LD='-L/usr/lib/python3.6/config-x86_64-linux-gnu -lpython3.6m'
- Note that this version of Python must include support for a minimum set of required pacakges. For more information about Python support in MET, including the list of required packages, please refer to :numref:`Appendix F, Section %s `.
+ Note that this version of Python must include support for a minimum set of required packages. For more information about Python support in MET, including the list of required packages, please refer to :numref:`Appendix F, Section %s `.
* If compiling MODIS-Regrid and/or lidar2nc, set $MET_HDF to point to the main HDF4 directory, or set $MET_HDFINC to point to the directory with the HDF4 include files and set $MET_HDFLIB to point to the directory with the HDF4 library files. Also, set $MET_HDFEOS to point to the main HDF EOS directory, or set $MET_HDFEOSINC to point to the directory with the HDF EOS include files and set $MET_HDFEOSLIB to point to the directory with the HDF EOS library files.
diff --git a/docs/Users_Guide/masking.rst b/docs/Users_Guide/masking.rst
index 4289128f43..a4e7345643 100644
--- a/docs/Users_Guide/masking.rst
+++ b/docs/Users_Guide/masking.rst
@@ -31,22 +31,23 @@ The usage statement for the Gen-Vx-Mask tool is shown below:
[-height n]
[-width n]
[-shapeno n]
+ [-shape_str name string]
[-value n]
[-name string]
[-log file]
[-v level]
[-compress level]
-gen_vx_mask has four required arguments and can take optional ones. Note, -type string (masking type) was previously optional but is now required.
+gen_vx_mask has four required arguments and can take optional ones. Note that **-type string** (masking type) was previously optional but is now required.
Required arguments for gen_vx_mask
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-1. The **input_file** argument is a gridded data file which specifies the grid definition for the domain over which the masking bitmap is to be defined. If output from gen_vx_mask, automatically read mask data as the **input_field**.
+1. The **input_grid** argument is a named grid, the path to a gridded data file, or an explicit grid specification string (see :numref:`App_B-grid_specification_strings`) that defines the grid over which the mask is to be defined. If set to the output of a previous gen_vx_mask run, the mask data from that file are automatically read as the **input_field**.
2. The **mask_file** argument defines the masking information, see below.
-• For "poly", "poly_xy", "box", "circle", and "track" masking, specify an ASCII Lat/Lon file.
+• For "poly", "poly_xy", "box", "circle", and "track" masking, specify an ASCII Lat/Lon file. Refer to :ref:`Types_of_masking_gen_vx_mask` for details on how to construct the ASCII Lat/Lon file for each type of mask.
• For "grid" and "data" masking, specify a gridded data file.
@@ -58,7 +59,7 @@ Required arguments for gen_vx_mask
3. The **out_file** argument is the output NetCDF mask file to be written.
-4. The **-type string** is required to set the masking type. The application will give an error message and exit if "-type string" is not specified on the command line. See description of supported types below.
+4. The **-type string** is required to set the masking type. The application will give an error message and exit if "-type string" is not specified on the command line. See the description of supported types below.
Optional arguments for gen_vx_mask
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -83,18 +84,24 @@ Optional arguments for gen_vx_mask
10. The **-height n** and **-width n** options set the size in grid units for "box" masking.
-11. The **-shapeno n** option is only used for shapefile masking. (See description of shapefile masking below).
+11. The **-shapeno n** option is only used for shapefile masking. See the description of shapefile masking below.
-12. The **-value n** option can be used to override the default output mask data value (1).
+12. The **-shape_str name string** option is only used for shapefile masking. See the description of shapefile masking below.
-13. The **-name string** option can be used to specify the output variable name for the mask.
+13. The **-value n** option can be used to override the default output mask data value (1).
-14. The **-log file** option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file.
+14. The **-name string** option can be used to specify the output variable name for the mask.
-15. The **-v level** option indicates the desired level of verbosity. The value of "level" will override the default setting of 2. Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity will increase the amount of logging.
+15. The **-log file** option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file.
-16. The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression.
+16. The **-v level** option indicates the desired level of verbosity. The value of "level" will override the default setting of 2. Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity will increase the amount of logging.
+17. The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression.
+
+.. _Types_of_masking_gen_vx_mask:
+
+Types of masking available in gen_vx_mask
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Gen-Vx-Mask tool supports the following types of masking region definition selected using the **-type** command line option:
1. Polyline (**poly**) masking reads an input ASCII file containing Lat/Lon locations, connects the first and last points, and selects grid points whose Lat/Lon location falls inside that polyline in Lat/Lon space. This option is useful when defining geographic subregions of a domain.
@@ -115,7 +122,11 @@ The Gen-Vx-Mask tool supports the following types of masking region definition s
9. Latitude (**lat**) and longitude (**lon**) masking computes the latitude and longitude value at each grid point. This logic only requires the definition of the grid, specified by the **input_file**. Technically, the **mask_file** is not needed, but a value must be specified for the command line to parse correctly. Users are advised to simply repeat the **input_file** setting twice. If the **-thresh** command line option is not used, the raw latitude or longitude values for each grid point will be written to the output. This option is useful when defining latitude or longitude bands over which to compute statistics.
-10. Shapefile (**shape**) masking uses a closed polygon taken from an ESRI shapefile to define the masking region. Gen-Vx-Mask reads the shapefile with the ".shp" suffix and extracts the latitude and longitudes of the vertices. The other types of shapefiles (index file, suffix ".shx", and dBASE file, suffix ".dbf") are not currently used. The shapefile must consist of closed polygons rather than polylines, points, or any of the other data types that shapefiles support. Shapefiles usually contain more than one polygon, and the **-shape n** command line option enables the user to select one polygon from the shapefile. The integer **n** tells which shape number to use from the shapefile. Note that this value is zero-based, so that the first polygon in the shapefile is polygon number 0, the second polygon in the shapefile is polygon number 1, etc. For the user's convenience, some utilities that perform human-readable screen dumps of shapefile contents are provided. The gis_dump_shp, gis_dump_shx and gis_dump_dbf tools enable the user to examine the contents of her shapefiles. As an example, if the user knows the name of the particular polygon but not the number of the polygon in the shapefile, the user can use the gis_dump_dbf utility to examine the names of the polygons in the shapefile. The information written to the screen will display the corresponding polygon number.
+10. Shapefile (**shape**) masking uses closed polygons taken from an ESRI shapefile to define the masking region. Gen-Vx-Mask reads the shapefile with the ".shp" suffix and extracts the latitudes and longitudes of the vertices. The shapefile must consist of closed polygons rather than polylines, points, or any of the other data types that shapefiles support. When the **-shape_str** command line option is used, Gen-Vx-Mask also reads metadata from the corresponding dBASE file with the ".dbf" suffix.
+
+   Shapefiles usually contain more than one polygon, and the user must select which of these shapes should be used. The **-shapeno n** and **-shape_str name string** command line options enable the user to select one or more polygons from the shapefile. For **-shapeno n**, **n** is a comma-separated list of integer shape indices to be used. Note that these values are zero-based, so the first polygon in the shapefile is shape number 0, the second polygon in the shapefile is shape number 1, etc. For example, **-shapeno 0,1,2** uses the first three shapes in the shapefile. When multiple shapes are specified, the mask is defined as their union, so all grid points falling inside at least one of the specified shapes are included in the mask.
+
+   For the user's convenience, some utilities that perform human-readable screen dumps of shapefile contents are provided with MET. The **gis_dump_shp**, **gis_dump_shx**, and **gis_dump_dbf** tools enable the user to examine the contents of these shapefiles. In particular, the **gis_dump_dbf** tool prints the name and values of the metadata for each record. The **-shape_str** command line option filters the shapes using the attributes listed in the **gis_dump_dbf** output, and requires two arguments. The **name** argument is set to any valid shapefile attribute, and the **string** argument is a comma-separated list of values to be matched. An example of using **-shape_str** is **-shape_str CONTINENT Europe**, which will match all "CONTINENT" attributes that have the string "Europe" in them. Strings that contain embedded whitespace should be enclosed in single quotes. Also note that case-insensitive matching is used. For example, when using a global country outline shapefile, **-shape_str NAME 'united kingdom,united states of america'** matches the "NAME" attributes containing either "United Kingdom" or "United States of America". If **-shape_str** is used multiple times, only shapes matching all the named attributes will be used. For example, **-shape_str CONTINENT Europe -shape_str NAME Spain,Portugal** will only match shapes where the "CONTINENT" attribute contains "Europe" and the "NAME" attribute contains "Spain" or "Portugal". If a user wishes, they can combine both the **-shape_str** and **-shapeno** options. In this case, the union of all matches from the shapefile will be used.
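+
+   As an example, here is a hedged sketch combining both options; the grid, shapefile name, and attribute value are hypothetical:
+
+   .. code-block:: none
+      :caption: Shapefile Masking Example (sketch)
+
+      gen_vx_mask G212 countries.shp europe_mask.nc -type shape \
+         -shape_str CONTINENT Europe -shapeno 0,1
+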
The polyline, polyline XY, box, circle, and track masking methods all read an ASCII file containing Lat/Lon locations. Those files must contain a string, which defines the name of the masking region, followed by a series of whitespace-separated latitude (degrees north) and longitude (degrees east) values.
diff --git a/docs/Users_Guide/plotting.rst b/docs/Users_Guide/plotting.rst
index 1db3b4be91..1ac44e2f7e 100644
--- a/docs/Users_Guide/plotting.rst
+++ b/docs/Users_Guide/plotting.rst
@@ -71,7 +71,7 @@ An equivalent command using python embedding for point observations is shown bel
.. code-block:: none
- plot_point_obs 'PYTHON_NUMPY=MET_BASE/python/read_met_point_obs.py sample_pb.nc' sample_data.ps
+ plot_point_obs 'PYTHON_NUMPY=MET_BASE/python/examples/read_met_point_obs.py sample_pb.nc' sample_data.ps
Please see section :numref:`pyembed-point-obs-data` for more details about Python embedding in MET.
diff --git a/docs/Users_Guide/reformat_point.rst b/docs/Users_Guide/reformat_point.rst
index 1cd9b4705d..809639c249 100644
--- a/docs/Users_Guide/reformat_point.rst
+++ b/docs/Users_Guide/reformat_point.rst
@@ -1042,7 +1042,7 @@ Required arguments for point2grid
1. The **input_filename** argument indicates the name of the input file to be processed. The input can be a MET NetCDF point observation file generated by other MET tools or a NetCDF AOD dataset from GOES16/17. Python embedding for point observations is also supported, as described in :numref:`pyembed-point-obs-data`.
-The MET point observation NetCDF file name as **input_filename** argument is equivalent with "PYTHON_NUMPY=MET_BASE/python/read_met_point_obs.py netcdf_file name'.
+Using the MET point observation NetCDF file name as the **input_filename** argument is equivalent to using "PYTHON_NUMPY=MET_BASE/python/examples/read_met_point_obs.py netcdf_filename".
2. The **to_grid** argument defines the output grid as: (1) a named grid, (2) the path to a gridded data file, or (3) an explicit grid specification string.
@@ -1100,7 +1100,7 @@ Listed below is an example of processing the same set of observations but using
.. code-block:: none
point2grid \
- 'PYTHON_NUMPY=MET_BASE/python/read_met_point_obs.py ascii2nc_edr_hourly.20130827.nc' \
+ 'PYTHON_NUMPY=MET_BASE/python/examples/read_met_point_obs.py ascii2nc_edr_hourly.20130827.nc' \
G212 python_gridded_ascii_python.nc -config Point2GridConfig_edr \
-field 'name="200"; level="*"; valid_time="20130827_205959";' -method MAX -v 1
@@ -1191,10 +1191,10 @@ The script can be found at:
.. code-block:: none
- MET_BASE/utility/print_pointnc2ascii.py
+ MET_BASE/python/utility/print_pointnc2ascii.py
For how to use the script, issue the command:
.. code-block:: none
- python3 MET_BASE/utility/print_pointnc2ascii.py -h
+ python3 MET_BASE/python/utility/print_pointnc2ascii.py -h
diff --git a/docs/Users_Guide/release-notes.rst b/docs/Users_Guide/release-notes.rst
index 840af44bf6..979fdbfaf6 100644
--- a/docs/Users_Guide/release-notes.rst
+++ b/docs/Users_Guide/release-notes.rst
@@ -9,6 +9,33 @@ When applicable, release notes are followed by the GitHub issue number which des
enhancement, or new feature (`MET GitHub issues `_).
Important issues are listed **in bold** for emphasis.
+MET Version 11.1.0-beta2 release notes (20230505)
+-------------------------------------------------
+
+**Note** that the 11.1.0-beta2 release was originally created on 20230423 but was recreated on 20230428 and 20230505 to include critical bugfixes.
+
+ .. dropdown:: Documentation
+
+ * Improve documentation on Python Embedding for point observations (`#2303 `_).
+ * Create dropdown menus for Appendix A (`#2460 `_).
+ * Clarify MET Compile Time Python requirements (`#2490 `_).
+
+ .. dropdown:: Enhancements
+
+ * Enhance the MET point processing tools to read the Python 'point_data' variable instead of just 'met_point_data' (`#2285 `_).
+ * SonarQube: Further reduce bugs for MET-11.1.0-beta2 (`#2474 `_).
+ * SonarQube: Replace all instances of NULL with nullptr (`#2504 `_).
+ * SonarQube: Remove code that will never be executed (`#2506 `_).
+
+ .. dropdown:: Bugfixes
+
+ * Bugfix: Correct the branch name for the SonarQube scanning nightly (`#2401 `_).
+ * Bugfix: Fix support for the YYYYMMDD format in NetCDF level timestrings (`#2482 `_).
+ * Bugfix: AERONET the lat/lon is not changed with different station ID (`#2493 `_).
+ * Bugfix: dtype in Python embedding example script and appendixF correction (`#2518 `_).
+ * Bugfix: write_tmp_dataplane uses fill_value unrecognized by MET (`#2525 `_).
+ * **Bugfix: Resolve compilation problems due to need for \-std=c++11** (`#2531 `_).
+
MET Version 11.1.0-beta1 release notes (20230228)
-------------------------------------------------
diff --git a/docs/Users_Guide/stat-analysis.rst b/docs/Users_Guide/stat-analysis.rst
index 1c1f1db4c0..c1973ed681 100644
--- a/docs/Users_Guide/stat-analysis.rst
+++ b/docs/Users_Guide/stat-analysis.rst
@@ -9,7 +9,7 @@ Introduction
The Stat-Analysis tool ties together results from the Point-Stat, Grid-Stat, Ensemble-Stat, Wavelet-Stat, and TC-Gen tools by providing summary statistical information and a way to filter their STAT output files. It processes the STAT output created by the other MET tools in a variety of ways which are described in this section.
-MET version 9.0 adds support for the passing matched pair data (MPR) into Stat-Analysis using a Python script with the "-lookin python ..." option. An example of running Stat-Analysis with Python embedding is shown in :numref:`stat_analysis-usage`.
+MET version 9.0 adds support for passing matched pair (MPR) data into Stat-Analysis using a Python script with the "-lookin python ..." option. An example of running Stat-Analysis with Python embedding can be found in :numref:`Appendix F, Section %s `.
Scientific and statistical aspects
==================================
@@ -282,12 +282,12 @@ The usage statement for the Stat-Analysis tool is shown below:
stat_analysis has two required arguments and accepts several optional ones.
-In the usage statement for the Stat-Analysis tool, some additional terminology is introduced. In the Stat-Analysis tool, the term "job" refers to a set of tasks to be performed after applying user-specified options (i.e., "filters"). The filters are used to pare down a collection of output from the MET statistics tools to only those lines that are desired for the analysis. The job and its filters together comprise the "job command line". The "job command line" may be specified either on the command line to run a single analysis job or within the configuration file to run multiple analysis jobs at the same time. If jobs are specified in both the configuration file and the command line, only the jobs indicated in the configuration file will be run. The various jobs types are described in :numref:`table_WS_format_info_ISC` and the filtering options are described in :numref:`wavelet_stat-configuration-file`.
+In the usage statement for the Stat-Analysis tool, some additional terminology is introduced. In the Stat-Analysis tool, the term "job" refers to a set of tasks to be performed after applying user-specified options (i.e., "filters"). The filters are used to pare down a collection of output from the MET statistics tools to only those lines that are desired for the analysis. The job and its filters together comprise the "job command line". The "job command line" may be specified either on the command line to run a single analysis job or within the configuration file to run multiple analysis jobs at the same time. If jobs are specified in both the configuration file and the command line, only the jobs indicated in the configuration file will be run. The various job types are described in :numref:`Des_components_STAT_analysis_tool` and the filtering options are described in :numref:`stat_analysis-configuration-file`.
Required arguments for stat_analysis
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-1. The **-lookin path** specifies the name of a directory to be searched recursively for STAT files (ending in ".stat") or any explicit file name with any suffix (such as "_ctc.txt") to be read. This option may be used multiple times to specify multiple directories and/or files to be read. If "-lookin python" is used, it must be followed by a Python embedding script and any command line arguments it takes. Python embedding can be used to pass matched pair (MPR) lines as input to Stat-Analysis.
+1. The **-lookin path** specifies the name of a directory to be searched recursively for STAT files (ending in ".stat") or any explicit file name with any suffix (such as "_ctc.txt") to be read. This option may be used multiple times to specify multiple directories and/or files to be read. If "-lookin python" is used, it must be followed by a Python embedding script and any command line arguments it takes. Python embedding can be used to pass **only** matched pair (MPR) lines as input to Stat-Analysis.
2. Either a configuration file must be specified with the **-config** option, or a **JOB COMMAND LINE** must be denoted. The **JOB COMMAND LINE** is described in :numref:`stat_analysis-configuration-file`.
@@ -313,22 +313,6 @@ An example of the stat_analysis calling sequence is shown below.
In this example, the Stat-Analysis tool will search for valid STAT lines located in the *../out/point_stat* directory that meet the options specified in the configuration file, *config/STATAnalysisConfig*.
-.. _StA-pyembed:
-
-Python Embedding for Matched Pairs
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The example below uses Python embedding.
-
-.. code-block:: none
-
- stat_analysis \
- -lookin python MET_BASE/python/read_ascii_mpr.py point_stat_mpr.txt \
- -job aggregate_stat -line_type MPR -out_line_type CNT \
- -by FCST_VAR,FCST_LEV
-
-In this example, rather than passing the MPR output lines from Point-Stat directly into Stat-Analysis (which is the typical approach), the read_ascii_mpr.py Python embedding script reads that file and passes the data to Stat-Analysis. The aggregate_stat job is defined on the command line and CNT statistics are derived from the MPR input data. Separate CNT statistics are computed for each unique combination of FCST_VAR and FCST_LEV present in the input. Please refer to :numref:`Appendix F, Section %s ` for more details about Python embedding in MET.
-
.. _stat_analysis-configuration-file:
stat_analysis configuration file
diff --git a/docs/Users_Guide/tc-pairs.rst b/docs/Users_Guide/tc-pairs.rst
index dbacd34839..556f7358c2 100644
--- a/docs/Users_Guide/tc-pairs.rst
+++ b/docs/Users_Guide/tc-pairs.rst
@@ -421,35 +421,38 @@ TC-Pairs produces output in TCST format. The default output file name can be ove
- BMODEL
- User provided text string designating model name
* - 4
+ - DESC
+ - User provided description text string
+ * - 5
- STORM_ID
- BBCCYYYY designation of storm
- * - 5
+ * - 6
- BASIN
- Basin (BB in STORM_ID)
- * - 6
+ * - 7
- CYCLONE
- Cyclone number (CC in STORM_ID)
- * - 7
+ * - 8
- STORM_NAME
- Name of Storm
- * - 8
+ * - 9
- INIT
- Initialization time of forecast in YYYYMMDD_HHMMSS format.
- * - 9
+ * - 10
- LEAD
- Forecast lead time in HHMMSS format.
- * - 10
+ * - 11
- VALID
- Forecast valid time in YYYYMMDD_HHMMSS format.
- * - 11
+ * - 12
- INIT_MASK
- Initialization time masking grid applied
- * - 12
+ * - 13
- VALID_MASK
- Valid time masking grid applied
- * - 13
+ * - 14
- LINE_TYPE
- - Output line type (TCMPR or PROBRIRW)
+ - Output line types described below
.. _TCMPR Line Type:
@@ -463,151 +466,151 @@ TC-Pairs produces output in TCST format. The default output file name can be ove
* - Column Number
- Header Column Name
- Description
- * - 13
+ * - 14
- TCMPR
- Tropical Cyclone Matched Pair line type
- * - 14
+ * - 15
- TOTAL
- Total number of pairs in track
- * - 15
+ * - 16
- INDEX
- Index of the current track pair
- * - 16
+ * - 17
- LEVEL
- Level of storm classification
- * - 17
+ * - 18
- WATCH_WARN
- HU or TS watch or warning in effect
- * - 18
+ * - 19
- INITIALS
- Forecaster initials
- * - 19
+ * - 20
- ALAT
- Latitude position of adeck model
- * - 20
+ * - 21
- ALON
- Longitude position of adeck model
- * - 21
+ * - 22
- BLAT
- Latitude position of bdeck model
- * - 22
+ * - 23
- BLON
- Longitude position of bdeck model
- * - 23
+ * - 24
- TK_ERR
- Track error of adeck relative to bdeck (nm)
- * - 24
+ * - 25
- X_ERR
- X component position error (nm)
- * - 25
+ * - 26
- Y_ERR
- Y component position error (nm)
- * - 26
+ * - 27
- ALTK_ERR
- Along track error (nm)
- * - 27
+ * - 28
- CRTK_ERR
- Cross track error (nm)
- * - 28
+ * - 29
- ADLAND
- adeck distance to land (nm)
- * - 29
+ * - 30
- BDLAND
- bdeck distance to land (nm)
- * - 30
+ * - 31
- AMSLP
- adeck mean sea level pressure
- * - 31
+ * - 32
- BMSLP
- bdeck mean sea level pressure
- * - 32
+ * - 33
- AMAX_WIND
- adeck maximum wind speed
- * - 33
+ * - 34
- BMAX_WIND
- bdeck maximum wind speed
- * - 34, 35
+ * - 35, 36
- A/BAL_WIND_34
- a/bdeck 34-knot radius winds in full circle
- * - 36, 37
+ * - 37, 38
- A/BNE_WIND_34
- a/bdeck 34-knot radius winds in NE quadrant
- * - 38, 39
+ * - 39, 40
- A/BSE_WIND_34
- a/bdeck 34-knot radius winds in SE quadrant
- * - 40, 41
+ * - 41, 42
- A/BSW_WIND_34
- a/bdeck 34-knot radius winds in SW quadrant
- * - 42, 43
+ * - 43, 44
- A/BNW_WIND_34
- a/bdeck 34-knot radius winds in NW quadrant
- * - 44, 45
+ * - 45, 46
- A/BAL_WIND_50
- a/bdeck 50-knot radius winds in full circle
- * - 46, 47
+ * - 47, 48
- A/BNE_WIND_50
- a/bdeck 50-knot radius winds in NE quadrant
- * - 48, 49
+ * - 49, 50
- A/BSE_WIND_50
- a/bdeck 50-knot radius winds in SE quadrant
- * - 50, 51
+ * - 51, 52
- A/BSW_WIND_50
- a/bdeck 50-knot radius winds in SW quadrant
- * - 52, 53
+ * - 53, 54
- A/BNW_WIND_50
- a/bdeck 50-knot radius winds in NW quadrant
- * - 54, 55
+ * - 55, 56
- A/BAL_WIND_64
- a/bdeck 64-knot radius winds in full circle
- * - 56, 57
+ * - 57, 58
- A/BNE_WIND_64
- a/bdeck 64-knot radius winds in NE quadrant
- * - 58, 59
+ * - 59, 60
- A/BSE_WIND_64
- a/bdeck 64-knot radius winds in SE quadrant
- * - 60, 61
+ * - 61, 62
- A/BSW_WIND_64
- a/bdeck 64-knot radius winds in SW quadrant
- * - 62, 63
+ * - 63, 64
- A/BNW_WIND_64
- a/bdeck 64-knot radius winds in NW quadrant
- * - 64, 65
+ * - 65, 66
- A/BRADP
- pressure in millibars of the last closed isobar, 900 - 1050 mb
- * - 66, 67
+ * - 67, 68
- A/BRRP
- radius of the last closed isobar in nm, 0 - 9999 nm
- * - 68, 69
+ * - 69, 70
- A/BMRD
- radius of max winds, 0 - 999 nm
- * - 70, 71
+ * - 71, 72
- A/BGUSTS
- gusts, 0 through 995 kts
- * - 72, 73
+ * - 73, 74
- A/BEYE
- eye diameter, 0 through 999 nm
- * - 74, 75
+ * - 75, 76
- A/BDIR
- storm direction in compass coordinates, 0 - 359 degrees
- * - 76, 77
+ * - 77, 78
- A/BSPEED
- storm speed, 0 - 999 kts
- * - 78, 79
+ * - 79, 80
- A/BDEPTH
- system depth, D-deep, M-medium, S-shallow, X-unknown
- * - 80
+ * - 81
- NUM_MEMBERS
- consensus variable: number of models (or ensemble members) that were used to build the consensus track
- * - 81
+ * - 82
- TRACK_SPREAD
- consensus variable: the mean of the distances from the member locations to the consensus track location (nm)
- * - 82
+ * - 83
- TRACK_STDEV
- consensus variable: the standard deviation of the distances from the member locations to the consensus track location (nm)
- * - 83
+ * - 84
- MSLP_STDEV
- consensus variable: the standard deviation of the members' mean sea level pressure values
- * - 84
+ * - 85
- MAX_WIND_STDEV
- consensus variable: the standard deviation of the members' maximum wind speed values
@@ -623,31 +626,31 @@ TC-Pairs produces output in TCST format. The default output file name can be ove
* - Column Number
- Header Column Name
- Description
- * - 13
+ * - 14
- TCDIAG
- Tropical Cyclone Diagnostics line type
- * - 14
+ * - 15
- TOTAL
- Total number of pairs in track
- * - 15
+ * - 16
- INDEX
- Index of the current track pair
- * - 16
+ * - 17
- DIAG_SOURCE
- Diagnostics data source indicated by the `-diag` command line option
- * - 17
+ * - 18
- TRACK_SOURCE
- ATCF ID of the track data used to define the diagnostics
- * - 18
+ * - 19
- FIELD_SOURCE
- Description of gridded field data source used to define the diagnostics
- * - 19
+ * - 20
- N_DIAG
- Number of storm diagnostic name and value columns to follow
- * - 20
+ * - 21
- DIAG_i
- Name of the ith storm diagnostic (repeated)
- * - 21
+ * - 22
- VALUE_i
- Value of the ith storm diagnostic (repeated)
@@ -663,75 +666,75 @@ TC-Pairs produces output in TCST format. The default output file name can be ove
* - Column Number
- Header Column Name
- Description
- * - 13
+ * - 14
- PROBRIRW
- Probability of Rapid Intensification/Weakening line type
- * - 14
+ * - 15
- ALAT
- Latitude position of edeck model
- * - 15
+ * - 16
- ALON
- Longitude position of edeck model
- * - 16
+ * - 17
- BLAT
- Latitude position of bdeck model
- * - 17
+ * - 18
- BLON
- Longitude position of bdeck model
- * - 18
+ * - 19
- INITIALS
- Forecaster initials
- * - 19
+ * - 20
- TK_ERR
- Track error of adeck relative to bdeck (nm)
- * - 20
+ * - 21
- X_ERR
- X component position error (nm)
- * - 21
+ * - 22
- Y_ERR
- Y component position error (nm)
- * - 22
+ * - 23
- ADLAND
- adeck distance to land (nm)
- * - 23
+ * - 24
- BDLAND
- bdeck distance to land (nm)
- * - 24
+ * - 25
- RI_BEG
- Start of RI time window in HH format
- * - 25
+ * - 26
- RI_END
- End of RI time window in HH format
- * - 26
+ * - 27
- RI_WINDOW
- Width of RI time window in HH format
- * - 27
+ * - 28
- AWIND_END
- Forecast maximum wind speed at RI end
- * - 28
+ * - 29
- BWIND_BEG
- Best track maximum wind speed at RI begin
- * - 29
+ * - 30
- BWIND_END
- Best track maximum wind speed at RI end
- * - 30
+ * - 31
- BDELTA
- Exact Best track wind speed change in RI window
- * - 31
+ * - 32
- BDELTA_MAX
- Maximum Best track wind speed change in RI window
- * - 32
+ * - 33
- BLEVEL_BEG
- Best track storm classification at RI begin
- * - 33
+ * - 34
- BLEVEL_END
- Best track storm classification at RI end
- * - 34
+ * - 35
- N_THRESH
- Number of probability thresholds
- * - 35
+ * - 36
- THRESH_i
- The ith probability threshold value (repeated)
- * - 36
+ * - 37
- PROB_i
- The ith probability value (repeated)
diff --git a/docs/Users_Guide/tc-rmw.rst b/docs/Users_Guide/tc-rmw.rst
index 766d9a4d7b..29e0e3be07 100644
--- a/docs/Users_Guide/tc-rmw.rst
+++ b/docs/Users_Guide/tc-rmw.rst
@@ -166,12 +166,12 @@ tc_rmw output file
The NetCDF output file contains the following dimensions:
-1. *range* - the radial dimension of the range-azimuth grid
+1. *track_point* - the track points corresponding to the model output valid times
-2. *azimuth* - the azimuthal dimension of the range-azimuth grid
+2. *pressure* - if any pressure levels are specified in the data variable list, they will be sorted and combined into a 3D NetCDF variable, with pressure as the vertical dimension and range and azimuth as the horizontal dimensions
-3. *pressure* - if any pressure levels are specified in the data variable list, they will be sorted and combined into a 3D NetCDF variable, which pressure as the vertical dimension and range and azimuth as the horizontal dimensions
+3. *range* - the radial dimension of the range-azimuth grid
-4. *track_point* - the track points corresponding to the model output valid times
+4. *azimuth* - the azimuthal dimension of the range-azimuth grid
For each data variable specified in the data variable list, a corresponding NetCDF variable will be created with the same name and units.
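As a purely illustrative sketch, an abbreviated ncdump header for such an output file might resemble the following; the variable name, units, dimension sizes, and dimension ordering shown here are hypothetical and depend on the configuration and input data:

.. code-block:: none

  dimensions:
      track_point = 25 ;
      pressure = 3 ;
      range = 100 ;
      azimuth = 180 ;
  variables:
      double TMP(track_point, pressure, azimuth, range) ;
          TMP:units = "K" ;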
diff --git a/docs/Users_Guide/tc-stat.rst b/docs/Users_Guide/tc-stat.rst
index 1902330f8b..0d9e824837 100644
--- a/docs/Users_Guide/tc-stat.rst
+++ b/docs/Users_Guide/tc-stat.rst
@@ -59,6 +59,8 @@ The TC-Stat tool can be used to read TCMPR lines and compare the occurrence of r
Users may specify several job command options to configure the behavior of this job. Using these configurable options, the TC-Stat tool analyzes paired tracks and for each track point (i.e., each TCMPR line) determines whether rapid intensification or weakening occurred. For each point in time, it uses the forecast and BEST track event occurrence to populate a 2x2 contingency table. The job may be configured to require that forecast and BEST track events occur at exactly the same time to be considered a hit. Alternatively, the job may be configured to define a hit as long as the forecast and BEST track events occurred within a configurable time window. Using these relaxed matching criteria, false alarms may be considered hits and misses may be considered correct negatives as long as the adeck and bdeck events were close enough in time. Each rirw job applies a single intensity change threshold. Therefore, assessing a model's performance with rapid intensification and weakening requires that two separate jobs be run.
+The RIRW job supports the **-out_stat** option to write the contingency table counts and statistics to a STAT output file.
+
Probability of Rapid Intensification
------------------------------------
@@ -383,6 +385,7 @@ _________________________
e.g.: -job filter -line_type TCMPR -amodel HWFI -dump_row ./tc_filter_job.tcst
-job summary -line_type TCMPR -column TK_ERR -dump_row ./tc_summary_job.tcst
-job rirw -line_type TCMPR -rirw_time 24 -rirw_exact false -rirw_thresh ge20
+ -job rirw -line_type TCMPR -rirw_time 24 -rirw_exact false -rirw_thresh ge20 -out_stat ./tc_rirw.stat
-job probrirw -line_type PROBRIRW -column_thresh RI_WINDOW ==24 \
-probrirw_thresh 30 -probrirw_prob_thresh ==0.25
@@ -470,6 +473,8 @@ The RIRW job produces contingency table counts and statistics defined by identif
Users may also specify the **-out_alpha** option to define the alpha value for the confidence intervals in the CTS output line type. In addition, the **-by column_name** option is a convenient way of running the same job across multiple stratifications of data. For example, **-by AMODEL** runs the same job for each unique AMODEL name in the data.
+Users may also specify the **-out_stat** option to write the contingency table counts and statistics (for the CTC and CTS output line types) to an output STAT file. The RIRW timing information and filtering criteria are written to the STAT header columns, while the contingency table counts and/or statistics are written to the CTC and/or CTS output columns.
+
**Job: PROBRIRW**
The PROBRIRW job produces probabilistic contingency table counts and statistics defined by placing forecast probabilities and BEST track rapid intensification events into an Nx2 contingency table. Users may specify several job command options to configure the behavior of this job:
diff --git a/docs/conf.py b/docs/conf.py
index fb7f5b4268..2f917f4079 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,11 +20,11 @@
project = 'MET'
author = 'UCAR/NCAR, NOAA, CSU/CIRA, and CU/CIRES'
author_list = 'Jensen, T., J. Prestopnik, H. Soh, L. Goodrich, B. Brown, R. Bullock, J. Halley Gotway, K. Newman, J. Opatz'
-version = '11.1.0-beta1'
+version = '11.1.0-beta2'
verinfo = version
release = f'{version}'
release_year = '2023'
-release_date = f'{release_year}-02-28'
+release_date = f'{release_year}-05-05'
copyright = f'{release_year}, {author}'
# -- General configuration ---------------------------------------------------
diff --git a/docs/requirements.txt b/docs/requirements.txt
index b0b0957e2a..0b266552df 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,3 @@
sphinx-gallery
sphinxcontrib-bibtex
-sphinx-design
+sphinx-design==0.3.0
diff --git a/internal/scripts/installation/compile_MET_all.sh b/internal/scripts/installation/compile_MET_all.sh
index 0ac540d147..2eb0ff62e6 100755
--- a/internal/scripts/installation/compile_MET_all.sh
+++ b/internal/scripts/installation/compile_MET_all.sh
@@ -118,8 +118,15 @@ if [ ! -e $TAR_DIR ]; then
exit 1
fi
-# Update library linker path
-export LD_LIBRARY_PATH=${TEST_BASE}/external_libs/lib${MET_PYTHON:+:$MET_PYTHON/lib}${MET_NETCDF:+:$MET_NETCDF/lib}${MET_HDF5:+:$MET_HDF5/lib}${MET_BUFRLIB:+:$MET_BUFRLIB}${MET_GRIB2CLIB:+:$MET_GRIB2CLIB}${LIB_JASPER:+:$LIB_JASPER}${LIB_LIBPNG:+:$LIB_LIBPNG}${LIB_Z:+:$LIB_Z}${MET_GSL:+:$MET_GSL/lib}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}
+# If MET_PYTHON_LIB is not set in the environment file, set it to the
+# lib directory so it can be used to install MET with Python Embedding
+# support
+if [[ -z "$MET_PYTHON_LIB" ]]; then
+ MET_PYTHON_LIB=${MET_PYTHON}/lib
+fi
+
+
+# Print library linker path
echo "LD_LIBRARY_PATH = ${LD_LIBRARY_PATH}"
# if LIB_Z is not set in the environment file, set it to the
@@ -430,17 +437,17 @@ if [ $COMPILE_G2CLIB -eq 1 ]; then
rm -rf ${LIB_DIR}/g2clib/g2clib*
tar -xf ${TAR_DIR}/g2clib*.tar -C ${LIB_DIR}/g2clib
cd ${LIB_DIR}/g2clib/g2clib*
- sed -i 's|INC=.*|INC=-I${LIB_DIR}/include -I${LIB_DIR}/include/jasper|g' makefile
+ # Sed commands use double-quotes to support variable expansion.
+ sed -i "s|INC=.*|INC=-I${LIB_DIR}/include -I${LIB_DIR}/include/jasper|g" makefile
- # allow other compilers besides gcc
- sed -i 's/CC=gcc/CC=${CC_COMPILER}/g' makefile
+ # Allow other compilers besides gcc
+ sed -i "s|CC=gcc|CC=${CC}|g" makefile
# remove -D__64BIT__ flag because compiling with it has
# shown issues with GRIB/GRIB2 files that are over 2GB in size
# This flag was removed in g2clib 1.6.4
# so this can be removed if the version is updated
sed -i 's/-D__64BIT__//g' makefile
- export CC_COMPILER=${CC}
echo "cd `pwd`"
# g2clib appears to compile but causes failure compiling MET if -j argument is used
# so exclude it from this call
@@ -450,6 +457,7 @@ if [ $COMPILE_G2CLIB -eq 1 ]; then
cp *.h ${LIB_DIR}/include/.
fi
+
# Compile HDF
# Depends on jpeg
# Edit 'mfhdf/hdiff/Makefile' as follows:
@@ -628,7 +636,7 @@ export LDFLAGS="-Wl,--disable-new-dtags"
# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
# ${parameter:+word}
# If parameter is null or unset, nothing is substituted, otherwise the expansion of word is substituted.
-export LDFLAGS="${LDFLAGS} -Wl,-rpath,${LIB_DIR}/lib${ADDTL_DIR:+:$ADDTL_DIR}${LIB_DIR}/lib${MET_NETCDF:+:$MET_NETCDF/lib}${MET_HDF5:+:$MET_HDF5/lib}${MET_BUFRLIB:+:$MET_BUFRLIB}${MET_GRIB2CLIB:+:$MET_GRIB2CLIB}${MET_PYTHON:+:$MET_PYTHON/lib}${MET_GSL:+:$MET_GSL/lib}"
+export LDFLAGS="${LDFLAGS} -Wl,-rpath,${LIB_DIR}/lib${ADDTL_DIR:+:$ADDTL_DIR}${LIB_DIR}/lib${MET_NETCDF:+:$MET_NETCDF/lib}${MET_HDF5:+:$MET_HDF5/lib}${MET_BUFRLIB:+:$MET_BUFRLIB}${MET_GRIB2CLIB:+:$MET_GRIB2CLIB}${MET_PYTHON_LIB:+:$MET_PYTHON_LIB}${MET_GSL:+:$MET_GSL/lib}"
export LDFLAGS="${LDFLAGS} -Wl,-rpath,${LIB_JASPER:+$LIB_JASPER}${LIB_LIBPNG:+:$LIB_PNG}${LIB_Z:+$LIB_Z}"
export LDFLAGS="${LDFLAGS} ${LIB_JASPER:+-L$LIB_JASPER} ${LIB_LIBPNG:+-L$LIB_LIBPNG} ${MET_HDF5:+-L$MET_HDF5/lib} ${ADDTL_DIR:+-L$ADDTL_DIR}"
export LIBS="${LIBS} -lhdf5_hl -lhdf5 -lz"
diff --git a/internal/scripts/installation/config/install_met_env.hera b/internal/scripts/installation/config/install_met_env.hera
index 803d5e1aef..1b938cddfd 100755
--- a/internal/scripts/installation/config/install_met_env.hera
+++ b/internal/scripts/installation/config/install_met_env.hera
@@ -1,28 +1,26 @@
-module use -a /contrib/anaconda/modulefiles
module load intel/2022.1.2
-module load anaconda/latest
-export TEST_BASE=/contrib/met/11.0.0
+export PATH=/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin:${PATH}
+export TEST_BASE=/contrib/met/11.1.0
export COMPILER=intel_2022.1.2
export MET_SUBDIR=${TEST_BASE}
-export MET_TARBALL=v11.0.0.tar.gz
+export MET_TARBALL=v11.1.0.tar.gz
export USE_MODULES=TRUE
-export PYTHON_MODULE=anaconda_latest
-export MET_PYTHON=/contrib/anaconda/anaconda3/latest/
-export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.7m
-export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.7/config-3.7m-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.7m\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -Xlinker\ -export-dynamic
+export MET_PYTHON=/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10
+export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10
+export MET_PYTHON_LD=`python3-config --ldflags --embed`
export SET_D64BIT=FALSE
-export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/
+export EXTERNAL_LIBS=/contrib/met/11.1.0/external_libs/
#export MET_NETCDF=${EXTERNAL_LIBS}
-export MET_GSL=${EXTERNAL_LIBS}
-export MET_BUFRLIB=${EXTERNAL_LIBS}
-export BUFRLIB_NAME=-lbufr
+#export MET_GSL=${EXTERNAL_LIBS}
+#export MET_BUFRLIB=${EXTERNAL_LIBS}
+#export BUFRLIB_NAME=-lbufr
#export MET_HDF5=${EXTERNAL_LIBS}
-export MET_GRIB2CLIB=${EXTERNAL_LIBS}/lib
-export MET_GRIB2CINC=${EXTERNAL_LIBS}/include
-export GRIB2CLIB_NAME=-lgrib2c
-export LIB_JASPER=${EXTERNAL_LIBS}/lib
-export LIB_LIBPNG=${EXTERNAL_LIBS}/lib
-export LIB_Z=${EXTERNAL_LIBS}/lib
-#export SET_D64BIT=FALSE
+#export MET_GRIB2CLIB=${EXTERNAL_LIBS}/lib
+#export MET_GRIB2CINC=${EXTERNAL_LIBS}/include
+#export GRIB2CLIB_NAME=-lgrib2c
+#export LIB_JASPER=${EXTERNAL_LIBS}/lib
+#export LIB_LIBPNG=${EXTERNAL_LIBS}/lib
+#export LIB_Z=${EXTERNAL_LIBS}/lib
#export CFLAGS="-Wall -g"
#export CXXFLAGS="-Wall -g -lcurl"
+export MAKE_ARGS=-j
\ No newline at end of file
diff --git a/internal/scripts/installation/config/install_met_env.jet b/internal/scripts/installation/config/install_met_env.jet
index b90839a982..0d88a2f0fc 100644
--- a/internal/scripts/installation/config/install_met_env.jet
+++ b/internal/scripts/installation/config/install_met_env.jet
@@ -2,17 +2,17 @@ module load intel/2022.1.2
module load netcdf/4.7.0
module load hdf5/1.10.5
-export TEST_BASE=/contrib/met/11.0.0
+export TEST_BASE=/contrib/met/11.1.0
export COMPILER=intel_18.0.5.274
export MET_SUBDIR=${TEST_BASE}
-export MET_TARBALL=v11.0.0.tar.gz
+export MET_TARBALL=v11.1.0.tar.gz
export USE_MODULES=TRUE
-export MET_PYTHON=/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8
-export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8
-export MET_PYTHON_LD=-L${MET_PYTHON}/lib\ -L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -lpython3.8\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -Xlinker\ -export-dynamic
+export MET_PYTHON=/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.1_py3.10
+export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10
+export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm
export MET_NETCDF=/apps/netcdf/4.7.0/intel/18.0.5.274
export MET_HDF5=/apps/hdf5/1.10.5/intel/18.0.5.274
-export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/
+export EXTERNAL_LIBS=/contrib/met/11.1.0/external_libs/
#export MET_GSL=${EXTERNAL_LIBS}
#export MET_BUFRLIB=${EXTERNAL_LIBS}
#export BUFRLIB_NAME=-lbufr
@@ -22,4 +22,5 @@ export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/
#export LIB_JASPER=${EXTERNAL_LIBS}/lib
#export LIB_LIBPNG=${EXTERNAL_LIBS}/lib
#export LIB_Z=${EXTERNAL_LIBS}/lib
+export MAKE_ARGS=-j
export SET_D64BIT=FALSE
diff --git a/internal/scripts/installation/config/install_met_env.orion b/internal/scripts/installation/config/install_met_env.orion
index a144d8c492..4a25f75b57 100644
--- a/internal/scripts/installation/config/install_met_env.orion
+++ b/internal/scripts/installation/config/install_met_env.orion
@@ -1,14 +1,14 @@
module load intel/2020.2
-export TEST_BASE=/apps/contrib/MET/11.0.0
+export TEST_BASE=/apps/contrib/MET/11.1.0
export COMPILER=intel_2020
export MET_SUBDIR=${TEST_BASE}/
-export MET_TARBALL=v11.0.0.tar.gz
+export MET_TARBALL=v11.1.0.tar.gz
export USE_MODULES=TRUE
-export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.0_py3.8
-export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8
-export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.8\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm
-export EXTERNAL_LIBS=/apps/contrib/MET/11.0.0/external_libs
+export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10
+export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10
+export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm
+export EXTERNAL_LIBS=/apps/contrib/MET/11.1.0/external_libs
export LIB_Z=${EXTERNAL_LIBS}/lib
#export MET_GSL=${EXTERNAL_LIBS}
#export MET_BUFRLIB=${EXTERNAL_LIBS}
@@ -21,6 +21,7 @@ export LIB_Z=${EXTERNAL_LIBS}/lib
#export LIB_JASPER=${EXTERNAL_LIBS}/lib
#export LIB_LIBPNG=${EXTERNAL_LIBS}/lib
#export SET_D64BIT=FALSE
+export MAKE_ARGS=-j
#export CFLAGS="-Wall -g"
#export CXXFLAGS="-Wall -g"
diff --git a/internal/scripts/installation/config/install_met_env_met_only.hera b/internal/scripts/installation/config/install_met_env_met_only.hera
index 645ca9acab..b4ac247f75 100644
--- a/internal/scripts/installation/config/install_met_env_met_only.hera
+++ b/internal/scripts/installation/config/install_met_env_met_only.hera
@@ -1,17 +1,15 @@
-module use -a /contrib/anaconda/modulefiles
module load intel/2022.1.2
-module load anaconda/latest
-export TEST_BASE=/contrib/met/11.0.0
+export PATH=/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin:${PATH}
+export TEST_BASE=/contrib/met/11.1.0
export COMPILER=intel_2022.1.2
export MET_SUBDIR=${TEST_BASE}
-export MET_TARBALL=v11.0.0.tar.gz
+export MET_TARBALL=v11.1.0.tar.gz
export USE_MODULES=TRUE
-export PYTHON_MODULE=anaconda_latest
-export MET_PYTHON=/contrib/anaconda/anaconda3/latest
-export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.7m
-export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.7/config-3.7m-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.7m\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -Xlinker\ -export-dynamic
+export MET_PYTHON=/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10
+export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10
+export MET_PYTHON_LD=`python3-config --ldflags --embed`
export SET_D64BIT=FALSE
-export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/
+export EXTERNAL_LIBS=/contrib/met/11.1.0/external_libs/
export MET_NETCDF=${EXTERNAL_LIBS}
export MET_GSL=${EXTERNAL_LIBS}
export MET_BUFRLIB=${EXTERNAL_LIBS}
@@ -23,6 +21,6 @@ export GRIB2CLIB_NAME=-lgrib2c
export LIB_JASPER=${EXTERNAL_LIBS}/lib
export LIB_LIBPNG=${EXTERNAL_LIBS}/lib
export LIB_Z=${EXTERNAL_LIBS}/lib
-export SET_D64BIT=FALSE
#export CFLAGS="-Wall -g"
#export CXXFLAGS="-Wall -g -lcurl"
+export MAKE_ARGS=-j
\ No newline at end of file
diff --git a/internal/scripts/installation/config/install_met_env_met_only.jet b/internal/scripts/installation/config/install_met_env_met_only.jet
index e642880266..c4c66ebfc3 100644
--- a/internal/scripts/installation/config/install_met_env_met_only.jet
+++ b/internal/scripts/installation/config/install_met_env_met_only.jet
@@ -7,9 +7,9 @@ export COMPILER=intel_18.0.5.274
export MET_SUBDIR=${TEST_BASE}
export MET_TARBALL=v11.0.0.tar.gz
export USE_MODULES=TRUE
-export MET_PYTHON=/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8
-export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8
-export MET_PYTHON_LD=-L${MET_PYTHON}/lib\ -L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -lpython3.8\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -Xlinker\ -export-dynamic
+export MET_PYTHON=/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.1_py3.10
+export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10
+export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm
export MET_NETCDF=/apps/netcdf/4.7.0/intel/18.0.5.274
export MET_HDF5=/apps/hdf5/1.10.5/intel/18.0.5.274
export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/
@@ -22,4 +22,5 @@ export GRIB2CLIB_NAME=-lgrib2c
export LIB_JASPER=${EXTERNAL_LIBS}/lib
export LIB_LIBPNG=${EXTERNAL_LIBS}/lib
export LIB_Z=${EXTERNAL_LIBS}/lib
+export MAKE_ARGS=-j
export SET_D64BIT=FALSE
diff --git a/internal/scripts/installation/config/install_met_env_met_only.orion b/internal/scripts/installation/config/install_met_env_met_only.orion
index ece8d88aa3..49ab2ebd68 100644
--- a/internal/scripts/installation/config/install_met_env_met_only.orion
+++ b/internal/scripts/installation/config/install_met_env_met_only.orion
@@ -1,14 +1,14 @@
module load intel/2020.2
-export TEST_BASE=/apps/contrib/MET/11.0.0
+export TEST_BASE=/apps/contrib/MET/11.1.0
export COMPILER=intel_2020
export MET_SUBDIR=${TEST_BASE}/
-export MET_TARBALL=v11.0.0.tar.gz
+export MET_TARBALL=v11.1.0.tar.gz
export USE_MODULES=TRUE
-export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.0_py3.8
-export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8
-export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.8\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm
-export EXTERNAL_LIBS=/apps/contrib/MET/11.0.0/external_libs
+export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10
+export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10
+export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm
+export EXTERNAL_LIBS=/apps/contrib/MET/11.1.0/external_libs
export MET_GSL=${EXTERNAL_LIBS}
export MET_BUFRLIB=${EXTERNAL_LIBS}
export BUFRLIB_NAME=-lbufr
@@ -21,5 +21,6 @@ export LIB_JASPER=${EXTERNAL_LIBS}/lib
export LIB_LIBPNG=${EXTERNAL_LIBS}/lib
export LIB_Z=${EXTERNAL_LIBS}/lib
export SET_D64BIT=FALSE
+export MAKE_ARGS=-j
#export CFLAGS="-Wall -g"
#export CXXFLAGS="-Wall -g"
diff --git a/internal/scripts/installation/modulefiles/11.0.0.lua.wcoss2 b/internal/scripts/installation/modulefiles/11.0.0.lua.wcoss2
deleted file mode 100644
index c4afdd9fda..0000000000
--- a/internal/scripts/installation/modulefiles/11.0.0.lua.wcoss2
+++ /dev/null
@@ -1,27 +0,0 @@
-help([[
-]])
-
-local pkgName = myModuleName()
-local pkgVersion = myModuleVersion()
-local pkgNameVer = myModuleFullName()
-
-local hierA = hierarchyA(pkgNameVer,1)
-local compNameVer = hierA[1]
-
-
-conflict(pkgName)
-
-local opt = os.getenv("HPC_OPT") or os.getenv("OPT") or "/opt/modules"
-
-local base = pathJoin(opt,compNameVer,pkgName,pkgVersion)
-
-prepend_path("PATH", pathJoin(base,"bin"))
-
-setenv("MET_ROOT", base)
-setenv("MET_BASE", pathJoin(base, "share", "met"))
-setenv("MET_VERSION", pkgVersion)
-
-whatis("Name: ".. pkgName)
-whatis("Version: " .. pkgVersion)
-whatis("Category: applications")
-whatis("Description: Model Evaluation Tools (MET)")
diff --git a/internal/scripts/installation/modulefiles/11.0.0_acorn b/internal/scripts/installation/modulefiles/11.0.0_acorn
deleted file mode 100644
index 117c0aa323..0000000000
--- a/internal/scripts/installation/modulefiles/11.0.0_acorn
+++ /dev/null
@@ -1,34 +0,0 @@
-#%Module######################################################################
-##
-## Model Evaluation Tools
-##
-proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v10.1.0
- *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
-}
-
-# The intel compiler is required to run MET
-
-module use /apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304/
-module load intel
-module load python/3.8.6
-module load netcdf/4.7.4
-module load hdf5/1.12.2
-module load bufr/11.5.0
-module load zlib/1.2.11
-module load jasper/2.0.25
-module load libpng/1.6.37
-module load gsl/2.7
-module load g2c/1.6.4
-
-set base /apps/sw_review/emc/MET/11.0.0
-set ver 11.0.0
-set share $base/share/met
-set lib_base $base
-
-prepend-path PATH $base/exec
-
-setenv METversion V$ver
-setenv MET_ROOT $base
-
-
diff --git a/internal/scripts/installation/modulefiles/11.0.0_casper b/internal/scripts/installation/modulefiles/11.0.0_casper
deleted file mode 100644
index b0d007db53..0000000000
--- a/internal/scripts/installation/modulefiles/11.0.0_casper
+++ /dev/null
@@ -1,28 +0,0 @@
-#%Module######################################################################
-##
-## Model Evaluation Tools
-##
-proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0
- *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
-}
-
-# If they exist, remove ncdump and ncgen from /glade/p/ral/jntp/MET/MET_releases/casper/11.0.0/external_libs/bin
-
-# The intel compiler is required to run MET
-module load ncarenv/1.3
-module load intel/2021.2
-module load netcdf/4.8.0
-
-set base /glade/p/ral/jntp/MET/MET_releases/casper/11.0.0
-set ver 11.0.0
-set share $base/share/met
-
-prepend-path PATH $base/bin:/glade/p/ral/jntp/MET/MET_releases/casper/11.0.0/external_libs/bin:/glade/p/ral/jntp/MET/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin
-
-
-setenv METversion V$ver
-
-# setenv MET_BUFRLIB /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs/libs
-# setenv MET_GRIB2C /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs
-# setenv MET_GSL /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs
diff --git a/internal/scripts/installation/modulefiles/11.0.0_cheyenne b/internal/scripts/installation/modulefiles/11.0.0_cheyenne
deleted file mode 100644
index d6c23de79c..0000000000
--- a/internal/scripts/installation/modulefiles/11.0.0_cheyenne
+++ /dev/null
@@ -1,27 +0,0 @@
-#%Module######################################################################
-##
-## Model Evaluation Tools
-##
-proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0
- *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
-}
-
-# If they exist, remove ncdump and ncgen from /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs/bin
-
-# The intel compiler is required to run MET
-module load ncarenv/1.3
-module load intel/2021.2
-module load netcdf/4.8.0
-
-set base /glade/p/ral/jntp/MET/MET_releases/11.0.0
-set ver 11.0.0
-set share $base/share/met
-
-prepend-path PATH $base/bin:/glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs/bin:/glade/p/ral/jntp/MET/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin
-
-setenv METversion V$ver
-
-# setenv MET_BUFRLIB /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs/libs
-# setenv MET_GRIB2C /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs
-# setenv MET_GSL /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs
diff --git a/internal/scripts/installation/modulefiles/11.0.0_frontera b/internal/scripts/installation/modulefiles/11.0.0_frontera
deleted file mode 100644
index ebd3cb9c5f..0000000000
--- a/internal/scripts/installation/modulefiles/11.0.0_frontera
+++ /dev/null
@@ -1,25 +0,0 @@
-#%Module######################################################################
-##
-## Model Evaluation Tools
-##
-proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0
- *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
-}
-
-# If they exist, remove ncdump and ncgen from /work2/06612/tg859120/frontera/MET/11.0.0/external_libs/bin
-
-module unload python3
-module load intel/19.1.1
-module load hdf5/1.12.0
-module load netcdf/4.7.4
-
-set base /work2/06612/tg859120/frontera/MET/11.0.0
-set ver 11.0.0
-set share $base/share/met
-set lib_base $base
-
-prepend-path PATH $base/bin:/work2/06612/tg859120/frontera/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin
-
-setenv METversion V$ver
-setenv MET_ROOT $base
diff --git a/internal/scripts/installation/modulefiles/11.0.0_gaea b/internal/scripts/installation/modulefiles/11.0.0_gaea
deleted file mode 100644
index c91a2e74bb..0000000000
--- a/internal/scripts/installation/modulefiles/11.0.0_gaea
+++ /dev/null
@@ -1,17 +0,0 @@
-#%Module######################################################################
-##
-## Model Evaluation Tools
-##
-proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v10.1.2
- *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
-}
-
-module load intel/19.0.5.281
-
-set base /usw/met
-set ver 11.0.0
-set share $base/$ver/share/met
-set lib_base $base/11.0.0
-
-prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/lustre/f2/dev/esrl/Julie.Prestopnik/projects/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin
diff --git a/internal/scripts/installation/modulefiles/11.0.1.lua.wcoss2 b/internal/scripts/installation/modulefiles/11.0.1.lua.wcoss2
deleted file mode 100644
index c4afdd9fda..0000000000
--- a/internal/scripts/installation/modulefiles/11.0.1.lua.wcoss2
+++ /dev/null
@@ -1,27 +0,0 @@
-help([[
-]])
-
-local pkgName = myModuleName()
-local pkgVersion = myModuleVersion()
-local pkgNameVer = myModuleFullName()
-
-local hierA = hierarchyA(pkgNameVer,1)
-local compNameVer = hierA[1]
-
-
-conflict(pkgName)
-
-local opt = os.getenv("HPC_OPT") or os.getenv("OPT") or "/opt/modules"
-
-local base = pathJoin(opt,compNameVer,pkgName,pkgVersion)
-
-prepend_path("PATH", pathJoin(base,"bin"))
-
-setenv("MET_ROOT", base)
-setenv("MET_BASE", pathJoin(base, "share", "met"))
-setenv("MET_VERSION", pkgVersion)
-
-whatis("Name: ".. pkgName)
-whatis("Version: " .. pkgVersion)
-whatis("Category: applications")
-whatis("Description: Model Evaluation Tools (MET)")
diff --git a/internal/scripts/installation/modulefiles/11.0.1_acorn b/internal/scripts/installation/modulefiles/11.0.1_acorn
deleted file mode 100644
index 832194dc76..0000000000
--- a/internal/scripts/installation/modulefiles/11.0.1_acorn
+++ /dev/null
@@ -1,34 +0,0 @@
-#%Module######################################################################
-##
-## Model Evaluation Tools
-##
-proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.1
- *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
-}
-
-# The intel compiler is required to run MET
-
-module use /apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304/
-module load intel python/3.10.4
-module load ve/evs/1.0
-module load netcdf/4.7.4
-module load hdf5/1.12.2
-module load bufr/11.5.0
-module load zlib/1.2.11
-module load jasper/2.0.25
-module load libpng/1.6.37
-module load gsl/2.7
-module load g2c/1.6.4
-
-set base /apps/sw_review/emc/MET/11.0.1
-set ver 11.0.1
-set share $base/share/met
-set lib_base $base
-
-prepend-path PATH $base/exec
-
-setenv METversion V$ver
-setenv MET_ROOT $base
-
-
diff --git a/internal/scripts/installation/modulefiles/11.0.0_hera b/internal/scripts/installation/modulefiles/11.1.0_hera
similarity index 62%
rename from internal/scripts/installation/modulefiles/11.0.0_hera
rename to internal/scripts/installation/modulefiles/11.1.0_hera
index 5a38c524bb..849d9c01b9 100644
--- a/internal/scripts/installation/modulefiles/11.0.0_hera
+++ b/internal/scripts/installation/modulefiles/11.1.0_hera
@@ -3,25 +3,21 @@
## Model Evaluation Tools
##
proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0
+ puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.1.0
*** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
}
# The intel compiler is required to run MET
prereq intel
-prereq anaconda/latest
set base /contrib/met
-set ver 11.0.0
+set ver 11.1.0
set share $base/$ver/share/met
-set lib_base $base/11.0.0
-setenv MET_ROOT $base/$ver/MET-11.0.0
+set lib_base $base/11.1.0
+setenv MET_ROOT $base/$ver
-prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin
-
-
-#prepend-path LD_LIBRARY_PATH $lib_base/external_libs/lib
+prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin
#setenv METversion $ver
#setenv MET_CONFIG $share/config
@@ -36,12 +32,11 @@ prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin
#setenv F77 ifort
#module load intel/2022.1.2
-#module load anaconda/latest
-#setenv libdir /contrib/met/11.0.0/external_libs/lib
-#setenv incdir /contrib/met/11.0.0/external_libs/include
-#setenv iprefix /contrib/met/11.0.0/external_libs
-#setenv basedir /contrib/met/11.0.0/met-11.0.0
+#setenv libdir /contrib/met/11.1.0/external_libs/lib
+#setenv incdir /contrib/met/11.1.0/external_libs/include
+#setenv iprefix /contrib/met/11.1.0/external_libs
+#setenv basedir /contrib/met/11.1.0/met-11.1.0
#setenv MET_HDF5 $iprefix
#setenv MET_NETCDF $incdir
@@ -54,9 +49,9 @@ prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin
#setenv MET_HDFLIB $libdir
#setenv MET_HDFEOSINC $incdir
#setenv MET_HDFEOSLIB $libdir
-#setenv MET_PYTHON /contrib/anaconda3/latest
-#setenv MET_PYTHON_CC -I/contrib/anaconda/anaconda3/latest/include/python3.7m
-#setenv MET_PYTHON_LD -L/contrib/anaconda/anaconda3/latest/lib\ -lpython3.7m\ -lpthread\ -ldl\ -lutil\ -lm\ -Xlinker\ -export-dynamic
+#setenv MET_PYTHON /scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10
+#setenv MET_PYTHON_CC -I${MET_PYTHON}/include/python3.10
+#setenv MET_PYTHON_LD `python3-config --ldflags --embed`
#setenv MET_FONT_DIR $basedir/fonts/
# CAIRO and FREETYPE were not used
@@ -64,5 +59,3 @@ prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin
#setenv MET_CAIROINC $incdir/cairo
#setenv MET_FREETYPELIB $libdir
#setenv MET_FREETYPEINC $incdir/freetype2
-
-
diff --git a/internal/scripts/installation/modulefiles/11.0.0_jet b/internal/scripts/installation/modulefiles/11.1.0_jet
similarity index 80%
rename from internal/scripts/installation/modulefiles/11.0.0_jet
rename to internal/scripts/installation/modulefiles/11.1.0_jet
index 30a6fcc946..fb1190d8f7 100644
--- a/internal/scripts/installation/modulefiles/11.0.0_jet
+++ b/internal/scripts/installation/modulefiles/11.1.0_jet
@@ -3,7 +3,7 @@
## Model Evaluation Tools
##
proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0
+ puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.1.0
*** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
}
@@ -11,9 +11,9 @@ prereq intel
prereq netcdf/4.7.0
prereq hdf5/1.10.5
-set base /contrib/met/11.0.0
-set ver 11.0.0
+set base /contrib/met/11.1.0
+set ver 11.1.0
set share $base/share/met
-setenv MET_ROOT $base/$ver/MET-11.0.0
+setenv MET_ROOT $base/$ver/MET-11.1.0
-prepend-path PATH $base/bin:$base/external_libs/bin:/apps/netcdf/4.7.0/intel/18.0.5.274/bin:/apps/hdf5/1.10.5/intel_seq/18.0.5.274/bin:/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8
\ No newline at end of file
+prepend-path PATH $base/bin:$base/external_libs/bin:/apps/netcdf/4.7.0/intel/18.0.5.274/bin:/apps/hdf5/1.10.5/intel_seq/18.0.5.274/bin:/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.1_py3.10
diff --git a/internal/scripts/installation/modulefiles/11.0.0_orion b/internal/scripts/installation/modulefiles/11.1.0_orion
similarity index 58%
rename from internal/scripts/installation/modulefiles/11.0.0_orion
rename to internal/scripts/installation/modulefiles/11.1.0_orion
index 655c4af0cb..03ebbc352a 100644
--- a/internal/scripts/installation/modulefiles/11.0.0_orion
+++ b/internal/scripts/installation/modulefiles/11.1.0_orion
@@ -3,29 +3,29 @@
## Model Evaluation Tools
##
proc ModulesHelp { } {
- puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0
+ puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.1.0
*** For help see the official MET webpage at http://www.dtcenter.org/met/users ***"
}
prereq intel/2020.2
set base /apps/contrib/MET
-set ver 11.0.0
+set ver 11.1.0
set share $base/$ver/share/met
-set lib_base $base/11.0.0
-setenv MET_ROOT $base/$ver/MET-11.0.0
+set lib_base $base/11.1.0
+setenv MET_ROOT $base/$ver/MET-11.1.0
-prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin
+prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin
-#export CC=icc
+export CC=icc
#export CXX=icc
#export F77=ifort
#module load intel/2020.2
#module load intelpython3/2020.2
-#export libdir=/apps/contrib/MET/11.0.0/external_libs/lib
-#export incdir=/apps/contrib/MET/11.0.0/external_libs/include
-#export iprefix=/apps/contrib/MET/11.0.0/external_libs
-#export basedir=/apps/contrib/MET/11.0.0/MET-11.0.0
+#export libdir=/apps/contrib/MET/11.1.0/external_libs/lib
+#export incdir=/apps/contrib/MET/11.1.0/external_libs/include
+#export iprefix=/apps/contrib/MET/11.1.0/external_libs
+#export basedir=/apps/contrib/MET/11.1.0/MET-11.1.0
#export MET_HDF5=$iprefix
#export MET_NETCDF=$incdir
#export MET_GRIB2CINC=$incdir
@@ -37,9 +37,9 @@ prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/work/noaa/ovp/mini
#export MET_HDFLIB=$libdir
#export MET_HDFEOSINC=$incdir
#export MET_HDFEOSLIB=$libdir
-#export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.0_py3.8
-#export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8
-#export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.8\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm
+#export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10
+#export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10
+#export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm
#export MET_FONT_DIR=$basedir/fonts/
#export LDFLAGS=-Wl,--disable-new-dtags -Wl,-rpath,${libdir}:${MET_PYTHON}/lib
-#export CPPFLAGS=-I/apps/contrib/met/11.0.0/external_libs/include
+#export CPPFLAGS=-I/apps/contrib/met/11.1.0/external_libs/include
diff --git a/internal/test_unit/config/TCStatConfig_ALAL2010 b/internal/test_unit/config/TCStatConfig_ALAL2010
index 02360bf454..05bf63a7a1 100644
--- a/internal/test_unit/config/TCStatConfig_ALAL2010
+++ b/internal/test_unit/config/TCStatConfig_ALAL2010
@@ -211,7 +211,8 @@ jobs = [
"-job filter -amodel AHWI -rirw_track BDECK -rirw_thresh >=30 -rirw_exact FALSE -dump_row ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_AHWI_ri.tcst",
"-job filter -amodel AHWI -rirw_track BDECK -rirw_thresh <=-30 -rirw_exact TRUE -dump_row ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_AHWI_rw.tcst",
"-job rirw -rirw_window 00 -rirw_thresh <=-15 -out_line_type CTC,CTS,MPR",
- "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS,MPR"
+ "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS,MPR",
+ "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS -out_stat ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_rirw.stat"
];
//
diff --git a/internal/test_unit/xml/unit_gen_vx_mask.xml b/internal/test_unit/xml/unit_gen_vx_mask.xml
index 342721af33..ca66cea64c 100644
--- a/internal/test_unit/xml/unit_gen_vx_mask.xml
+++ b/internal/test_unit/xml/unit_gen_vx_mask.xml
@@ -489,6 +489,44 @@
+
+
+
+
+
+
+ &MET_BIN;/gen_vx_mask
+ \
+ 'latlon 360 361 -90 -130 0.5 0.5' \
+ &INPUT_DIR;/shapefile/ne_110m_admin_0_countries/ne_110m_admin_0_countries.shp \
+ &OUTPUT_DIR;/gen_vx_mask/South_America_mask.nc \
+ -type shape -shape_str Continent 'south america' \
+ -name South_America -v 2
+
+
+
+
+
+
+
+
+
+
+ &MET_BIN;/gen_vx_mask
+ \
+ &OUTPUT_DIR;/gen_vx_mask/South_America_mask.nc \
+ &INPUT_DIR;/shapefile/ne_110m_admin_0_countries/ne_110m_admin_0_countries.shp \
+ &OUTPUT_DIR;/gen_vx_mask/South_America_Spain_Portugal_mask.nc \
+ -type shape -shape_str CONTINENT Europe -shape_str Name Spain,Portugal \
+ -name South_America_Spain_Portugal -value 2
+
+
+
+
@@ -500,8 +538,8 @@
PYTHON_NUMPY \
&OUTPUT_DIR;/gen_vx_mask/PYTHON_FCST_or_OBS_mask.nc \
-type data \
- -input_field 'name="&MET_BASE;/python/read_ascii_numpy.py &MET_DATA;/python/fcst.txt FCST";' \
- -mask_field 'name="&MET_BASE;/python/read_ascii_numpy.py &MET_DATA;/python/obs.txt OBS";' \
+ -input_field 'name="&MET_BASE;/python/examples/read_ascii_numpy.py &MET_DATA;/python/fcst.txt FCST";' \
+ -mask_field 'name="&MET_BASE;/python/examples/read_ascii_numpy.py &MET_DATA;/python/obs.txt OBS";' \
-thresh gt0 -union -v 3