diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 99dcf64150..8ca241ca36 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -2,7 +2,7 @@ name: Bug report about: Fix something that's not working title: 'Bugfix: ' -labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: bug' +labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: bug' assignees: '' --- @@ -49,7 +49,7 @@ Describe the steps to reproduce the behavior: ### Projects and Milestone ### - [ ] Select **Organization** level **Project** for support of the current coordinated release -- [ ] Select **Repository** level **Project** for development toward the next official release or add **alert: NEED PROJECT ASSIGNMENT** label +- [ ] Select **Repository** level **Project** for development toward the next official release or add **alert: NEED CYCLE ASSIGNMENT** label - [ ] Select **Milestone** as the next bugfix version ## Define Related Issue(s) ## diff --git a/.github/ISSUE_TEMPLATE/enhancement_request.md b/.github/ISSUE_TEMPLATE/enhancement_request.md index 0b82c047c5..df95cc1587 100644 --- a/.github/ISSUE_TEMPLATE/enhancement_request.md +++ b/.github/ISSUE_TEMPLATE/enhancement_request.md @@ -2,7 +2,7 @@ name: Enhancement request about: Improve something that it's currently doing title: '' -labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: enhancement' +labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: enhancement' assignees: '' --- @@ -38,7 +38,7 @@ Consider breaking the enhancement down into sub-issues. - [ ] Select **requestor(s)** ### Projects and Milestone ### -- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED PROJECT ASSIGNMENT** label +- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label - [ ] Select **Milestone** as the next official version or **Future Versions** ## Define Related Issue(s) ## diff --git a/.github/ISSUE_TEMPLATE/new_feature_request.md b/.github/ISSUE_TEMPLATE/new_feature_request.md index 4b1ae69aff..c2e0179c28 100644 --- a/.github/ISSUE_TEMPLATE/new_feature_request.md +++ b/.github/ISSUE_TEMPLATE/new_feature_request.md @@ -2,7 +2,7 @@ name: New feature request about: Make it do something new title: '' -labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: new feature' +labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: new feature' assignees: '' --- @@ -42,7 +42,7 @@ Consider breaking the new feature down into sub-issues. 
- [ ] Select **requestor(s)** ### Projects and Milestone ### -- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED PROJECT ASSIGNMENT** label +- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label - [ ] Select **Milestone** as the next official version or **Future Versions** ## Define Related Issue(s) ## diff --git a/.github/ISSUE_TEMPLATE/sub-issue.md b/.github/ISSUE_TEMPLATE/sub-issue.md index 77bf2b2844..855e27d83d 100644 --- a/.github/ISSUE_TEMPLATE/sub-issue.md +++ b/.github/ISSUE_TEMPLATE/sub-issue.md @@ -2,7 +2,7 @@ name: Sub-Issue about: Break an issue down into smaller parts title: '' -labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: sub-issue' +labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: sub-issue' assignees: '' --- @@ -28,5 +28,5 @@ This is a sub-issue of #*List the parent issue number here*. - [ ] Select **requestor(s)** ### Projects and Milestone ### -- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED PROJECT ASSIGNMENT** label +- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label - [ ] Select **Milestone** as the next official version or **Future Versions** diff --git a/.github/ISSUE_TEMPLATE/task.md b/.github/ISSUE_TEMPLATE/task.md index 561012a7f6..f175cdb678 100644 --- a/.github/ISSUE_TEMPLATE/task.md +++ b/.github/ISSUE_TEMPLATE/task.md @@ -2,7 +2,7 @@ name: Task about: Describe something that needs to be done title: '' -labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED PROJECT ASSIGNMENT, type: task' +labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: task' assignees: '' --- @@ -38,7 +38,7 @@ Consider breaking the task down into sub-issues. - [ ] Select **requestor(s)** ### Projects and Milestone ### -- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED PROJECT ASSIGNMENT** label +- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label - [ ] Select **Milestone** as the next official version or **Future Versions** ## Define Related Issue(s) ## diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index d270ba6404..c147a4ef79 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -14,7 +14,12 @@ on: - 'bugfix_**' paths-ignore: - 'docs/**' - + - '.github/pull_request_template.md' + - '.github/ISSUE_TEMPLATE/**' + - '.github/labels/**' + - '**/README.md' + - '**/LICENSE.md' + pull_request: types: [opened, reopened, synchronize] branches: @@ -22,7 +27,12 @@ on: - 'main_v**' paths-ignore: - 'docs/**' - + - '.github/pull_request_template.md' + - '.github/ISSUE_TEMPLATE/**' + - '.github/labels/**' + - '**/README.md' + - '**/LICENSE.md' + workflow_dispatch: inputs: force_tests: diff --git a/.github/workflows/update_truth.yml b/.github/workflows/update_truth.yml new file mode 100644 index 0000000000..a5cc330cab --- /dev/null +++ b/.github/workflows/update_truth.yml @@ -0,0 +1,59 @@ +name: Create Truth Data Update Pull Request + +on: + workflow_dispatch: + inputs: + pull_requests: + description: 'Pull request(s) that warranted update, e.g. 
"#123" or "#123 and dtcenter/MET#123"' + required: true + change_summary: + description: 'Summary of changes to truth data' + required: true + +jobs: + update_truth: + name: "Update or create truth reference branch" + runs-on: ubuntu-latest + steps: + - name: Check if branch is develop or main_vX.Y + run: | + branch_name=${GITHUB_REF#refs/heads/} + echo "branch_name=$branch_name" >> $GITHUB_ENV + if [[ $branch_name == "develop" ]] || \ + [[ $branch_name =~ ^main_v[0-9]+\.[0-9]+$ ]]; then + echo Branch is valid - $branch_name + exit 0 + fi + echo ERROR: Branch is $branch_name - must be develop or match main_vX.Y + exit 1 + - uses: actions/checkout@v3 + name: Checkout repository + with: + fetch-depth: 0 + token: ${{ secrets.METPLUS_BOT_TOKEN }} + - name: Resolve conflicts between branch and branch-ref + run: | + branch_name=${{ env.branch_name }} + cd ${GITHUB_WORKSPACE} + if [[ -z "$(git ls-remote --heads origin ${branch_name}-ref)" ]]; then + echo ERROR: ${branch_name}-ref does not exist + exit 1 + fi + + echo ${branch_name}-ref does exist -- update it + git config --global user.name "metplus-bot" + git config --global user.email "97135045+metplus-bot@users.noreply.github.com" + echo git checkout ${branch_name} + git checkout ${branch_name} + echo git merge -s ours origin/${branch_name}-ref + git merge -s ours origin/${branch_name}-ref + echo git push origin ${branch_name} + git push origin ${branch_name} + + - name: Create Pull Request + run: gh pr create --base $BASE --body "$BODY" --title "$TITLE" + env: + GH_TOKEN: ${{ github.token }} + BASE: ${{ env.branch_name }}-ref + BODY: ${{ github.event.inputs.change_summary }}
Created by @${{ github.actor}} + TITLE: Update ${{ env.branch_name }}-ref after ${{ github.event.inputs.pull_requests }} diff --git a/.readthedocs.yaml b/.readthedocs.yaml index b08da35999..cb3aab55c7 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -9,10 +9,14 @@ version: 2 #formats: all formats: [pdf] +build: + os: ubuntu-22.04 + tools: + python: "3.10" + # Optionally set the version of Python and requirements required to build your # docs python: - version: 3.8 install: - requirements: docs/requirements.txt diff --git a/Makefile.am b/Makefile.am index f3c2b7a83a..a1e75367dd 100644 --- a/Makefile.am +++ b/Makefile.am @@ -18,7 +18,7 @@ ACLOCAL_AMFLAGS = -I m4 -SUBDIRS = data src scripts/Rscripts scripts/python scripts/utility +SUBDIRS = data src scripts/Rscripts scripts/python if ENABLE_DEVELOPMENT SUBDIRS += internal/test_util diff --git a/Makefile.in b/Makefile.in index 8236187480..aa3a1d74ff 100644 --- a/Makefile.in +++ b/Makefile.in @@ -158,7 +158,7 @@ ETAGS = etags CTAGS = ctags CSCOPE = cscope DIST_SUBDIRS = data src scripts/Rscripts scripts/python \ - scripts/utility internal/test_util + internal/test_util am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/config.h.in INSTALL \ compile config.guess config.sub depcomp install-sh missing \ ylwrap @@ -350,8 +350,7 @@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ ACLOCAL_AMFLAGS = -I m4 -SUBDIRS = data src scripts/Rscripts scripts/python scripts/utility \ - $(am__append_1) +SUBDIRS = data src scripts/Rscripts scripts/python $(am__append_1) MAINTAINERCLEANFILES = \ Makefile.in \ aclocal.m4 \ diff --git a/configure b/configure index d47929b214..12f9307754 100755 --- a/configure +++ b/configure @@ -6051,6 +6051,9 @@ fi CPPFLAGS=$CPPFLAGS' -DMET_BASE="\"$(pkgdatadir)\""' +# Add -std=c++11 to CXXFLAGS +CXXFLAGS=$CXXFLAGS' -std=c++11' + # Define other variables for the makefiles FC_LIBS=-lgfortran @@ -8874,7 +8877,7 @@ done # Create configured files -ac_config_files="$ac_config_files Makefile scripts/Rscripts/Makefile scripts/Rscripts/include/Makefile scripts/python/Makefile scripts/utility/Makefile data/Makefile data/climo/Makefile data/climo/seeps/Makefile data/colortables/Makefile data/colortables/NCL_colortables/Makefile data/config/Makefile data/map/Makefile data/map/admin_by_country/Makefile data/poly/Makefile data/poly/HMT_masks/Makefile data/poly/NCEP_masks/Makefile data/wrappers/Makefile data/ps/Makefile data/table_files/Makefile data/tc_data/Makefile src/Makefile src/basic/Makefile src/basic/enum_to_string/Makefile src/basic/vx_cal/Makefile src/basic/vx_config/Makefile src/basic/vx_log/Makefile src/basic/vx_math/Makefile src/basic/vx_util/Makefile src/basic/vx_util_math/Makefile src/libcode/Makefile src/libcode/vx_afm/Makefile src/libcode/vx_analysis_util/Makefile src/libcode/vx_color/Makefile src/libcode/vx_data2d/Makefile src/libcode/vx_data2d_factory/Makefile src/libcode/vx_data2d_grib/Makefile src/libcode/vx_data2d_grib2/Makefile src/libcode/vx_data2d_nc_met/Makefile src/libcode/vx_data2d_nc_pinterp/Makefile src/libcode/vx_data2d_nccf/Makefile src/libcode/vx_geodesy/Makefile src/libcode/vx_gis/Makefile src/libcode/vx_gnomon/Makefile src/libcode/vx_grid/Makefile src/libcode/vx_gsl_prob/Makefile src/libcode/vx_nav/Makefile src/libcode/vx_solar/Makefile src/libcode/vx_nc_obs/Makefile src/libcode/vx_nc_util/Makefile src/libcode/vx_pb_util/Makefile src/libcode/vx_plot_util/Makefile src/libcode/vx_ps/Makefile src/libcode/vx_pxm/Makefile src/libcode/vx_render/Makefile 
src/libcode/vx_shapedata/Makefile src/libcode/vx_stat_out/Makefile src/libcode/vx_statistics/Makefile src/libcode/vx_time_series/Makefile src/libcode/vx_physics/Makefile src/libcode/vx_series_data/Makefile src/libcode/vx_regrid/Makefile src/libcode/vx_tc_util/Makefile src/libcode/vx_summary/Makefile src/libcode/vx_python3_utils/Makefile src/libcode/vx_data2d_python/Makefile src/libcode/vx_bool_calc/Makefile src/libcode/vx_pointdata_python/Makefile src/libcode/vx_seeps/Makefile src/tools/Makefile src/tools/core/Makefile src/tools/core/ensemble_stat/Makefile src/tools/core/grid_stat/Makefile src/tools/core/mode/Makefile src/tools/core/mode_analysis/Makefile src/tools/core/pcp_combine/Makefile src/tools/core/point_stat/Makefile src/tools/core/series_analysis/Makefile src/tools/core/stat_analysis/Makefile src/tools/core/wavelet_stat/Makefile src/tools/other/Makefile src/tools/other/ascii2nc/Makefile src/tools/other/lidar2nc/Makefile src/tools/other/gen_ens_prod/Makefile src/tools/other/gen_vx_mask/Makefile src/tools/other/gis_utils/Makefile src/tools/other/ioda2nc/Makefile src/tools/other/madis2nc/Makefile src/tools/other/mode_graphics/Makefile src/tools/other/modis_regrid/Makefile src/tools/other/pb2nc/Makefile src/tools/other/plot_data_plane/Makefile src/tools/other/plot_point_obs/Makefile src/tools/other/wwmca_tool/Makefile src/tools/other/gsi_tools/Makefile src/tools/other/regrid_data_plane/Makefile src/tools/other/point2grid/Makefile src/tools/other/shift_data_plane/Makefile src/tools/other/mode_time_domain/Makefile src/tools/other/grid_diag/Makefile src/tools/tc_utils/Makefile src/tools/tc_utils/tc_dland/Makefile src/tools/tc_utils/tc_pairs/Makefile src/tools/tc_utils/tc_stat/Makefile src/tools/tc_utils/tc_gen/Makefile src/tools/tc_utils/rmw_analysis/Makefile src/tools/tc_utils/tc_rmw/Makefile" +ac_config_files="$ac_config_files Makefile scripts/Rscripts/Makefile scripts/Rscripts/include/Makefile scripts/python/Makefile scripts/python/examples/Makefile scripts/python/met/Makefile scripts/python/pyembed/Makefile scripts/python/utility/Makefile data/Makefile data/climo/Makefile data/climo/seeps/Makefile data/colortables/Makefile data/colortables/NCL_colortables/Makefile data/config/Makefile data/map/Makefile data/map/admin_by_country/Makefile data/poly/Makefile data/poly/HMT_masks/Makefile data/poly/NCEP_masks/Makefile data/ps/Makefile data/table_files/Makefile data/tc_data/Makefile src/Makefile src/basic/Makefile src/basic/enum_to_string/Makefile src/basic/vx_cal/Makefile src/basic/vx_config/Makefile src/basic/vx_log/Makefile src/basic/vx_math/Makefile src/basic/vx_util/Makefile src/basic/vx_util_math/Makefile src/libcode/Makefile src/libcode/vx_afm/Makefile src/libcode/vx_analysis_util/Makefile src/libcode/vx_color/Makefile src/libcode/vx_data2d/Makefile src/libcode/vx_data2d_factory/Makefile src/libcode/vx_data2d_grib/Makefile src/libcode/vx_data2d_grib2/Makefile src/libcode/vx_data2d_nc_met/Makefile src/libcode/vx_data2d_nc_pinterp/Makefile src/libcode/vx_data2d_nccf/Makefile src/libcode/vx_geodesy/Makefile src/libcode/vx_gis/Makefile src/libcode/vx_gnomon/Makefile src/libcode/vx_grid/Makefile src/libcode/vx_gsl_prob/Makefile src/libcode/vx_nav/Makefile src/libcode/vx_solar/Makefile src/libcode/vx_nc_obs/Makefile src/libcode/vx_nc_util/Makefile src/libcode/vx_pb_util/Makefile src/libcode/vx_plot_util/Makefile src/libcode/vx_ps/Makefile src/libcode/vx_pxm/Makefile src/libcode/vx_render/Makefile src/libcode/vx_shapedata/Makefile src/libcode/vx_stat_out/Makefile 
src/libcode/vx_statistics/Makefile src/libcode/vx_time_series/Makefile src/libcode/vx_physics/Makefile src/libcode/vx_series_data/Makefile src/libcode/vx_regrid/Makefile src/libcode/vx_tc_util/Makefile src/libcode/vx_summary/Makefile src/libcode/vx_python3_utils/Makefile src/libcode/vx_data2d_python/Makefile src/libcode/vx_bool_calc/Makefile src/libcode/vx_pointdata_python/Makefile src/libcode/vx_seeps/Makefile src/tools/Makefile src/tools/core/Makefile src/tools/core/ensemble_stat/Makefile src/tools/core/grid_stat/Makefile src/tools/core/mode/Makefile src/tools/core/mode_analysis/Makefile src/tools/core/pcp_combine/Makefile src/tools/core/point_stat/Makefile src/tools/core/series_analysis/Makefile src/tools/core/stat_analysis/Makefile src/tools/core/wavelet_stat/Makefile src/tools/other/Makefile src/tools/other/ascii2nc/Makefile src/tools/other/lidar2nc/Makefile src/tools/other/gen_ens_prod/Makefile src/tools/other/gen_vx_mask/Makefile src/tools/other/gis_utils/Makefile src/tools/other/ioda2nc/Makefile src/tools/other/madis2nc/Makefile src/tools/other/mode_graphics/Makefile src/tools/other/modis_regrid/Makefile src/tools/other/pb2nc/Makefile src/tools/other/plot_data_plane/Makefile src/tools/other/plot_point_obs/Makefile src/tools/other/wwmca_tool/Makefile src/tools/other/gsi_tools/Makefile src/tools/other/regrid_data_plane/Makefile src/tools/other/point2grid/Makefile src/tools/other/shift_data_plane/Makefile src/tools/other/mode_time_domain/Makefile src/tools/other/grid_diag/Makefile src/tools/tc_utils/Makefile src/tools/tc_utils/tc_dland/Makefile src/tools/tc_utils/tc_pairs/Makefile src/tools/tc_utils/tc_stat/Makefile src/tools/tc_utils/tc_gen/Makefile src/tools/tc_utils/rmw_analysis/Makefile src/tools/tc_utils/tc_rmw/Makefile" if test -n "$MET_DEVELOPMENT"; then @@ -9762,7 +9765,10 @@ do "scripts/Rscripts/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Rscripts/Makefile" ;; "scripts/Rscripts/include/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Rscripts/include/Makefile" ;; "scripts/python/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/Makefile" ;; - "scripts/utility/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/utility/Makefile" ;; + "scripts/python/examples/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/examples/Makefile" ;; + "scripts/python/met/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/met/Makefile" ;; + "scripts/python/pyembed/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/pyembed/Makefile" ;; + "scripts/python/utility/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/python/utility/Makefile" ;; "data/Makefile") CONFIG_FILES="$CONFIG_FILES data/Makefile" ;; "data/climo/Makefile") CONFIG_FILES="$CONFIG_FILES data/climo/Makefile" ;; "data/climo/seeps/Makefile") CONFIG_FILES="$CONFIG_FILES data/climo/seeps/Makefile" ;; @@ -9774,7 +9780,6 @@ do "data/poly/Makefile") CONFIG_FILES="$CONFIG_FILES data/poly/Makefile" ;; "data/poly/HMT_masks/Makefile") CONFIG_FILES="$CONFIG_FILES data/poly/HMT_masks/Makefile" ;; "data/poly/NCEP_masks/Makefile") CONFIG_FILES="$CONFIG_FILES data/poly/NCEP_masks/Makefile" ;; - "data/wrappers/Makefile") CONFIG_FILES="$CONFIG_FILES data/wrappers/Makefile" ;; "data/ps/Makefile") CONFIG_FILES="$CONFIG_FILES data/ps/Makefile" ;; "data/table_files/Makefile") CONFIG_FILES="$CONFIG_FILES data/table_files/Makefile" ;; "data/tc_data/Makefile") CONFIG_FILES="$CONFIG_FILES data/tc_data/Makefile" ;; diff --git a/configure.ac b/configure.ac index 60cc07bcd7..e54e5ea3b3 100644 --- a/configure.ac +++ b/configure.ac @@ -1086,6 +1086,9 @@ 
AM_CONDITIONAL([ENABLE_DEVELOPMENT], [test -n "$MET_DEVELOPMENT"]) CPPFLAGS=$CPPFLAGS' -DMET_BASE="\"$(pkgdatadir)\""' +# Add -std=c++11 to CXXFLAGS +CXXFLAGS=$CXXFLAGS' -std=c++11' + # Define other variables for the makefiles AC_SUBST(FC_LIBS, [-lgfortran]) @@ -1204,7 +1207,10 @@ AC_CONFIG_FILES([Makefile scripts/Rscripts/Makefile scripts/Rscripts/include/Makefile scripts/python/Makefile - scripts/utility/Makefile + scripts/python/examples/Makefile + scripts/python/met/Makefile + scripts/python/pyembed/Makefile + scripts/python/utility/Makefile data/Makefile data/climo/Makefile data/climo/seeps/Makefile @@ -1216,7 +1222,6 @@ AC_CONFIG_FILES([Makefile data/poly/Makefile data/poly/HMT_masks/Makefile data/poly/NCEP_masks/Makefile - data/wrappers/Makefile data/ps/Makefile data/table_files/Makefile data/tc_data/Makefile diff --git a/data/Makefile.am b/data/Makefile.am index ac1483ba59..6cebff2c79 100644 --- a/data/Makefile.am +++ b/data/Makefile.am @@ -24,8 +24,7 @@ SUBDIRS = \ poly \ ps \ table_files \ - tc_data \ - wrappers + tc_data topdir = $(pkgdatadir) diff --git a/data/Makefile.in b/data/Makefile.in index acf549d2de..2bf69df44f 100644 --- a/data/Makefile.in +++ b/data/Makefile.in @@ -362,8 +362,7 @@ SUBDIRS = \ poly \ ps \ table_files \ - tc_data \ - wrappers + tc_data topdir = $(pkgdatadir) top_DATA = \ diff --git a/data/wrappers/read_tmp_dataplane.py b/data/wrappers/read_tmp_dataplane.py deleted file mode 100644 index 98bbe728d8..0000000000 --- a/data/wrappers/read_tmp_dataplane.py +++ /dev/null @@ -1,37 +0,0 @@ -######################################################################## -# -# Reads temporary file into memory. -# -# usage: /path/to/python read_tmp_dataplane.py dataplane.tmp -# -######################################################################## - -import sys -import numpy as np -import netCDF4 as nc - -met_info = {} -netcdf_filename = sys.argv[1] - -# read NetCDF file -ds = nc.Dataset(netcdf_filename, 'r') -met_data = ds['met_data'][:] -met_attrs = {} - -# grid is defined as a dictionary or string -grid = {} -for attr, attr_val in ds.__dict__.items(): - if 'grid.' in attr: - grid_attr = attr.split('.')[1] - grid[grid_attr] = attr_val - else: - met_attrs[attr] = attr_val - -if grid: - met_attrs['grid'] = grid - -met_attrs['name'] = met_attrs['name_str'] -del met_attrs['name_str'] -met_info['met_data'] = met_data -met_info['attrs'] = met_attrs - diff --git a/data/wrappers/read_tmp_point_nc.py b/data/wrappers/read_tmp_point_nc.py deleted file mode 100644 index 0ef8eefc3a..0000000000 --- a/data/wrappers/read_tmp_point_nc.py +++ /dev/null @@ -1,26 +0,0 @@ -######################################################################## -# -# Reads temporary point obs. file into memory. 
-# -# usage: /path/to/python read_tmp_point_nc.py tmp_output_filename -# -######################################################################## - -import os -import sys - -# add share/met/python directory to system path to find met_point_obs -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), - os.pardir, 'python'))) -from met_point_obs import met_point_obs -from met_point_obs_nc import nc_point_obs - -netcdf_filename = sys.argv[1] - -# read NetCDF file -print('{p} reading{f}'.format(p=met_point_obs.get_prompt(), f=netcdf_filename)) -point_obs_data = nc_point_obs() -point_obs_data.read_data(netcdf_filename) - -met_point_data = point_obs_data.get_point_data() -met_point_data['met_point_data'] = point_obs_data diff --git a/data/wrappers/write_tmp_dataplane.py b/data/wrappers/write_tmp_dataplane.py deleted file mode 100644 index 476d2348c3..0000000000 --- a/data/wrappers/write_tmp_dataplane.py +++ /dev/null @@ -1,75 +0,0 @@ -######################################################################## -# -# Adapted from a script provided by George McCabe -# Adapted by Randy Bullock -# -# usage: /path/to/python write_tmp_dataplane.py \ -# tmp_output_filename .py -# -######################################################################## - -import os -import sys -import importlib.util -import netCDF4 as nc - -print("Python Script:\t" + repr(sys.argv[0])) -print("User Command:\t" + repr(' '.join(sys.argv[2:]))) -print("Temporary File:\t" + repr(sys.argv[1])) - -netcdf_filename = sys.argv[1] -pyembed_module_name = sys.argv[2] -sys.argv = sys.argv[2:] - -# add share/met/python directory to system path to find met_point_obs -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), - os.pardir, 'python'))) - -# append user script dir to system path -pyembed_dir, pyembed_file = os.path.split(pyembed_module_name) -if pyembed_dir: - sys.path.insert(0, pyembed_dir) - -if not pyembed_module_name.endswith('.py'): - pyembed_module_name += '.py' - -user_base = os.path.basename(pyembed_module_name).replace('.py','') - -spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name) -met_in = importlib.util.module_from_spec(spec) -spec.loader.exec_module(met_in) - -met_info = {'met_data': met_in.met_data} -if hasattr(met_in.met_data, 'attrs') and met_in.met_data.attrs: - attrs = met_in.met_data.attrs -else: - attrs = met_in.attrs -met_info['attrs'] = attrs - -# determine fill value -try: - fill = met_data.get_fill_value() -except: - fill = -9999. - -# write NetCDF file -ds = nc.Dataset(netcdf_filename, 'w') - -# create dimensions and variable -nx, ny = met_in.met_data.shape -ds.createDimension('x', nx) -ds.createDimension('y', ny) -dp = ds.createVariable('met_data', met_in.met_data.dtype, ('x', 'y'), fill_value=fill) -dp[:] = met_in.met_data - -# append attributes -for attr, attr_val in met_info['attrs'].items(): - if attr == 'name': - setattr(ds, 'name_str', attr_val) - elif type(attr_val) == dict: - for key in attr_val: - setattr(ds, attr + '.' 
+ key, attr_val[key]) - else: - setattr(ds, attr, attr_val) - -ds.close() diff --git a/data/wrappers/write_tmp_mpr.py b/data/wrappers/write_tmp_mpr.py deleted file mode 100644 index 3eee0379f5..0000000000 --- a/data/wrappers/write_tmp_mpr.py +++ /dev/null @@ -1,43 +0,0 @@ -######################################################################## -# -# Adapted from a script provided by George McCabe -# Adapted by Randy Bullock -# -# usage: /path/to/python write_tmp_mpr.py \ -# tmp_output_filename .py -# -######################################################################## - -import os -import sys -import importlib.util - -print("Python Script:\t" + repr(sys.argv[0])) -print("User Command:\t" + repr(' '.join(sys.argv[2:]))) -print("Temporary File:\t" + repr(sys.argv[1])) - -tmp_filename = sys.argv[1] -pyembed_module_name = sys.argv[2] -sys.argv = sys.argv[2:] - -# add share/met/python directory to system path to find met_point_obs -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), - os.pardir, 'python'))) - -# append user script dir to system path -pyembed_dir, pyembed_file = os.path.split(pyembed_module_name) -if pyembed_dir: - sys.path.insert(0, pyembed_dir) - -if not pyembed_module_name.endswith('.py'): - pyembed_module_name += '.py' - -user_base = os.path.basename(pyembed_module_name).replace('.py','') - -spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name) -met_in = importlib.util.module_from_spec(spec) -spec.loader.exec_module(met_in) - -f = open(tmp_filename, 'w') -for line in met_in.mpr_data: - f.write(str(line) + '\n') diff --git a/data/wrappers/write_tmp_point.py b/data/wrappers/write_tmp_point.py deleted file mode 100644 index 916fca5549..0000000000 --- a/data/wrappers/write_tmp_point.py +++ /dev/null @@ -1,43 +0,0 @@ -######################################################################## -# -# Adapted from a script provided by George McCabe -# Adapted by Randy Bullock -# -# usage: /path/to/python write_tmp_point.py \ -# tmp_output_filename .py -# -######################################################################## - -import os -import sys -import importlib.util - -print("Python Script:\t" + repr(sys.argv[0])) -print("User Command:\t" + repr(' '.join(sys.argv[2:]))) -print("Temporary File:\t" + repr(sys.argv[1])) - -tmp_filename = sys.argv[1] -pyembed_module_name = sys.argv[2] -sys.argv = sys.argv[2:] - -# add share/met/python directory to system path to find met_point_obs -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), - os.pardir, 'python'))) - -# append user script dir to system path -pyembed_dir, pyembed_file = os.path.split(pyembed_module_name) -if pyembed_dir: - sys.path.insert(0, pyembed_dir) - -if not pyembed_module_name.endswith('.py'): - pyembed_module_name += '.py' - -user_base = os.path.basename(pyembed_module_name).replace('.py','') - -spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name) -met_in = importlib.util.module_from_spec(spec) -spec.loader.exec_module(met_in) - -f = open(tmp_filename, 'w') -for line in met_in.point_data: - f.write(str(line) + '\n') diff --git a/data/wrappers/write_tmp_point_nc.py b/data/wrappers/write_tmp_point_nc.py deleted file mode 100644 index 063a2e98cc..0000000000 --- a/data/wrappers/write_tmp_point_nc.py +++ /dev/null @@ -1,55 +0,0 @@ -######################################################################## -# -# Adapted from a script provided by George McCabe -# Adapted by Howard Soh -# -# usage: /path/to/python 
write_tmp_point_nc.py \ -# tmp_output_filename .py -# -######################################################################## - -import os -import sys -import importlib.util - -# add share/met/python directory to system path to find met_point_obs -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), - os.pardir, 'python'))) - -from met_point_obs import met_point_obs -from met_point_obs_nc import nc_point_obs - -PROMPT = met_point_obs.get_prompt() -print("{p} Python Script:\t".format(p=PROMPT) + repr(sys.argv[0])) -print("{p} User Command:\t".format(p=PROMPT) + repr(' '.join(sys.argv[2:]))) -print("{p} Temporary File:\t".format(p=PROMPT) + repr(sys.argv[1])) - -tmp_filename = sys.argv[1] -pyembed_module_name = sys.argv[2] -sys.argv = sys.argv[2:] - -# append user script dir to system path -pyembed_dir, pyembed_file = os.path.split(pyembed_module_name) -if pyembed_dir: - sys.path.insert(0, pyembed_dir) - -if not pyembed_module_name.endswith('.py'): - pyembed_module_name += '.py' - -user_base = os.path.basename(pyembed_module_name).replace('.py','') - -spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name) -met_in = importlib.util.module_from_spec(spec) -spec.loader.exec_module(met_in) - -if hasattr(met_in, 'point_obs_data'): - met_in.point_obs_data.save_ncfile(tmp_filename) -else: - if hasattr(met_in.met_point_data, 'point_obs_data'): - met_in.met_point_data['point_obs_data'].save_ncfile(tmp_filename) - else: - tmp_point_obs = nc_point_obs() - tmp_point_obs.put_data(met_in.met_point_data) - tmp_point_obs.save_ncfile(tmp_filename) - -#print('{p} writing {f}'.format(p=PROMPT, f=tmp_filename)) diff --git a/docs/Users_Guide/appendixA.rst b/docs/Users_Guide/appendixA.rst index 6fe28f380f..099498402c 100644 --- a/docs/Users_Guide/appendixA.rst +++ b/docs/Users_Guide/appendixA.rst @@ -12,240 +12,252 @@ File-IO Q. How do I improve the speed of MET tools using Gen-Vx-Mask? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + .. dropdown:: Answer + + The main reason to run gen_vx_mask is to make the MET + statistics tools (e.g. point_stat, grid_stat, or ensemble_stat) run + faster. The verification masking regions in those tools can be specified + as Lat/Lon polyline files or the NetCDF output of gen_vx_mask. However, + determining which grid points are inside/outside a polyline region can be + slow if the polyline contains many points or the grid is dense. Running + gen_vx_mask once to create a binary mask is much more efficient than + recomputing the mask when each MET statistics tool is run. If the polyline + only contains a small number of points or the grid is sparse, running + gen_vx_mask first would only save a second or two. -A. -The main reason to run gen_vx_mask is to make the MET -statistics tools (e.g. point_stat, grid_stat, or ensemble_stat) run -faster. The verification masking regions in those tools can be specified -as Lat/Lon polyline files or the NetCDF output of gen_vx_mask. However, -determining which grid points are inside/outside a polyline region can be -slow if the polyline contains many points or the grid is dense. Running -gen_vx_mask once to create a binary mask is much more efficient than -recomputing the mask when each MET statistics tool is run. If the polyline -only contains a small number of points or the grid is sparse running -gen_vx_mask first would only save a second or two. - + Q. How do I use map_data? -^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -The MET repository includes several map data files.
Users can modify which -map datasets are included in the plots created by modifying the -configuration files for those tools. The default map datasets are defined -by the map_data dictionary in the ConfigMapData file. + .. dropdown:: Answer + + The MET repository includes several map data files. Users can modify which + map datasets are included in the plots created by modifying the + configuration files for those tools. The default map datasets are defined + by the map_data dictionary in the ConfigMapData file. -.. code-block:: none + .. code-block:: none - map_data = { + map_data = { - line_color = [ 25, 25, 25 ]; // rgb triple values, 0-255 - line_width = 0.5; - line_dash = ""; + line_color = [ 25, 25, 25 ]; // rgb triple values, 0-255 + line_width = 0.5; + line_dash = ""; - source = [ - { file_name = "MET_BASE/map/country_data"; }, - { file_name = "MET_BASE/map/usa_state_data"; }, - { file_name = "MET_BASE/map/major_lakes_data"; } - ]; - } + source = [ + { file_name = "MET_BASE/map/country_data"; }, + { file_name = "MET_BASE/map/usa_state_data"; }, + { file_name = "MET_BASE/map/major_lakes_data"; } + ]; + } -Users can modify the ConfigMapData contents prior to running 'make install'. -This will change the default map data for all of the MET tools which plots. -Alternatively, users can copy/paste/modify the map_data dictionary into the -configuration file for a MET tool. For example, you could add map_data to -the end of the MODE configuration file to customize plots created by MODE. + Users can modify the ConfigMapData contents prior to running + 'make install'. + This will change the default map data for all of the MET tools which plot. + Alternatively, users can copy/paste/modify the map_data dictionary into the + configuration file for a MET tool. For example, you could add map_data to + the end of the MODE configuration file to customize plots created by MODE. -Here is an example of running plot_data_plane and specifying the map_data -in the configuration string on the command line: + Here is an example of running plot_data_plane and specifying the map_data + in the configuration string on the command line: -.. code-block:: none - - plot_data_plane - sample.grib china_tmp_2m_admin.ps \ - 'name="TMP"; level="Z2"; \ - map_data = { source = [ { file_name = \ - "${MET_BUILD_BASE}/data/map/admin_by_country/admin_China_data"; } \ - ]; }' + .. code-block:: none + + plot_data_plane + sample.grib china_tmp_2m_admin.ps \ + 'name="TMP"; level="Z2"; \ + map_data = { source = [ { file_name = \ + "${MET_BUILD_BASE}/data/map/admin_by_country/admin_China_data"; } \ + ]; }' Q. How can I understand the number of matched pairs? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -Statistics are computed on matched forecast/observation pairs data. -For example, if the dimension of the grid is 37x37 up to -1369 matched pairs are possible. However, if the forecast or -observation contains bad data at a point, that matched pair would -not be included in the calculations. There are a number of reasons that -observations could be rejected - mismatches in station id, variable names, -valid times, bad values, data off the grid, etc. -For example, if the forecast field contains missing data around the -edge of the domain, then that is a reason there may be 992 matched pairs -instead of 1369. Users can use the ncview tool to look at an example -netCDF file or run their files through plot_data_plane to help identify -any potential issues.
- -One common support question is "Why am I getting 0 matched pairs from -Point-Stat?". As mentioned above, there are many reasons why point -observations can be excluded from your analysis. If running point_stat with -at least verbosity level 2 (-v 2, the default value), zero matched pairs -will result in the following type of log messages to be printed: - -.. code-block:: none - - DEBUG 2: Processing TMP/Z2 versus TMP/Z2, for observation type ADPSFC, over region FULL, for interpolation method UW_MEAN(1), using 0 pairs. - DEBUG 2: Number of matched pairs = 0 - DEBUG 2: Observations processed = 1166 - DEBUG 2: Rejected: station id = 0 - DEBUG 2: Rejected: obs var name = 1166 - DEBUG 2: Rejected: valid time = 0 - DEBUG 2: Rejected: bad obs value = 0 - DEBUG 2: Rejected: off the grid = 0 - DEBUG 2: Rejected: topography = 0 - DEBUG 2: Rejected: level mismatch = 0 - DEBUG 2: Rejected: quality marker = 0 - DEBUG 2: Rejected: message type = 0 - DEBUG 2: Rejected: masking region = 0 - DEBUG 2: Rejected: bad fcst value = 0 - DEBUG 2: Rejected: bad climo mean = 0 - DEBUG 2: Rejected: bad climo stdev = 0 - DEBUG 2: Rejected: mpr filter = 0 - DEBUG 2: Rejected: duplicates = 0 - -This list of the rejection reason counts above matches the order in -which the filtering logic is applied in the code. In this example, -none of the point observations match the variable name requested -in the configuration file. So all of the 1166 observations are rejected -for the same reason. + .. dropdown:: Answer + + Statistics are computed on matched forecast/observation pairs data. + For example, if the dimension of the grid is 37x37 up to + 1369 matched pairs are possible. However, if the forecast or + observation contains bad data at a point, that matched pair would + not be included in the calculations. There are a number of reasons that + observations could be rejected - mismatches in station id, variable names, + valid times, bad values, data off the grid, etc. + For example, if the forecast field contains missing data around the + edge of the domain, then that is a reason there may be 992 matched pairs + instead of 1369. Users can use the ncview tool to look at an example + netCDF file or run their files through plot_data_plane to help identify + any potential issues. + + One common support question is "Why am I getting 0 matched pairs from + Point-Stat?". As mentioned above, there are many reasons why point + observations can be excluded from your analysis. If running point_stat with + at least verbosity level 2 (-v 2, the default value), zero matched pairs + will result in the following type of log messages being printed: + + .. code-block:: none + + DEBUG 2: Processing TMP/Z2 versus TMP/Z2, for observation type ADPSFC, over region FULL, for interpolation method UW_MEAN(1), using 0 pairs.
+ DEBUG 2: Number of matched pairs = 0 + DEBUG 2: Observations processed = 1166 + DEBUG 2: Rejected: station id = 0 + DEBUG 2: Rejected: obs var name = 1166 + DEBUG 2: Rejected: valid time = 0 + DEBUG 2: Rejected: bad obs value = 0 + DEBUG 2: Rejected: off the grid = 0 + DEBUG 2: Rejected: topography = 0 + DEBUG 2: Rejected: level mismatch = 0 + DEBUG 2: Rejected: quality marker = 0 + DEBUG 2: Rejected: message type = 0 + DEBUG 2: Rejected: masking region = 0 + DEBUG 2: Rejected: bad fcst value = 0 + DEBUG 2: Rejected: bad climo mean = 0 + DEBUG 2: Rejected: bad climo stdev = 0 + DEBUG 2: Rejected: mpr filter = 0 + DEBUG 2: Rejected: duplicates = 0 + + The list of rejection reason counts above matches the order in + which the filtering logic is applied in the code. In this example, + none of the point observations match the variable name requested + in the configuration file. So all of the 1166 observations are rejected + for the same reason. Q. What types of NetCDF files can MET read? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -There are three flavors of NetCDF that MET can read directly. + .. dropdown:: Answer + + There are three flavors of NetCDF that MET can read directly. -1. Gridded NetCDF output from one of the MET tools + 1. Gridded NetCDF output from one of the MET tools -2. Output from the WRF model that has been post-processed using the wrf_interp utility + 2. Output from the WRF model that has been post-processed using + the wrf_interp utility -3. NetCDF data following the `climate-forecast (CF) convention - `_ + 3. NetCDF data following the `climate-forecast (CF) convention + `_ -Lastly, users can write python scripts to pass data that's gridded to the -MET tools in memory. If the data doesn't fall into one of those categories, -then it's not a gridded dataset that MET can handle directly. Satellite data, -in general, will not be gridded. Typically it contains a dense mesh of data at -lat/lon points, but typically those lat/lon points are not evenly spaced onto -a regular grid. + Lastly, users can write python scripts to pass gridded data to the + MET tools in memory. If the data doesn't fall into one of those categories, + then it's not a gridded dataset that MET can handle directly. + Satellite data, in general, will not be gridded. Typically it + contains a dense mesh of data at lat/lon points, but + those lat/lon points are usually not evenly spaced onto + a regular grid. -While MET's point2grid tool does support some satellite data inputs, it is -limited. Using python embedding is another option for handling new datasets -not supported natively by MET. + While MET's point2grid tool does support some satellite data inputs, it is + limited. Using python embedding is another option for handling new datasets + not supported natively by MET. Q. How do I choose a time slice in a NetCDF file? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -When processing NetCDF files, the level information needs to be -specified to tell MET which 2D slice of data to use. The index is selected from -a value when it starts with "@" for vertical level (pressure or height) -and time. The actual time, @YYYYMMDD_HHMM, is allowed instead of selecting -the time index. - -Let's use plot_data_plane as an example: - -.. 
code-block:: none - - plot_data_plane \ - MERGE_20161201_20170228.nc \ - obs.ps \ - 'name="APCP"; level="(5,*,*)";' - - plot_data_plane \ - gtg_obs_forecast.20130730.i00.f00.nc \ - altitude_20000.ps \ - 'name = "edr"; level = "(@20130730_0000,@20000,*,*)";' - -Assuming that the first array is the time, this will select the 6-th -time slice of the APCP data and plot it since these indices are 0-based. + .. dropdown:: Answer + + When processing NetCDF files, the level information needs to be + specified to tell MET which 2D slice of data to use. + The index is selected from + a value when it starts with "@" for vertical level (pressure or height) + and time. The actual time, @YYYYMMDD_HHMM, is allowed instead of selecting + the time index. + + Let's use plot_data_plane as an example: + + .. code-block:: none + + plot_data_plane \ + MERGE_20161201_20170228.nc \ + obs.ps \ + 'name="APCP"; level="(5,*,*)";' + + plot_data_plane \ + gtg_obs_forecast.20130730.i00.f00.nc \ + altitude_20000.ps \ + 'name = "edr"; level = "(@20130730_0000,@20000,*,*)";' + + Assuming that the first array is the time, this will select the 6-th + time slice of the APCP data and plot it since these indices are 0-based. Q. How do I use the UNIX time conversion? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -Regarding the timing information in the NetCDF variable attributes: + .. dropdown:: Answer + + Regarding the timing information in the NetCDF variable attributes: -.. code-block:: none - - APCP_24:init_time_ut = 1306886400 ; - -“ut” stands for UNIX time, which is the number of seconds -since Jan 1, 1970. It is a convenient way of storing timing -information since it is easy to add/subtract. The UNIX date command -can be used to convert back/forth between unix time and time strings: + .. code-block:: none -To convert unix time to ymd_hms date: + APCP_24:init_time_ut = 1306886400 ; -.. code-block:: none - - date -ud '1970-01-01 UTC '1306886400' seconds' +%Y%m%d_%H%M%S 20110601_000000 - -To convert ymd_hms to unix date: - -.. code-block:: none - - date -ud ''2011-06-01' UTC '00:00:00'' +%s 1306886400 - -Regarding TRMM data, it may be easier to work with the binary data and -use the trmm2nc.R script described on this -`page `_ -under observation datasets. - -Follow the TRMM binary links to either the 3 or 24-hour accumulations, -save the files, and run them through that script. That is faster -and easier than trying to get an ASCII dump. That Rscript can also -subset the TRMM data if needed. Look for the section of it titled -"Output domain specification" and define the lat/lon's that needs -to be included in the output. - -Q. Does MET use a fixed-width output format for its ASCII output files? + “ut” stands for UNIX time, which is the number of seconds + since Jan 1, 1970. It is a convenient way of storing timing + information since it is easy to add/subtract. The UNIX date command + can be used to convert back/forth between unix time and time strings: + + To convert unix time to ymd_hms date: + + .. code-block:: none + + date -ud '1970-01-01 UTC '1306886400' seconds' +%Y%m%d_%H%M%S 20110601_000000 + + To convert ymd_hms to unix date: + + .. code-block:: none + + date -ud ''2011-06-01' UTC '00:00:00'' +%s 1306886400 + + Regarding TRMM data, it may be easier to work with the binary data and + use the trmm2nc.R script described on this + `page `_ + under observation datasets. + + Follow the TRMM binary links to either the 3 or 24-hour accumulations, + save the files, and run them through that script. 
That is faster + and easier than trying to get an ASCII dump. That Rscript can also + subset the TRMM data if needed. Look for the section of it titled + "Output domain specification" and define the lat/lon's that need + to be included in the output. +Q. Does MET use a fixed-width output format for its ASCII output files? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -MET does not use the Fortran-like fixed width format in its -ASCII output file. Instead, the column widths are adjusted for each -run to insert at least one space between adjacent columns. The header -columns of the MET output contain user-defined strings which may be -of arbitrary length. For example, columns such as MODEL, OBTYPE, and -DESC may be set by the user to any string value. Additionally, the -amount of precision written is also configurable. The -"output_precision" config file entry can be changed from its default -value of 5 decimal places to up to 12 decimal places, which would also -impact the column widths of the output. -Due to these issues, it is not possible to select a reasonable fixed -width for each column ahead of time. The AsciiTable class in MET does -a lot of work to line up the output columns, to make sure there is -at least one space between them. -If a fixed-width format is needed, the easiest option would be -writing a script to post-process the MET output into the fixed-width -format that is needed or that the code expects. + .. dropdown:: Answer + + MET does not use the Fortran-like fixed width format in its + ASCII output file. Instead, the column widths are adjusted for each + run to insert at least one space between adjacent columns. The header + columns of the MET output contain user-defined strings which may be + of arbitrary length. For example, columns such as MODEL, OBTYPE, and + DESC may be set by the user to any string value. Additionally, the + amount of precision written is also configurable. The + "output_precision" config file entry can be changed from its default + value of 5 decimal places to up to 12 decimal places, which would also + impact the column widths of the output. + + Due to these issues, it is not possible to select a reasonable fixed + width for each column ahead of time. The AsciiTable class in MET does + a lot of work to line up the output columns, to make sure there is + at least one space between them. + + If a fixed-width format is needed, the easiest option would be + writing a script to post-process the MET output into the fixed-width + format that is needed or that the code expects. Q. Do the ASCII output files created by MET use scientific notation? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -By default, the ASCII output files created by MET make use of -scientific notation when appropriate. The formatting of the -numbers that the AsciiTable class writes is handled by a call -to printf. The "%g" formatting option can result in -scientific notation: -http://www.cplusplus.com/reference/cstdio/printf/ + .. dropdown:: Answer + + By default, the ASCII output files created by MET make use of + scientific notation when appropriate. The formatting of the + numbers that the AsciiTable class writes is handled by a call + to printf. The "%g" formatting option can result in + scientific notation: + http://www.cplusplus.com/reference/cstdio/printf/ -It has been recommended that a configuration option be added to -MET to disable the use of scientific notation.
That enhancement -is planned for a future release. + It has been recommended that a configuration option be added to + MET to disable the use of scientific notation. That enhancement + is planned for a future release. Gen-Vx-Mask ----------- @@ -253,67 +265,69 @@ Gen-Vx-Mask Q. I have a list of stations to use for verification. I also have a poly region defined. If I specify both of these should the result be a union of them? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -These settings are defined in the "mask" section of the Point-Stat -configuration file. You can define masking regions in one of 3 ways, -as a "grid", a "poly" line file, or a "sid" list of station ID's. + .. dropdown:: Answer + + These settings are defined in the "mask" section of the Point-Stat + configuration file. You can define masking regions in one of 3 ways, + as a "grid", a "poly" line file, or a "sid" list of station ID's. -If you specify one entry for "poly" and one entry for "sid", you -should see output for those two different masks. Note that each of -these settings is an array of values, as indicated by the square -brackets "[]" in the default config file. If you specify 5 grids, -3 poly's, and 2 SID lists, you'd get output for those 10 separate -masking regions. Point-Stat does not compute unions or intersections -of masking regions. Instead, they are each processed separately. + If you specify one entry for "poly" and one entry for "sid", you + should see output for those two different masks. Note that each of + these settings is an array of values, as indicated by the square + brackets "[]" in the default config file. If you specify 5 grids, + 3 poly's, and 2 SID lists, you'd get output for those 10 separate + masking regions. Point-Stat does not compute unions or intersections + of masking regions. Instead, they are each processed separately. -Is it true that you really want to use a polyline to define an area -and then use a SID list to capture additional points outside of -that polyline? + Is it true that you really want to use a polyline to define an area + and then use a SID list to capture additional points outside of + that polyline? -If so, your options are: + If so, your options are: -1. Define one single SID list which include all the points currently - inside the polyline as well as the extra ones outside. + 1. Define one single SID list which include all the points currently + inside the polyline as well as the extra ones outside. -2. Continue verifying using one polyline and one SID list and - write partial sums and contingency table counts. + 2. Continue verifying using one polyline and one SID list and + write partial sums and contingency table counts. -Then aggregate the results together by running a Stat-Analysis job. + Then aggregate the results together by running a Stat-Analysis job. Q. How do I define a masking region with a GFS file? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -Grab a sample GFS file: + .. dropdown:: Answer + + Grab a sample GFS file: + + .. code-block:: none -.. code-block:: none - - wget - http://www.ftp.ncep.noaa.gov/data/nccf/com/gfs/prod/gfs/2016102512/gfs.t12z.pgrb2.0p50.f000 - -Use the MET regrid_data_plane tool to put some data on a -lat/lon grid over Europe: + wget + http://www.ftp.ncep.noaa.gov/data/nccf/com/gfs/prod/gfs/2016102512/gfs.t12z.pgrb2.0p50.f000 -.. 
code-block:: none + Use the MET regrid_data_plane tool to put some data on a + lat/lon grid over Europe: - regrid_data_plane gfs.t12z.pgrb2.0p50.f000 \ - 'latlon 100 100 25 0 0.5 0.5' gfs_euro.nc -field 'name="TMP"; level="Z2";' + .. code-block:: none -Run the MET gen_vx_mask tool to apply your polyline to the European domain: + regrid_data_plane gfs.t12z.pgrb2.0p50.f000 \ + 'latlon 100 100 25 0 0.5 0.5' gfs_euro.nc -field 'name="TMP"; level="Z2";' -.. code-block:: none + Run the MET gen_vx_mask tool to apply your polyline to the European domain: - gen_vx_mask gfs_euro.nc POLAND.poly POLAND_mask.nc + .. code-block:: none -Run the MET plot_data_plane tool to display the resulting mask field: + gen_vx_mask gfs_euro.nc POLAND.poly POLAND_mask.nc -.. code-block:: none - - plot_data_plane POLAND_mask.nc POLAND_mask.ps 'name="POLAND"; level="(*,*)";' + Run the MET plot_data_plane tool to display the resulting mask field: -In this example, the mask is in roughly the right spot, but there -are obvious problems with the latitude and longitude values used -to define that mask for Poland. + .. code-block:: none + + plot_data_plane POLAND_mask.nc POLAND_mask.ps 'name="POLAND"; level="(*,*)";' + + In this example, the mask is in roughly the right spot, but there + are obvious problems with the latitude and longitude values used + to define that mask for Poland. Grid-Stat --------- @@ -321,277 +335,286 @@ Grid-Stat Q. How do I define a complex masking region? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -A user can define intersections and unions of multiple fields to define masks. -Prior to running Grid-Stat, the user can run the Gen-VX-Mask tool one or -more times to define a more complex masking area by thresholding multiple -fields. + .. dropdown:: Answer + + A user can define intersections and unions of multiple fields to + define masks. + Prior to running Grid-Stat, the user can run the Gen-VX-Mask tool one or + more times to define a more complex masking area by thresholding multiple + fields. -For example, using a forecast GRIB file (fcst.grb) which contains 2 records, -one for 2-m temperature and a second for 6-hr accumulated precip. The only -grid points that are desired are grid points below freezing with non-zero -precip. The user should run Gen-Vx-Mask twice - once to define the -temperature mask and a second time to intersect that with the precip mask: + For example, using a forecast GRIB file (fcst.grb) which contains + 2 records, one for 2-m temperature and a second for 6-hr + accumulated precip. The only + grid points that are desired are grid points below freezing with non-zero + precip. The user should run Gen-Vx-Mask twice - once to define the + temperature mask and a second time to intersect that with the precip mask: -.. code-block:: none + .. code-block:: none - gen_vx_mask fcst.grb fcst.grb tmp_mask.nc \ - -type data \ - -mask_field 'name="TMP"; level="Z2"' -thresh le273 - gen_vx_mask tmp_mask.nc fcst.grb tmp_and_precip_mask.nc \ - -type data \ - -input_field 'name="TMP_Z2"; level="(*,*)";' \ - -mask_field 'name="APCP"; level="A6";' -thresh gt0 \ - -intersection -name "FREEZING_PRECIP" + gen_vx_mask fcst.grb fcst.grb tmp_mask.nc \ + -type data \ + -mask_field 'name="TMP"; level="Z2"' -thresh le273 + gen_vx_mask tmp_mask.nc fcst.grb tmp_and_precip_mask.nc \ + -type data \ + -input_field 'name="TMP_Z2"; level="(*,*)";' \ + -mask_field 'name="APCP"; level="A6";' -thresh gt0 \ + -intersection -name "FREEZING_PRECIP" -The first one is pretty straight-forward. 
+ The first one is pretty straight-forward. -1. The input field (fcst.grb) defines the domain for the mask. + 1. The input field (fcst.grb) defines the domain for the mask. -2. Since we're doing data masking and the data we want lives in - fcst.grb, we pass it in again as the mask_file. + 2. Since we're doing data masking and the data we want lives in + fcst.grb, we pass it in again as the mask_file. -3. Lastly "-mask_field" specifies the data we want from the mask file - and "-thresh" specifies the event threshold. + 3. Lastly "-mask_field" specifies the data we want from the mask file + and "-thresh" specifies the event threshold. -The second call is a bit tricky. + The second call is a bit tricky. -1. Do data masking (-type data) + 1. Do data masking (-type data) -2. Read the NetCDF variable named "TMP_Z2" from the input file (tmp_mask.nc) + 2. Read the NetCDF variable named "TMP_Z2" from the input file + (tmp_mask.nc) -3. Define the mask by reading 6-hour precip from the mask file - (fcst.grb) and looking for values > 0 (-mask_field) + 3. Define the mask by reading 6-hour precip from the mask file + (fcst.grb) and looking for values > 0 (-mask_field) -4. Apply intersection logic when combining the "input" value with - the "mask" value (-intersection). + 4. Apply intersection logic when combining the "input" value with + the "mask" value (-intersection). -5. Name the output NetCDF variable as "FREEZING_PRECIP" (-name). - This is totally optional, but convenient. + 5. Name the output NetCDF variable as "FREEZING_PRECIP" (-name). + This is totally optional, but convenient. -A user can write a script with multiple calls to Gen-Vx-Mask to -apply complex masking logic and then pass the output mask file -to Grid-Stat in its configuration file. + A user can write a script with multiple calls to Gen-Vx-Mask to + apply complex masking logic and then pass the output mask file + to Grid-Stat in its configuration file. Q. How do I use neighborhood methods to compute fraction skill score? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -A common application of fraction skill score (FSS) is comparing forecast -and observed thunderstorms. When computing FSS, first threshold the fields -to define events and non-events. Then look at successively larger and -larger areas around each grid point to see how the forecast event frequency -compares to the observed event frequency. - -Applying this method to rainfall (and monsoons) is also reasonable. -Keep in mind that Grid-Stat is the tool that computes FSS. Grid-Stat will -need to be run once for each evaluation time. As an example, evaluating -once per day, run Grid-Stat 122 times for the 122 days of a monsoon season. -This will result in 122 FSS values. These can be viewed as a time series, -or the Stat-Analysis tool could be used to aggregate them together into -a single FSS value, like this: - -.. code-block:: none - - stat_analysis -job aggregate -line_type NBRCNT \ - -lookin out/grid_stat - -Be sure to pick thresholds (e.g. for the thunderstorms and monsoons) -that capture the "events" that are of interest in studying. - -Also be aware that MET uses the "vld_thresh" setting in the configuration -file to decide how to handle data along the edge of the domain. Let us say -it is computing a fractional coverage field using a 5x5 neighborhood -and it is at the edge of the domain. 15 points contain valid data and -10 points are outside the domain. Grid-Stat computes the valid data ratio -as 15/25 = 0.6. 
Q. How do I use neighborhood methods to compute fraction skill score?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-A common application of fraction skill score (FSS) is comparing forecast
-and observed thunderstorms. When computing FSS, first threshold the fields
-to define events and non-events. Then look at successively larger and
-larger areas around each grid point to see how the forecast event frequency
-compares to the observed event frequency.
-
-Applying this method to rainfall (and monsoons) is also reasonable.
-Keep in mind that Grid-Stat is the tool that computes FSS. Grid-Stat will
-need to be run once for each evaluation time. As an example, evaluating
-once per day, run Grid-Stat 122 times for the 122 days of a monsoon season.
-This will result in 122 FSS values. These can be viewed as a time series,
-or the Stat-Analysis tool could be used to aggregate them together into
-a single FSS value, like this:
-
-.. code-block:: none
-
- stat_analysis -job aggregate -line_type NBRCNT \
- -lookin out/grid_stat
-
-Be sure to pick thresholds (e.g. for the thunderstorms and monsoons)
-that capture the "events" that are of interest in studying.
-
-Also be aware that MET uses the "vld_thresh" setting in the configuration
-file to decide how to handle data along the edge of the domain. Let us say
-it is computing a fractional coverage field using a 5x5 neighborhood
-and it is at the edge of the domain. 15 points contain valid data and
-10 points are outside the domain. Grid-Stat computes the valid data ratio
-as 15/25 = 0.6. Then it applies the valid data threshold. Suppose
-vld_thresh = 0.5. Since 0.6 > 0.5 MET will compute a fractional coverage
-value for that point using the 15 valid data points. Next suppose
-vld_thresh = 1.0. Since 0.6 is less than 1.0, MET will just skip that
-point by setting it to bad data.
-
-Setting vld_thresh = 1.0 will ensure that FSS will only be computed at
-points where all NxN values contain valid data. Setting it to 0.5 only
-requires half of them.

+ .. dropdown:: Answer
+
+ A common application of fraction skill score (FSS) is comparing forecast
+ and observed thunderstorms. When computing FSS, first threshold the fields
+ to define events and non-events. Then look at successively larger and
+ larger areas around each grid point to see how the forecast event frequency
+ compares to the observed event frequency.
+
+ Applying this method to rainfall (and monsoons) is also reasonable.
+ Keep in mind that Grid-Stat is the tool that computes FSS. Grid-Stat will
+ need to be run once for each evaluation time. As an example, evaluating
+ once per day, run Grid-Stat 122 times for the 122 days of a monsoon season.
+ This will result in 122 FSS values. These can be viewed as a time series,
+ or the Stat-Analysis tool could be used to aggregate them together into
+ a single FSS value, like this:
+
+ .. code-block:: none
+
+ stat_analysis -job aggregate -line_type NBRCNT \
+ -lookin out/grid_stat
+
+ Be sure to pick thresholds (e.g. for the thunderstorms and monsoons)
+ that capture the "events" that are of interest to the study.
+
+ Also be aware that MET uses the "vld_thresh" setting in the configuration
+ file to decide how to handle data along the edge of the domain. Let us say
+ it is computing a fractional coverage field using a 5x5 neighborhood
+ and it is at the edge of the domain. 15 points contain valid data and
+ 10 points are outside the domain. Grid-Stat computes the valid data ratio
+ as 15/25 = 0.6. Then it applies the valid data threshold. Suppose
+ vld_thresh = 0.5. Since 0.6 > 0.5, MET will compute a fractional coverage
+ value for that point using the 15 valid data points. Next suppose
+ vld_thresh = 1.0. Since 0.6 is less than 1.0, MET will just skip that
+ point by setting it to bad data.
+
+ Setting vld_thresh = 1.0 will ensure that FSS will only be computed at
+ points where all NxN values contain valid data. Setting it to 0.5 only
+ requires half of them.
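+
+ These neighborhood settings live in the "nbrhd" section of the
+ Grid-Stat configuration file. Below is a minimal sketch; the width
+ and coverage threshold values are illustrative, not recommendations:
+
+ .. code-block:: none
+
+     nbrhd = {
+        field      = BOTH;
+        shape      = SQUARE;
+        width      = [ 5 ];
+        cov_thresh = [ >=0.5 ];
+        vld_thresh = 1.0;
+     }
+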
Q. Is there an example of verifying forecast probabilities?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-There is an example of verifying probabilities in the test scripts
-included with the MET release. Take a look in:
-
-.. code-block:: none
-
- ${MET_BUILD_BASE}/scripts/config/GridStatConfig_POP_12
-
-The config file should look something like this:
-
-.. code-block:: none
-
- fcst = {
- wind_thresh = [ NA ];
- field = [
- {
- name = "LCDC";
- level = [ "L0" ];
- prob = TRUE;
- cat_thresh = [ >=0.0, >=0.1, >=0.2, >=0.3, >=0.4, >=0.5, >=0.6, >=0.7, >=0.8, >=0.9];
- }
- ];
- };

+ .. dropdown:: Answer

- obs = {
- wind_thresh = [ NA ];
- field = [
- {
- name = "WIND";
- level = [ "Z2" ];
- cat_thresh = [ >=34 ];
- }
- ];
- };
-
-The PROB flag is set to TRUE to tell grid_stat to process this as
-probability data. The cat_thresh is set to partition the probability
-values between 0 and 1. Note that if the probability data contains
-values from 0 to 100, MET automatically divides by 100 to rescale to
-the 0 to 1 range.

+ There is an example of verifying probabilities in the test scripts
+ included with the MET release. Take a look in:
+
+ .. code-block:: none
+
+ ${MET_BUILD_BASE}/scripts/config/GridStatConfig_POP_12
+
+ The config file should look something like this:
+
+ .. code-block:: none
+
+ fcst = {
+ wind_thresh = [ NA ];
+ field = [
+ {
+ name = "LCDC";
+ level = [ "L0" ];
+ prob = TRUE;
+ cat_thresh = [ >=0.0, >=0.1, >=0.2, >=0.3, >=0.4, >=0.5, >=0.6, >=0.7, >=0.8, >=0.9];
+ }
+ ];
+ };
+
+ obs = {
+ wind_thresh = [ NA ];
+ field = [
+ {
+ name = "WIND";
+ level = [ "Z2" ];
+ cat_thresh = [ >=34 ];
+ }
+ ];
+ };
+
+ The PROB flag is set to TRUE to tell grid_stat to process this as
+ probability data. The cat_thresh is set to partition the probability
+ values between 0 and 1. Note that if the probability data contains
+ values from 0 to 100, MET automatically divides by 100 to rescale to
+ the 0 to 1 range.

Q. What is an example of using Grid-Stat with regridding and masking turned on?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Run Grid-Stat using the following commands and the attached config file
-
-.. code-block:: none
-
- mkdir out
- grid_stat \
- gfs_4_20160220_0000_012.grb2 \
- ST4.2016022012.06h \
- GridStatConfig \
- -outdir out
-
-Note the following two sections of the Grid-Stat config file:
-
-.. code-block:: none
-
- regrid = {
- to_grid = OBS;
- vld_thresh = 0.5;
- method = BUDGET;
- width = 2;
- }
-
-This tells Grid-Stat to do verification on the "observation" grid.
-Grid-Stat reads the GFS and Stage4 data and then automatically regrids
-the GFS data to the Stage4 domain using budget interpolation.
-Use "FCST" to verify the forecast domain. And use either a named
-grid or a grid specification string to regrid both the forecast and
-observation to a common grid. For example, to_grid = "G212"; will
-regrid both to NCEP Grid 212 before comparing them.
-
-.. code-block:: none
-
- mask = { grid = [ "FULL" ];
- poly = [ "MET_BASE/poly/CONUS.poly" ]; }
-
-This will compute statistics over the FULL model domain as well
-as the CONUS masking area.
-
-To demonstrate that Grid-Stat worked as expected, run the following
-commands to plot its NetCDF matched pairs output file:
-
-.. code-block:: none
-
- plot_data_plane \
- out/grid_stat_120000L_20160220_120000V_pairs.nc \
- out/DIFF_APCP_06_A06_APCP_06_A06_CONUS.ps \
- 'name="DIFF_APCP_06_A06_APCP_06_A06_CONUS"; level="(*,*)";'
-
-Examine the resulting plot of that difference field.
-
-Lastly, there is another option for defining that masking region.
-Rather than passing the ascii CONUS.poly file to grid_stat, run the
-gen_vx_mask tool and pass the NetCDF output of that tool to grid_stat.
-The advantage to gen_vx_mask is that it will make grid_stat run a
-bit faster. It can be used to construct much more complex masking areas.

+ .. dropdown:: Answer
+
+ Run Grid-Stat using the following commands and the attached config file:
+
+ .. code-block:: none
+
+ mkdir out
+ grid_stat \
+ gfs_4_20160220_0000_012.grb2 \
+ ST4.2016022012.06h \
+ GridStatConfig \
+ -outdir out
+
+ Note the following two sections of the Grid-Stat config file:
+
+ .. code-block:: none
+
+ regrid = {
+ to_grid = OBS;
+ vld_thresh = 0.5;
+ method = BUDGET;
+ width = 2;
+ }
+
+ This tells Grid-Stat to do verification on the "observation" grid.
+ Grid-Stat reads the GFS and Stage4 data and then automatically regrids
+ the GFS data to the Stage4 domain using budget interpolation.
+ Use "FCST" to verify the forecast domain. And use either a named
+ grid or a grid specification string to regrid both the forecast and
+ observation to a common grid. For example, to_grid = "G212"; will
+ regrid both to NCEP Grid 212 before comparing them.
+
+ .. code-block:: none
+
+ mask = { grid = [ "FULL" ];
+ poly = [ "MET_BASE/poly/CONUS.poly" ]; }
+
+ This will compute statistics over the FULL model domain as well
+ as the CONUS masking area.
+
+ To demonstrate that Grid-Stat worked as expected, run the following
+ commands to plot its NetCDF matched pairs output file:
+
+ .. code-block:: none
+
+ plot_data_plane \
+ out/grid_stat_120000L_20160220_120000V_pairs.nc \
+ out/DIFF_APCP_06_A06_APCP_06_A06_CONUS.ps \
+ 'name="DIFF_APCP_06_A06_APCP_06_A06_CONUS"; level="(*,*)";'
+
+ Examine the resulting plot of that difference field.
+
+ Lastly, there is another option for defining that masking region.
+ Rather than passing the ASCII CONUS.poly file to grid_stat, run the
+ gen_vx_mask tool and pass the NetCDF output of that tool to grid_stat.
+ The advantage to gen_vx_mask is that it will make grid_stat run a
+ bit faster. It can be used to construct much more complex masking areas.
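+
+ As a sketch of that alternative (the output file name is
+ illustrative), first convert the polyline to a NetCDF mask:
+
+ .. code-block:: none
+
+     gen_vx_mask ST4.2016022012.06h MET_BASE/poly/CONUS.poly \
+     CONUS_mask.nc -type poly
+
+ Then reference the NetCDF output in the Grid-Stat config file, e.g.
+ poly = [ "CONUS_mask.nc" ];
+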
Q. How do I use one mask for the forecast field and a different mask for the observation field?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-You can't define different
-masks for the forecast and observation fields in MET tools. MET only lets you
-define a single mask (a masking grid or polyline) and then you choose
-whether you want to apply it to the FCST, OBS, or BOTH of them.
-
-Nonetheless, there is a way you can accomplish this logic using the
-gen_vx_mask tool. You run it once to pre-process the forecast field
-and a second time to pre-process the observation field. And then pass
-those output files to your desired MET tool.
-
-Below is an example using sample data that is included with the MET
-release tarball. To illustrate, this command will read 3-hour
-precip and 2-meter temperature, and resets the precip at any grid
-point where the temperature is less than 290 K to a value of 0:
-
-.. code-block:: none
-
- gen_vx_mask \
- data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 \
- data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 \
- APCP_03_where_2m_TMPge290.nc \
- -type data \
- -input_field 'name="APCP"; level="A3";' \
- -mask_field 'name="TMP"; level="Z2";' \
- -thresh 'lt290&&ne-9999' -v 4 -value 0

+ .. dropdown:: Answer

-So this is a bit confusing. Here's what is happening:

+ You can't define different
+ masks for the forecast and observation fields in MET tools.
+ MET only lets you
+ define a single mask (a masking grid or polyline) and then you choose
+ whether you want to apply it to the FCST, OBS, or BOTH of them.

-* The first argument is the input file which defines the grid.

+ Nonetheless, there is a way you can accomplish this logic using the
+ gen_vx_mask tool. You run it once to pre-process the forecast field
+ and a second time to pre-process the observation field. And then pass
+ those output files to your desired MET tool.

-* The second argument is used to define the masking region and
- since I'm reading data from the same input file, I've listed
- that file twice.

+ Below is an example using sample data that is included with the MET
+ release tarball. To illustrate, this command will read 3-hour
+ precip and 2-meter temperature, and reset the precip at any grid
+ point where the temperature is less than 290 K to a value of 0:

-* The third argument is the output file name.

+ .. code-block:: none

-* The type of masking is "data" masking where we read a 2D field of
- data and apply a threshold.

+ gen_vx_mask \
+ data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 \
+ data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 \
+ APCP_03_where_2m_TMPge290.nc \
+ -type data \
+ -input_field 'name="APCP"; level="A3";' \
+ -mask_field 'name="TMP"; level="Z2";' \
+ -thresh 'lt290&&ne-9999' -v 4 -value 0

-* By default, gen_vx_mask initializes each grid point to a value
- of 0. Specifying "-input_field" tells it to initialize each grid
- point to the value of that field (in my example 3-hour precip).

+ So this is a bit confusing. Here's what is happening:

-* The "-mask_field" option defines the data field that should be
- thresholded.

+ * The first argument is the input file which defines the grid.

-* The "-thresh" option defines the threshold to be applied.

+ * The second argument is used to define the masking region and
+ since I'm reading data from the same input file, I've listed
+ that file twice.

-* The "-value" option tells it what "mask" value to write to the
- output, and I've chosen 0.

+ * The third argument is the output file name.

-The example threshold is less than 290 and not -9999 (which is MET's
-internal missing data value). So any grid point where the 2 meter
-temperature is less than 290 K and is not bad data will be replaced
-by a value of 0.

+ * The type of masking is "data" masking where we read a 2D field of
+ data and apply a threshold.

-To more easily demonstrate this, I changed to using "-value 10" and ran
-the output through plot_data_plane:

+ * By default, gen_vx_mask initializes each grid point to a value
+ of 0. Specifying "-input_field" tells it to initialize each grid
+ point to the value of that field (in my example 3-hour precip).

-.. code-block:: none
-
- plot_data_plane \
- APCP_03_where_2m_TMPge290.nc \
- APCP_03_where_2m_TMPge290.ps \
- 'name="data_mask"; level="(*,*)";'

+ * The "-mask_field" option defines the data field that should be
+ thresholded.
+
+ * The "-thresh" option defines the threshold to be applied.
+
+ * The "-value" option tells it what "mask" value to write to the
+ output, and I've chosen 0.
+
+ The example threshold is less than 290 and not -9999 (which is MET's
+ internal missing data value). So any grid point where the 2 meter
+ temperature is less than 290 K and is not bad data will be replaced
+ by a value of 0.
+
+ To more easily demonstrate this, I changed to using "-value 10" and ran
+ the output through plot_data_plane:
+
+ .. code-block:: none
+
+ plot_data_plane \
+ APCP_03_where_2m_TMPge290.nc \
+ APCP_03_where_2m_TMPge290.ps \
+ 'name="data_mask"; level="(*,*)";'

-In the resulting plot, anywhere you see the pink value of 10, that's
-where gen_vx_mask has masked out the grid point.

+ In the resulting plot, anywhere you see the pink value of 10, that's
+ where gen_vx_mask has masked out the grid point.

Pcp-Combine
-----------

@@ -599,371 +622,384 @@ Pcp-Combine

Q. How do I add and subtract with Pcp-Combine?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-An example of running the MET pcp_combine tool to put NAM 3-hourly
-precipitation accumulations data into user-desired 3 hour intervals is
-provided below.

+ .. dropdown:: Answer
+
+ An example of running the MET pcp_combine tool to put NAM 3-hourly
+ precipitation accumulation data into user-desired 3-hour intervals is
+ provided below.

-If the user wanted a 0-3 hour accumulation, this is already available
-in the 03 UTC file. Run this file
-through pcp_combine as a pass-through to put it into NetCDF format:

+ If the user wanted a 0-3 hour accumulation, this is already available
+ in the 03 UTC file. Run this file
+ through pcp_combine as a pass-through to put it into NetCDF format:

-.. code-block:: none
-
- pcp_combine -add 03_file.grb 03 APCP_00_03.nc

+ .. code-block:: none

-If the user wanted the 3-6 hour accumulation, they would subtract
-0-6 and 0-3 accumulations:

+ pcp_combine -add 03_file.grb 03 APCP_00_03.nc
+
+ If the user wanted the 3-6 hour accumulation, they would subtract
+ 0-6 and 0-3 accumulations:

-.. code-block:: none
-
- pcp_combine -subtract 06_file.grb 06 03_file.grb 03 APCP_03_06.nc

+ .. code-block:: none

-Similarly, if they wanted the 6-9 hour accumulation, they would
-subtract 0-9 and 0-6 accumulations:

+ pcp_combine -subtract 06_file.grb 06 03_file.grb 03 APCP_03_06.nc

-.. code-block:: none

+ Similarly, if they wanted the 6-9 hour accumulation, they would
+ subtract 0-9 and 0-6 accumulations:

- pcp_combine -subtract 09_file.grb 09 06_file.grb 06 APCP_06_09.nc

+ .. code-block:: none

-And so on.

+ pcp_combine -subtract 09_file.grb 09 06_file.grb 06 APCP_06_09.nc
+
+ And so on, as sketched in the script below.

-Run the 0-3 and 12-15 through pcp_combine even though they already have
-the 3-hour accumulation. That way, all of the NAM files will be in the
-same file format, and can use the same configuration file settings for
-the other MET tools (grid_stat, mode, etc.). If the NAM files are a mix
-of GRIB and NetCDF, the logic would need to be a bit more complicated.

+ Run the 0-3 and 12-15 through pcp_combine even though they already have
+ the 3-hour accumulation. That way, all of the NAM files will be in the
+ same file format, and can use the same configuration file settings for
+ the other MET tools (grid_stat, mode, etc.). If the NAM files are a mix
+ of GRIB and NetCDF, the logic would need to be a bit more complicated.
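+
+ When many intervals are needed, these subtraction commands can be
+ scripted. Here is a minimal sketch, assuming files named like the
+ examples above (03_file.grb, 06_file.grb, and so on):
+
+ .. code-block:: none
+
+     #!/bin/sh
+     # Each pair is "current-hour previous-hour"; subtracting the
+     # accumulation through the previous hour leaves the 3-hour amount.
+     for pair in "06 03" "09 06" "12 09"; do
+        set -- $pair
+        pcp_combine -subtract ${1}_file.grb $1 \
+        ${2}_file.grb $2 APCP_${2}_${1}.nc
+     done
+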
Q. How do I combine 12-hour accumulated precipitation from two different initialization times?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-The "-sum" command assumes the same initialization time. Use the "-add"
-option instead.

+ .. dropdown:: Answer
+
+ The "-sum" command assumes the same initialization time. Use the "-add"
+ option instead.

-.. code-block:: none

+ .. code-block:: none

- pcp_combine -add \
- WRFPRS_1997-06-03_APCP_A12.nc 'name="APCP_12"; level="(*,*)";' \
- WRFPRS_d01_1997-06-04_00_APCP_A12.grb 12 \
- Sum.nc

+ pcp_combine -add \
+ WRFPRS_1997-06-03_APCP_A12.nc 'name="APCP_12"; level="(*,*)";' \
+ WRFPRS_d01_1997-06-04_00_APCP_A12.grb 12 \
+ Sum.nc

-For the first file, list the file name followed by a config string
-describing the field to use from the NetCDF file. For the second file,
-list the file name followed by the accumulation interval to use
-(12 for 12 hours). The output file, Sum.nc, will contain the
-combine 12-hour accumulated precipitation.

+ For the first file, list the file name followed by a config string
+ describing the field to use from the NetCDF file. For the second file,
+ list the file name followed by the accumulation interval to use
+ (12 for 12 hours). The output file, Sum.nc, will contain the
+ combined 12-hour accumulated precipitation.

-Here is a small excerpt from the pcp_combine usage statement:

+ Here is a small excerpt from the pcp_combine usage statement:

-Note: For “-add” and "-subtract”, the accumulation intervals may be
-substituted with config file strings. For that first file, we replaced
-the accumulation interval with a config file string.

+ Note: For “-add” and "-subtract”, the accumulation intervals may be
+ substituted with config file strings. For that first file, we replaced
+ the accumulation interval with a config file string.

-Here are 3 commands you could use to plot these data files:

+ Here are 3 commands you could use to plot these data files:

-.. code-block:: none

+ .. code-block:: none

- plot_data_plane WRFPRS_1997-06-03_APCP_A12.nc \
- WRFPRS_1997-06-03_APCP_A12.ps 'name="APCP_12"; level="(*,*)";'

+ plot_data_plane WRFPRS_1997-06-03_APCP_A12.nc \
+ WRFPRS_1997-06-03_APCP_A12.ps 'name="APCP_12"; level="(*,*)";'

-.. code-block:: none

+ .. code-block:: none

- plot_data_plane WRFPRS_d01_1997-06-04_00_APCP_A12.grb \
- WRFPRS_d01_1997-06-04_00_APCP_A12.ps 'name="APCP" level="A12";'

+ plot_data_plane WRFPRS_d01_1997-06-04_00_APCP_A12.grb \
+ WRFPRS_d01_1997-06-04_00_APCP_A12.ps 'name="APCP" level="A12";'

-.. code-block:: none

+ .. code-block:: none

- plot_data_plane sum.nc sum.ps 'name="APCP_24"; level="(*,*)";'

+ plot_data_plane sum.nc sum.ps 'name="APCP_24"; level="(*,*)";'

Q. How do I correct a precipitation time range?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Typically, accumulated precipitation is stored in GRIB files using an
-accumulation interval with a "time range" indicator value of 4. Here is
-a description of the different time range indicator values and
-meanings: http://www.nco.ncep.noaa.gov/pmb/docs/on388/table5.html

+ .. dropdown:: Answer
+
+ Typically, accumulated precipitation is stored in GRIB files using an
+ accumulation interval with a "time range" indicator value of 4. Here is
+ a description of the different time range indicator values and
+ meanings: http://www.nco.ncep.noaa.gov/pmb/docs/on388/table5.html

-For example, take a look at the APCP in the GRIB files included in the
-MET tar ball:

+ For example, take a look at the APCP in the GRIB files included in the
+ MET tar ball:

-.. code-block:: none

+ .. code-block:: none

- wgrib ${MET_BUILD_BASE}/data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 | grep APCP
- 1:0:d=05080700:APCP:kpds5=61:kpds6=1:kpds7=0:TR=4:P1=0: \
- P2=12:TimeU=1:sfc:0- 12hr acc:NAve=0
- 2:31408:d=05080700:APCP:kpds5=61:kpds6=1:kpds7=0:TR=4: \
- P1=9:P2=12:TimeU=1:sfc:9- 12hr acc:NAve=0

+ wgrib ${MET_BUILD_BASE}/data/sample_fcst/2005080700/wrfprs_ruc13_12.tm00_G212 | grep APCP
+ 1:0:d=05080700:APCP:kpds5=61:kpds6=1:kpds7=0:TR=4:P1=0: \
+ P2=12:TimeU=1:sfc:0- 12hr acc:NAve=0
+ 2:31408:d=05080700:APCP:kpds5=61:kpds6=1:kpds7=0:TR=4: \
+ P1=9:P2=12:TimeU=1:sfc:9- 12hr acc:NAve=0

-The "TR=4" indicates that these records contain an accumulation
-between times P1 and P2. In the first record, the precip is accumulated
-between 0 and 12 hours. In the second record, the precip is accumulated
-between 9 and 12 hours.

+ The "TR=4" indicates that these records contain an accumulation
+ between times P1 and P2. In the first record, the precip is accumulated
+ between 0 and 12 hours. In the second record, the precip is accumulated
+ between 9 and 12 hours.

-However, the GRIB data uses a time range indicator of 5, not 4.
+ However, the GRIB data uses a time range indicator of 5, not 4.

-.. code-block:: none

+ .. code-block:: none

- wgrib rmf_gra_2016040600.24 | grep APCP
- 291:28360360:d=16040600:APCP:kpds5=61:kpds6=1:kpds7=0: \
- TR=5:P1=0:P2=24:TimeU=1:sfc:0-24hr diff:NAve=0

+ wgrib rmf_gra_2016040600.24 | grep APCP
+ 291:28360360:d=16040600:APCP:kpds5=61:kpds6=1:kpds7=0: \
+ TR=5:P1=0:P2=24:TimeU=1:sfc:0-24hr diff:NAve=0

-pcp_combine is looking in "rmf_gra_2016040600.24" for a 24 hour
-*accumulation*, but since the time range indicator is no 4, it doesn't
-find a match.

+ pcp_combine is looking in "rmf_gra_2016040600.24" for a 24 hour
+ *accumulation*, but since the time range indicator is not 4, it doesn't
+ find a match.

-If possible switch the time range indicator to 4 on the GRIB files. If
-this is not possible, there is another workaround. Instead of telling
-pcp_combine to look for a particular accumulation interval, give it a
-more complete description of the chosen field to use from each file.
-Here is an example:

+ If possible, switch the time range indicator to 4 on the GRIB files. If
+ this is not possible, there is another workaround. Instead of telling
+ pcp_combine to look for a particular accumulation interval, give it a
+ more complete description of the chosen field to use from each file.
+ Here is an example:

-.. code-block:: none

+ .. code-block:: none

- pcp_combine -add rmf_gra_2016040600.24 'name="APCP"; level="L0-24";' \
- rmf_gra_2016040600_APCP_00_24.nc
-
-The resulting file should have the accumulation listed at 24h rather than 0-24.

+ pcp_combine -add rmf_gra_2016040600.24 'name="APCP"; level="L0-24";' \
+ rmf_gra_2016040600_APCP_00_24.nc
+
+ The resulting file should have the accumulation listed at
+ 24h rather than 0-24.

Q. How do I use Pcp-Combine as a pass-through to simply reformat from GRIB to NetCDF or to change output variable name?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-The pcp_combine tool is typically used to modify the accumulation interval
-of precipitation amounts in model and/or analysis datasets. For example,
-when verifying model output in GRIB format containing runtime accumulations
-of precipitation, run the pcp_combine -subtract option every 6 hours to
-create 6-hourly precipitation amounts. In this example, it is not really
-necessary to run pcp_combine on the 6-hour GRIB forecast file since the
-model output already contains the 0 to 6 hour accumulation. However, the
-output of pcp_combine is typically passed to point_stat, grid_stat, or mode
-for verification. Having the 6-hour forecast in GRIB format and all other
-forecast hours in NetCDF format (output of pcp_combine) makes the logic
-for configuring the other MET tools messy. To make the configuration
-consistent for all forecast hours, one option is to choose to run
-pcp_combine as a pass-through to simply reformat from GRIB to NetCDF.
-Listed below is an example of passing a single record to the
-pcp_combine -add option to do the reformatting:
-
-.. code-block:: none
-
- $MET_BUILD/bin/pcp_combine -add forecast_F06.grb \
- 'name="APCP"; level="A6";' \
- forecast_APCP_06_F06.nc -name APCP_06
-
-Reformatting from GRIB to NetCDF may be done for any other reason the
-user may have. For example, the -name option can be used to define the
-NetCDF output variable name. Presuming this file is then passed to
-another MET tool, the new variable name (CompositeReflectivity) will
-appear in the output of downstream tools:
-
-..
code-block:: none
-
- $MET_BUILD/bin/pcp_combine -add forecast.grb \
- 'name="REFC"; level="L0"; GRIB1_ptv=129; lead_time="120000";' \
- forecast.nc -name CompositeReflectivity

+ .. dropdown:: Answer
+
+ The pcp_combine tool is typically used to modify the accumulation interval
+ of precipitation amounts in model and/or analysis datasets. For example,
+ when verifying model output in GRIB format containing runtime accumulations
+ of precipitation, run the pcp_combine -subtract option every 6 hours to
+ create 6-hourly precipitation amounts. In this example, it is not really
+ necessary to run pcp_combine on the 6-hour GRIB forecast file since the
+ model output already contains the 0 to 6 hour accumulation. However, the
+ output of pcp_combine is typically passed to point_stat, grid_stat, or mode
+ for verification. Having the 6-hour forecast in GRIB format and all other
+ forecast hours in NetCDF format (output of pcp_combine) makes the logic
+ for configuring the other MET tools messy. To make the configuration
+ consistent for all forecast hours, one option is to choose to run
+ pcp_combine as a pass-through to simply reformat from GRIB to NetCDF.
+ Listed below is an example of passing a single record to the
+ pcp_combine -add option to do the reformatting:
+
+ .. code-block:: none
+
+ $MET_BUILD/bin/pcp_combine -add forecast_F06.grb \
+ 'name="APCP"; level="A6";' \
+ forecast_APCP_06_F06.nc -name APCP_06
+
+ Reformatting from GRIB to NetCDF may be done for any other reason the
+ user may have. For example, the -name option can be used to define the
+ NetCDF output variable name. Presuming this file is then passed to
+ another MET tool, the new variable name (CompositeReflectivity) will
+ appear in the output of downstream tools:
+
+ .. code-block:: none
+
+ $MET_BUILD/bin/pcp_combine -add forecast.grb \
+ 'name="REFC"; level="L0"; GRIB1_ptv=129; lead_time="120000";' \
+ forecast.nc -name CompositeReflectivity

Q. How do I use “-pcprx" to run a project faster?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-To run a project faster, the “-pcprx” option may be used to narrow the
-search down to whatever regular expression you provide. Here are a two
-examples:

+ .. dropdown:: Answer

-.. code-block:: none
-
- # Only using Stage IV data (ST4)
- pcp_combine -sum 00000000_000000 06 \
- 20161015_18 12 ST4.2016101518.APCP_12_SUM.nc -pcprx "ST4.*.06h"

+ To run a project faster, the “-pcprx” option may be used to narrow the
+ search down to whatever regular expression you provide. Here are two
+ examples:
+
+ .. code-block:: none

- # Specify that files starting with pgbq[number][number]be used:
- pcp_combine \
- -sum 20160221_18 06 20160222_18 24 \
- gfs_APCP_24_20160221_18_F00_F24.nc \
- -pcpdir /scratch4/BMC/shout/ptmp/Andrew.Kren/pre2016c3_corr/temp \
- -pcprx 'pgbq[0-9][0-9].gfs.2016022118' -v 3

+ # Only using Stage IV data (ST4)
+ pcp_combine -sum 00000000_000000 06 \
+ 20161015_18 12 ST4.2016101518.APCP_12_SUM.nc -pcprx "ST4.*.06h"
+
+ # Specify that files starting with pgbq[number][number] be used:
+ pcp_combine \
+ -sum 20160221_18 06 20160222_18 24 \
+ gfs_APCP_24_20160221_18_F00_F24.nc \
+ -pcpdir /scratch4/BMC/shout/ptmp/Andrew.Kren/pre2016c3_corr/temp \
+ -pcprx 'pgbq[0-9][0-9].gfs.2016022118' -v 3

Q. How do I enter the time format correctly?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Here is an **incorrect example** of running pcp_combine with sub-hourly
-accumulation intervals:

+ ..
dropdown:: Answer + + Here is an **incorrect example** of running pcp_combine with sub-hourly + accumulation intervals: -.. code-block:: none + .. code-block:: none - # incorrect example: - pcp_combine -subtract forecast.grb 0055 \ - forecast2.grb 0005 forecast.nc -field APCP + # incorrect example: + pcp_combine -subtract forecast.grb 0055 \ + forecast2.grb 0005 forecast.nc -field APCP -The time signature is entered incorrectly. Let’s assume that "0055" -meant 0 hours and 55 minutes and "0005" meant 0 hours and 5 minutes. + The time signature is entered incorrectly. Let’s assume that "0055" + meant 0 hours and 55 minutes and "0005" meant 0 hours and 5 minutes. -Looking at the usage statement for pcp_combine (just type pcp_combine with -no arguments): "accum1" indicates the accumulation interval to be used -from in_file1 in HH[MMSS] format (required). + Looking at the usage statement for pcp_combine (just type pcp_combine with + no arguments): "accum1" indicates the accumulation interval to be used + from in_file1 in HH[MMSS] format (required). -The time format listed "HH[MMSS]" means specifying hours or -hours/minutes/seconds. The incorrect example is using hours/minutes. + The time format listed "HH[MMSS]" means specifying hours or + hours/minutes/seconds. The incorrect example is using hours/minutes. -Below is the **correct example**. Add the seconds to the end of the -time strings, like this: + Below is the **correct example**. Add the seconds to the end of the + time strings, like this: -.. code-block:: none + .. code-block:: none - # correct example: - pcp_combine -subtract forecast.grb 005500 \ - forecast2.grb 000500 forecast.nc -field APCP + # correct example: + pcp_combine -subtract forecast.grb 005500 \ + forecast2.grb 000500 forecast.nc -field APCP Q. How do I use Pcp-Combine when my GRIB data doesn't have the appropriate accumulation interval time range indicator? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A. -Run wgrib on the data files and the output is listed below: + .. dropdown:: Answer + + Run wgrib on the data files and the output is listed below: -.. code-block:: none + .. code-block:: none - 279:503477484:d=15062313:APCP:kpds5=61:kpds6=1:kpds7=0:TR= 10:P1=3:P2=247:TimeU=0:sfc:1015min \ - fcst:NAve=0 \ - 279:507900854:d=15062313:APCP:kpds5=61:kpds6=1:kpds7=0:TR= 10:P1=3:P2=197:TimeU=0:sfc:965min \ - fcst:NAve=0 + 279:503477484:d=15062313:APCP:kpds5=61:kpds6=1:kpds7=0:TR= 10:P1=3:P2=247:TimeU=0:sfc:1015min \ + fcst:NAve=0 \ + 279:507900854:d=15062313:APCP:kpds5=61:kpds6=1:kpds7=0:TR= 10:P1=3:P2=197:TimeU=0:sfc:965min \ + fcst:NAve=0 -Notice the output which says "TR=10". TR means time range indicator and -a value of 10 means that the level information contains an instantaneous -forecast time, not an accumulation interval. + Notice the output which says "TR=10". TR means time range indicator and + a value of 10 means that the level information contains an instantaneous + forecast time, not an accumulation interval. -Here's a table describing the TR values: -http://www.nco.ncep.noaa.gov/pmb/docs/on388/table5.html + Here's a table describing the TR values: + http://www.nco.ncep.noaa.gov/pmb/docs/on388/table5.html -The default logic for pcp_combine is to look for GRIB code 61 (i.e. APCP) -defined with an accumulation interval (TR = 4). Since the data doesn't -meet that criteria, the default logic of pcp_combine won't work. 
The
-arguments need to be more specific to tell pcp_combine exactly what to do.

+ The default logic for pcp_combine is to look for GRIB code 61 (i.e. APCP)
+ defined with an accumulation interval (TR = 4). Since the data doesn't
+ meet that criteria, the default logic of pcp_combine won't work. The
+ arguments need to be more specific to tell pcp_combine exactly what to do.

-Try the command:

+ Try the command:

-.. code-block:: none

+ .. code-block:: none

- pcp_combine -subtract \
- forecast.grb 'name="APCP"; level="L0"; lead_time="165500";' \
- forecast2.grb 'name="APCP"; level="L0"; lead_time="160500";' \
- forecast.nc -name APCP_A005000

+ pcp_combine -subtract \
+ forecast.grb 'name="APCP"; level="L0"; lead_time="165500";' \
+ forecast2.grb 'name="APCP"; level="L0"; lead_time="160500";' \
+ forecast.nc -name APCP_A005000

-Some things to point out here:

+ Some things to point out here:

-1. Notice in the wgrib output that the forecast times are 1015 min and
- 965 min. In HHMMSS format, that's "165500" and "160500".

+ 1. Notice in the wgrib output that the forecast times are 1015 min and
+ 965 min. In HHMMSS format, that's "165500" and "160500".

-2. An accumulation interval can’t be specified since the data isn't stored
- that way. Instead, use a config file string to describe the data to use.

+ 2. An accumulation interval can’t be specified since the data
+ isn't stored that way. Instead, use a config file string to
+ describe the data to use.

-3. The config file string specifies a "name" (APCP) and "level" string. APCP
- is defined at the surface, so a level value of 0 (L0) was specified.

+ 3. The config file string specifies a "name" (APCP) and "level" string.
+ APCP is defined at the surface, so a level value of 0 (L0) was specified.

-4. Technically, the "lead_time" doesn’t need to be specified at all,
- pcp_combine
- would find the single APCP record in each input GRIB file and use them.
- But just in case, the lead_time option was included to be extra certain to
- get exactly the data that is needed.

+ 4. Technically, the "lead_time" doesn’t need to be specified at all;
+ pcp_combine
+ would find the single APCP record in each input GRIB file and use them.
+ But just in case, the lead_time option was included to be extra
+ certain to get exactly the data that is needed.

-5. The default output variable name pcp_combine would write would be
- "APCP_L0". However, to indicate that its a 50-minute
- "accumulation interval" use a
- different output variable name (APCP_A005000). Any string name is
- possible. Maybe "Precip50Minutes" or "RAIN50". But whatever string is
- chosen will be used in the Grid-Stat, Point-Stat, or MODE config file to
- tell that tool what variable to process.

+ 5. The default output variable name pcp_combine would write would be
+ "APCP_L0". However, to indicate that it's a 50-minute
+ "accumulation interval", use a
+ different output variable name (APCP_A005000). Any string name is
+ possible. Maybe "Precip50Minutes" or "RAIN50". But whatever string is
+ chosen will be used in the Grid-Stat, Point-Stat, or MODE config file
+ to tell that tool what variable to process (see the sketch below).
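+
+ For instance, if the output variable is named APCP_A005000, a
+ downstream Grid-Stat or Point-Stat config file might reference it as
+ sketched here; the level string "(*,*)" selects the full 2D field
+ from the NetCDF output:
+
+ .. code-block:: none
+
+     fcst = {
+        field = [
+           {
+             name  = "APCP_A005000";
+             level = [ "(*,*)" ];
+           }
+        ];
+     };
+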
Q. How do I use “-sum”, “-add”, and “-subtract“ to achieve the same accumulation interval?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Here is an example of using pcp_combine to put GFS into 24- hour intervals
-for comparison against 24-hourly StageIV precipitation with GFS data
-through the pcp_combine tool. Be aware that the 24-hour StageIV data is
-defined as an accumulation from 12Z on one day to 12Z on the next day:
-http://www.emc.ncep.noaa.gov/mmb/ylin/pcpanl/stage4/
-
-Therefore, only the 24-hour StageIV data can be used to evaluate 12Z to
-12Z accumulations from the model. Alternatively, the 6- hour StageIV
-accumulations could be used to evaluate any 24 hour accumulation from
-the model. For the latter, run the 6-hour StageIV files through pcp_combine
-to generate the desired 24-hour accumulation.
-
-Here is an example. Run pcp_combine to compute 24-hour accumulations for
-GFS. In this example, process the 20150220 00Z initialization of GFS.
-
-.. code-block:: none
-
- pcp_combine \
- -sum 20150220_00 06 20150221_00 24 \
- gfs_APCP_24_20150220_00_F00_F24.nc \
- -pcprx "gfs_4_20150220_00.*grb2" \
- -pcpdir /d1/model_data/20150220
-
-pcp_combine is looking in the */d1/SBU/GFS/model_data/20150220* directory
-at files which match this regular expression "gfs_4_20150220_00.*grb2".
-That directory contains data for 00, 06, 12, and 18 hour initializations,
-but the "-pcprx" option narrows the search down to the 00 hour
-initialization which makes it run faster. It inspects all the matching
-files, looking for 6-hour APCP data to sum up to a 24-hour accumulation
-valid at 20150221_00. This results in a 24-hour accumulation between
-forecast hours 0 and 24.
-
-The following command will compute the 24-hour accumulation between forecast
-hours 12 and 36:
-
-.. code-block:: none
-
- pcp_combine \
- -sum 20150220_00 06 20150221_12 24 \
- gfs_APCP_24_20150220_00_F12_F36.nc \
- -pcprx "gfs_4_20150220_00.*grb2" \
- -pcpdir /d1/model_data/20150220
-
-The "-sum" command is meant to make things easier by searching the
-directory. But instead of using "-sum", another option would be the
-"- add" command. Explicitly list the 4 files that need to be extracted
-from the 6-hour APCP and add them up to 24. In the directory structure,
-the previous "-sum" job could be rewritten with "-add" like this:
-
-.. code-block:: none
-
- pcp_combine -add \
- /d1/model_data/20150220/gfs_4_20150220_0000_018.grb2 06 \
- /d1/model_data/20150220/gfs_4_20150220_0000_024.grb2 06 \
- /d1/model_data/20150220/gfs_4_20150220_0000_030.grb2 06 \
- /d1/model_data/20150220/gfs_4_20150220_0000_036.grb2 06 \
- gfs_APCP_24_20150220_00_F12_F36_add_option.nc
-
-This example explicitly tells pcp_combine which files to read and
-what accumulation interval (6 hours) to extract from them. The resulting
-output should be identical to the output of the "-sum" command.

+ .. dropdown:: Answer
+
+ Here is an example of using pcp_combine to put GFS data into 24-hour
+ intervals for comparison against 24-hourly StageIV precipitation.
+ Be aware that the 24-hour StageIV data is
+ defined as an accumulation from 12Z on one day to 12Z on the next day:
+ http://www.emc.ncep.noaa.gov/mmb/ylin/pcpanl/stage4/
+
+ Therefore, only the 24-hour StageIV data can be used to evaluate 12Z to
+ 12Z accumulations from the model. Alternatively, the 6-hour StageIV
+ accumulations could be used to evaluate any 24-hour accumulation from
+ the model. For the latter, run the 6-hour StageIV files through
+ pcp_combine to generate the desired 24-hour accumulation.
+
+ Here is an example. Run pcp_combine to compute 24-hour accumulations for
+ GFS. In this example, process the 20150220 00Z initialization of GFS.
+
+ ..
code-block:: none
+
+ pcp_combine \
+ -sum 20150220_00 06 20150221_00 24 \
+ gfs_APCP_24_20150220_00_F00_F24.nc \
+ -pcprx "gfs_4_20150220_00.*grb2" \
+ -pcpdir /d1/model_data/20150220
+
+ pcp_combine is looking in the */d1/model_data/20150220* directory
+ at files which match this regular expression "gfs_4_20150220_00.*grb2".
+ That directory contains data for 00, 06, 12, and 18 hour initializations,
+ but the "-pcprx" option narrows the search down to the 00 hour
+ initialization, which makes it run faster. It inspects all the matching
+ files, looking for 6-hour APCP data to sum up to a 24-hour accumulation
+ valid at 20150221_00. This results in a 24-hour accumulation between
+ forecast hours 0 and 24.
+
+ The following command will compute the 24-hour accumulation between
+ forecast hours 12 and 36:
+
+ .. code-block:: none
+
+ pcp_combine \
+ -sum 20150220_00 06 20150221_12 24 \
+ gfs_APCP_24_20150220_00_F12_F36.nc \
+ -pcprx "gfs_4_20150220_00.*grb2" \
+ -pcpdir /d1/model_data/20150220
+
+ The "-sum" command is meant to make things easier by searching the
+ directory. But instead of using "-sum", another option would be the
+ "-add" command. Explicitly list the 4 files that need to be extracted
+ from the 6-hour APCP and add them up to 24. In the directory structure,
+ the previous "-sum" job could be rewritten with "-add" like this:
+
+ .. code-block:: none
+
+ pcp_combine -add \
+ /d1/model_data/20150220/gfs_4_20150220_0000_018.grb2 06 \
+ /d1/model_data/20150220/gfs_4_20150220_0000_024.grb2 06 \
+ /d1/model_data/20150220/gfs_4_20150220_0000_030.grb2 06 \
+ /d1/model_data/20150220/gfs_4_20150220_0000_036.grb2 06 \
+ gfs_APCP_24_20150220_00_F12_F36_add_option.nc
+
+ This example explicitly tells pcp_combine which files to read and
+ what accumulation interval (6 hours) to extract from them. The resulting
+ output should be identical to the output of the "-sum" command.

Q. What is the difference between “-sum” vs. “-add”?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-The -sum and -add options both do the same thing. It's just that
-'-sum' could find files more quickly with the use of the -pcprx flag.
-This could also be accomplished by using a calling script.

+ .. dropdown:: Answer
+
+ The -sum and -add options both do the same thing. It's just that
+ '-sum' could find files more quickly with the use of the -pcprx flag.
+ This could also be accomplished by using a calling script.

Q. How do I select a specific GRIB record?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-In this example, record 735 needs to be selected.
-
-.. code-block:: none

+ .. dropdown:: Answer

- pcp_combine -add 20160101_i12_f015_HRRR_wrfnat.grb2 \
- 'name="APCP"; level="R735";' \
- -name "APCP_01" HRRR_wrfnat.20160101_i12_f015.nc

+ In this example, record 735 needs to be selected.
+
+ .. code-block:: none

-Instead of having the level as "L0", tell it to use "R735" to select
-grib record 735.

+ pcp_combine -add 20160101_i12_f015_HRRR_wrfnat.grb2 \
+ 'name="APCP"; level="R735";' \
+ -name "APCP_01" HRRR_wrfnat.20160101_i12_f015.nc
+
+ Instead of having the level as "L0", tell it to use "R735" to select
+ GRIB record 735.
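+
+ To find the record number in the first place, one option is to list
+ the file inventory with the wgrib2 utility (not part of MET); the
+ leading number on each line of output is the record number:
+
+ .. code-block:: none
+
+     wgrib2 20160101_i12_f015_HRRR_wrfnat.grb2 | grep "APCP"
+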
Plot-Data-Plane
---------------

@@ -971,112 +1007,117 @@ Plot-Data-Plane

Q. How do I inspect Gen-Vx-Mask output?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Check to see if the call to Gen-Vx-Mask actually did create good output
-with Plot-Data-Plane. The following commands assume that the MET executables
-are found in your path.
-
-.. code-block:: none

+ .. dropdown:: Answer

- plot_data_plane \
- out/gen_vx_mask/CONUS_poly.nc \
- out/gen_vx_mask/CONUS_poly.ps \
- 'name="CONUS"; level="(*,*)";'

+ Check to see if the call to Gen-Vx-Mask actually did create good output
+ with Plot-Data-Plane. The following commands assume that the MET
+ executables are found in your path.

-View that postscript output file, using something like "gv"
-for ghostview:

+ .. code-block:: none

-.. code-block:: none
-
- gv out/gen_vx_mask/CONUS_poly.ps

+ plot_data_plane \
+ out/gen_vx_mask/CONUS_poly.nc \
+ out/gen_vx_mask/CONUS_poly.ps \
+ 'name="CONUS"; level="(*,*)";'
+
+ View that postscript output file, using something like "gv"
+ for ghostview:
+
+ .. code-block:: none
+
+ gv out/gen_vx_mask/CONUS_poly.ps

-Please review a map of 0's and 1's over the USA to determine if the output
-file is what the user expects. It always a good idea to start with
-plot_data_plane when working with data to make sure MET
-is plotting the data correctly and in the expected location.

+ Please review a map of 0's and 1's over the USA to determine if the output
+ file is what the user expects. It is always a good idea to start with
+ plot_data_plane when working with data to make sure MET
+ is plotting the data correctly and in the expected location.

Q. How do I specify the GRIB version?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-When MET reads Gridded data files, it must determine the type of
-file it's reading. The first thing it checks is the suffix of the file.
-The following are all interpreted as GRIB1: .grib, .grb, and .gb.
-While these mean GRIB2: .grib2, .grb2, and .gb2.

+ .. dropdown:: Answer
+
+ When MET reads gridded data files, it must determine the type of
+ file it's reading. The first thing it checks is the suffix of the file.
+ The following are all interpreted as GRIB1: .grib, .grb, and .gb.
+ While these mean GRIB2: .grib2, .grb2, and .gb2.

-There are 2 choices to control how MET interprets a grib file. Renaming
-the files to use a particular suffix, or keep them
-named and explicitly tell MET to interpret them as GRIB1 or GRIB2 using
-the "file_type" configuration option.

+ There are 2 choices to control how MET interprets a GRIB file: rename
+ the files to use a particular suffix, or keep their current names
+ and explicitly tell MET to interpret them as GRIB1 or GRIB2 using
+ the "file_type" configuration option.

-The examples below use the plot_data_plane tool to plot the data. Set

+ The examples below use the plot_data_plane tool to plot the data. Set

-.. code-block:: none
-
- "file_type = GRIB2;"

+ .. code-block:: none
+
+ "file_type = GRIB2;"

-To keep the files named this as they are, add "file_type = GRIB2;" to all the
-MET configuration files (i.e. Grid-Stat, MODE, and so on) that you use:

+ To keep the files named as they are, add "file_type = GRIB2;"
+ to all the MET configuration files (i.e. Grid-Stat, MODE, and so on)
+ that you use:

-.. code-block:: none
-
- plot_data_plane \
- test_2.5_prog.grib \
- test_2.5_prog.ps \
- 'name="TSTM"; level="A0"; file_type=GRIB2;' \
- -plot_range 0 100

+ .. code-block:: none
+
+ plot_data_plane \
+ test_2.5_prog.grib \
+ test_2.5_prog.ps \
+ 'name="TSTM"; level="A0"; file_type=GRIB2;' \
+ -plot_range 0 100

Q. How do I test the variable naming convention? (Record number example.)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Make sure MET can read GRIB2 data. Plot the data from that GRIB2 file
-by running:

+ .. dropdown:: Answer
+
+ Make sure MET can read GRIB2 data.
+ Plot the data from that GRIB2 file
+ by running:

-.. code-block:: none

+ .. code-block:: none

- plot_data_plane LTIA98_KWBR_201305180600.grb2 tmp_z2.ps 'name="TMP"; level="R2";

+ plot_data_plane LTIA98_KWBR_201305180600.grb2 tmp_z2.ps 'name="TMP"; level="R2";'

-"R2" tells MET to plot record number 2. Record numbers 1 and 2 both
-contain temperature data and 2-meters. Here's some wgrib2 output:

+ "R2" tells MET to plot record number 2. Record numbers 1 and 2 both
+ contain temperature data at 2 meters. Here's some wgrib2 output:

-.. code-block:: none

+ .. code-block:: none

- 1:0:d=2013051806:TMP:2 m above ground:anl:analysis/forecast error 2:3323062:d=2013051806:TMP:2 m above ground:anl:

+ 1:0:d=2013051806:TMP:2 m above ground:anl:analysis/forecast error 2:3323062:d=2013051806:TMP:2 m above ground:anl:

-The GRIB id info has been the same between records 1 and 2.

+ The GRIB id info is the same for records 1 and 2.

Q. How do I compute and verify wind speed?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Here's how to compute and verify wind speed using MET. Good news, MET
-already includes logic for deriving wind speed on the fly. The GRIB
-abbreviation for wind speed is WIND. To request WIND from a GRIB1 or
-GRIB2 file, MET first checks to see if it already exists in the current
-file. If so, it'll use it as is. If not, it'll search for the corresponding
-U and V records and derive wind speed to use on the fly.
-
-In this example the RTMA file is named rtma.grb2 and the UPP file is
-named wrf.grb, please try running the following commands to plot wind speed:
-
-.. code-block:: none
-
- plot_data_plane wrf.grb wrf_wind.ps \
- 'name"WIND"; level="Z10";' -v 3
- plot_data_plane rtma.grb2 rtma_wind.ps \
- 'name"WIND"; level="Z10";' -v 3

+ .. dropdown:: Answer

-In the first call, the log message should be similar to this:

+ Here's how to compute and verify wind speed using MET. Good news: MET
+ already includes logic for deriving wind speed on the fly. The GRIB
+ abbreviation for wind speed is WIND. To request WIND from a GRIB1 or
+ GRIB2 file, MET first checks to see if it already exists in the current
+ file. If so, it'll use it as is. If not, it'll search for the corresponding
+ U and V records and derive wind speed to use on the fly.
+
+ In this example, the RTMA file is named rtma.grb2 and the UPP file is
+ named wrf.grb. Try running the following commands to
+ plot wind speed:
+
+ .. code-block:: none
+
+ plot_data_plane wrf.grb wrf_wind.ps \
+ 'name="WIND"; level="Z10";' -v 3
+ plot_data_plane rtma.grb2 rtma_wind.ps \
+ 'name="WIND"; level="Z10";' -v 3

-.. code-block:: none

+ In the first call, the log message should be similar to this:

- DEBUG 3: MetGrib1DataFile::data_plane_array() ->
- Attempt to derive winds from U and V components.

+ .. code-block:: none

-In the second one, this won't appear since wind speed already exists
-in the RTMA file.

+ DEBUG 3: MetGrib1DataFile::data_plane_array() ->
+ Attempt to derive winds from U and V components.
+
+ In the second one, this won't appear since wind speed already exists
+ in the RTMA file.

Stat-Analysis
-------------

@@ -1084,184 +1125,196 @@ Stat-Analysis

Q. How does '-aggregate_stat' work?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-In Stat-Analysis, there is a "-vx_mask" job filtering option. That option
-reads the VX_MASK column from the input STAT lines and applies string
-matching with the values in that column. Presumably, all of the MPR lines
-will have the value of "FULL" in the VX_MASK column.
- -Stat-Analysis has the ability to read MPR lines and recompute statistics -from them using the same library code that the other MET tools use. The -job command options which begin with "-out" are used to specify settings -to be applied to the output of that process. For example, the "-fcst_thresh" -option filters strings from the input "FCST_THRESH" header column. The -"-out_fcst_thresh" option defines the threshold to be applied to the output -of Stat-Analysis. So reading MPR lines and applying a threshold to define -contingency table statistics (CTS) would be done using the -"-out_fcst_thresh" option. - -Stat-Analysis does have the ability to filter MPR lat/lon locations -using the "-mask_poly" option for a lat/lon polyline and the "-mask_grid" -option to define a retention grid. - -However, there is currently no "-mask_sid" option. - -With met-5.2 and later versions, one option is to apply column string -matching using the "-column_str" option to define the list of station -ID's you would like to aggregate. That job would look something like this: - -.. code-block:: none + .. dropdown:: Answer - stat_analysis -lookin path/to/mpr/directory \ - -job aggregate_stat -line_type MPR -out_line_type CNT \ - -column_str OBS_SID SID1,SID2,SID3,...,SIDN \ - -set_hdr VX_MASK SID_GROUP_NAME \ - -out_stat mpr_to_cnt.stat - -Where SID1...SIDN is a comma-separated list of the station id's in the -group. Notice that a value for the output VX_MASK column using the -"-set_hdr" option has been specified. Otherwise, this would show a list -of the unique values found in that column. Presumably, all the input -VX_MASK columns say "FULL" so that's what the output would say. Use -"-set_hdr" to explicitly set the output value. + In Stat-Analysis, there is a "-vx_mask" job filtering option. That option + reads the VX_MASK column from the input STAT lines and applies string + matching with the values in that column. Presumably, all of the MPR lines + will have the value of "FULL" in the VX_MASK column. + + Stat-Analysis has the ability to read MPR lines and recompute statistics + from them using the same library code that the other MET tools use. The + job command options which begin with "-out" are used to specify settings + to be applied to the output of that process. For example, + the "-fcst_thresh" + option filters strings from the input "FCST_THRESH" header column. The + "-out_fcst_thresh" option defines the threshold to be applied to the output + of Stat-Analysis. So reading MPR lines and applying a threshold to define + contingency table statistics (CTS) would be done using the + "-out_fcst_thresh" option. + + Stat-Analysis does have the ability to filter MPR lat/lon locations + using the "-mask_poly" option for a lat/lon polyline and the "-mask_grid" + option to define a retention grid. + + However, there is currently no "-mask_sid" option. + + With MET-5.2 and later versions, one option is to apply column string + matching using the "-column_str" option to define the list of station + ID's you would like to aggregate. That job would look something like this: + + .. code-block:: none + + stat_analysis -lookin path/to/mpr/directory \ + -job aggregate_stat -line_type MPR -out_line_type CNT \ + -column_str OBS_SID SID1,SID2,SID3,...,SIDN \ + -set_hdr VX_MASK SID_GROUP_NAME \ + -out_stat mpr_to_cnt.stat + + Where SID1...SIDN is a comma-separated list of the station id's in the + group. Notice that a value for the output VX_MASK column using the + "-set_hdr" option has been specified. 
Otherwise, this would show a list
+ of the unique values found in that column. Presumably, all the input
+ VX_MASK columns say "FULL" so that's what the output would say. Use
+ "-set_hdr" to explicitly set the output value.

Q. What is the best way to average the FSS scores within several days or even several months using 'Aggregate to Average Scores'?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Below is the best way to aggregate together the Neighborhood Continuous
-(NBRCNT) lines across multiple days, specifically the fractions skill
-score (FSS). The Stat-Analysis tool is designed to do this. This example
-is for aggregating scores for the accumulated precipitation (APCP) field.

+ .. dropdown:: Answer
+
+ Below is the best way to aggregate together the Neighborhood Continuous
+ (NBRCNT) lines across multiple days, specifically the fractions skill
+ score (FSS). The Stat-Analysis tool is designed to do this. This example
+ is for aggregating scores for the accumulated precipitation (APCP) field.

-Run the "aggregate" job type in stat_analysis to do this:

+ Run the "aggregate" job type in stat_analysis to do this:

-.. code-block:: none

+ .. code-block:: none

- stat_analysis -lookin directory/file*_nbrcnt.txt \
- -job aggregate -line_type NBRCNT -by FCST_VAR,FCST_LEAD,FCST_THRESH,INTERP_MTHD,INTERP_PNTS -out_stat agg_nbrcnt.txt

+ stat_analysis -lookin directory/file*_nbrcnt.txt \
+ -job aggregate -line_type NBRCNT -by FCST_VAR,FCST_LEAD,FCST_THRESH,INTERP_MTHD,INTERP_PNTS -out_stat agg_nbrcnt.txt

-This job reads all the files that are passed to it on the command line with
-the "-lookin" option. List explicit filenames to read them directly.
-Listing a top-level directory name will search that directory for files
-ending in ".stat".

+ This job reads all the files that are passed to it on the command line with
+ the "-lookin" option. List explicit filenames to read them directly.
+ Listing a top-level directory name will search that directory for files
+ ending in ".stat".

-In this case, the job running is to "aggregate" the "NBRCNT" line type.

+ In this case, the "aggregate" job is run on the "NBRCNT" line type.

-In this case, the "-by" option is being used and lists several header
-columns. Stat-Analysis will run this job separately for each unique
-combination of those header column entries.

+ Here, the "-by" option is used and lists several header
+ columns. Stat-Analysis will run this job separately for each unique
+ combination of those header column entries.

-The output is printed to the screen, or use the "-out_stat" option to
-also write the aggregated output to a file named "agg_nbrcnt.txt".

+ The output is printed to the screen, or use the "-out_stat" option to
+ also write the aggregated output to a file named "agg_nbrcnt.txt".

Q. How do I use '-by' to capture unique entries?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Here is a stat-analysis job that could be used to run, read the MPR lines,
-define the probabilistic forecast thresholds, define the single observation
-threshold, and compute a PSTD output line. Using "-by FCST_VAR" tells it
-to run the job separately for each unique entry found in the FCST_VAR column.
-
-.. code-block:: none
+ .. dropdown:: Answer

- stat_analysis \
- -lookin point_stat_model2_120000L_20160501_120000V.stat \
- -job aggregate_stat -line_type MPR -out_line_type PSTD \
- -out_fcst_thresh ge0,ge0.1,ge0.2,ge0.3,ge0.4,ge0.5,ge0.6,ge0.7,ge0.8,ge0.9,ge1.0 \
- -out_obs_thresh eq1.0 \
- -by FCST_VAR \
- -out_stat out_pstd.txt

+ Here is a Stat-Analysis job that reads the
+ MPR lines, defines the probabilistic forecast thresholds, defines the
+ single observation threshold, and computes a PSTD output line.
+ Using "-by FCST_VAR" tells it to run the job separately for
+ each unique entry found in the FCST_VAR column.
+
+ .. code-block:: none
+
+ stat_analysis \
+ -lookin point_stat_model2_120000L_20160501_120000V.stat \
+ -job aggregate_stat -line_type MPR -out_line_type PSTD \
+ -out_fcst_thresh ge0,ge0.1,ge0.2,ge0.3,ge0.4,ge0.5,ge0.6,ge0.7,ge0.8,ge0.9,ge1.0 \
+ -out_obs_thresh eq1.0 \
+ -by FCST_VAR \
+ -out_stat out_pstd.txt

-The output statistics are written to "out_pstd.txt".

+ The output statistics are written to "out_pstd.txt".

Q. How do I use '-filter' to refine my output?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Here is an example of running a Stat-Analysis filter job to discard any
-CNT lines (continuous statistics) where the forecast rate and observation
-rate are less than 0.05. This is an alternative way of tossing out those
-cases without having to modify the source code.
-
-.. code-block:: none
-
- stat_analysis \
- -lookin out/grid_stat/grid_stat_120000L_20050807_120000V.stat \
- -job filter -dump_row filter_cts.txt -line_type CTS \
- -column_min BASER 0.05 -column_min FMEAN 0.05
- DEBUG 2: STAT Lines read = 436
- DEBUG 2: STAT Lines retained = 36
- DEBUG 2:
- DEBUG 2: Processing Job 1: -job filter -line_type CTS -column_min BASER
- 0.05 -column_min
- FMEAN 0.05 -dump_row filter_cts.txt
- DEBUG 1: Creating
- STAT output file "filter_cts.txt"
- FILTER: -job filter -line_type
- CTS -column_min
- BASER 0.05 -column_min
- FMEAN 0.05 -dump_row filter_cts.txt
- DEBUG 2: Job 1 used 36 out of 36 STAT lines.
-
-This job reads find 56 CTS lines, but only keeps 36 of them where both
-the BASER and FMEAN columns are at least 0.05.

+ .. dropdown:: Answer
+
+ Here is an example of running a Stat-Analysis filter job to discard any
+ CNT lines (continuous statistics) where the forecast rate and observation
+ rate are less than 0.05. This is an alternative way of tossing out those
+ cases without having to modify the source code.
+
+ .. code-block:: none
+
+ stat_analysis \
+ -lookin out/grid_stat/grid_stat_120000L_20050807_120000V.stat \
+ -job filter -dump_row filter_cts.txt -line_type CTS \
+ -column_min BASER 0.05 -column_min FMEAN 0.05
+ DEBUG 2: STAT Lines read = 436
+ DEBUG 2: STAT Lines retained = 36
+ DEBUG 2:
+ DEBUG 2: Processing Job 1: -job filter -line_type CTS -column_min BASER
+ 0.05 -column_min
+ FMEAN 0.05 -dump_row filter_cts.txt
+ DEBUG 1: Creating
+ STAT output file "filter_cts.txt"
+ FILTER: -job filter -line_type
+ CTS -column_min
+ BASER 0.05 -column_min
+ FMEAN 0.05 -dump_row filter_cts.txt
+ DEBUG 2: Job 1 used 36 out of 36 STAT lines.
+
+ This job reads 436 STAT lines, but only keeps the 36 CTS lines where
+ both the BASER and FMEAN columns are at least 0.05.

Q. How do I use the “-by” flag to stratify results?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Adding "-by FCST_VAR" is a great way to associate a single value,
-of say RMSE, with each of the forecast variables (UGRD,VGRD and WIND).
+
+      Adding "-by FCST_VAR" is a great way to associate a single value,
+      of say RMSE, with each of the forecast variables (UGRD, VGRD and WIND).

-Run the following job on the output from Grid-Stat generated when the
-"make test" command is run:
+      Run the following job on the output from Grid-Stat generated when the
+      "make test" command is run:

-.. code-block:: none
-
-   stat_analysis -lookin out/grid_stat \
-   -job aggregate_stat -line_type SL1L2 -out_line_type CNT \
-   -by FCST_VAR,FCST_LEV \
-   -out_stat cnt.txt
+      .. code-block:: none
+
+         stat_analysis -lookin out/grid_stat \
+         -job aggregate_stat -line_type SL1L2 -out_line_type CNT \
+         -by FCST_VAR,FCST_LEV \
+         -out_stat cnt.txt

-The resulting cnt.txt file includes separate output for 6 different
-FCST_VAR values at different levels.
+      The resulting cnt.txt file includes separate output for 6 different
+      FCST_VAR values at different levels.

Q. How do I speed up run times?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A.
-By default, Stat-Analysis has two options enabled which slow it down.
-Disabling these two options will create quicker run times:
-1. The computation of rank correlation statistics, Spearman's Rank
-   Correlation and Kendall's Tau. Disable them using "-rank_corr_flag FALSE".
+   .. dropdown:: Answer
-2. The computation of bootstrap confidence intervals. Disable them using
-   "-n_boot_rep 0".
+      By default, Stat-Analysis has two options enabled which slow it down.
+      Disabling these two options will result in quicker run times:
-Two more suggestions for faster run times.
+      1. The computation of rank correlation statistics, Spearman's Rank
+         Correlation and Kendall's Tau. Disable them using
+         "-rank_corr_flag FALSE".
-1. Instead of using "-fcst_var u", use "-by fcst_var". This will compute
-   statistics separately for each unique entry found in the FCST_VAR column.
+      2. The computation of bootstrap confidence intervals. Disable them
+         using "-n_boot_rep 0".
-2. Instead of using "-out" to write the output to a text file, use "-out_stat"
-   which will write a full STAT output file, including all the header columns.
-   This will create a long list of values in the OBTYPE column. To avoid the
-   long, OBTYPE column value, manually set the output using
-   "-set_hdr OBTYPE ALL_TYPES". Or set its value to whatever is needed.
+      Here are two more suggestions for faster run times.
-.. code-block:: none
-
-   stat_analysis \
-   -lookin diag_conv_anl.2015060100.stat \
-   -job aggregate_stat -line_type MPR -out_line_type CNT -by FCST_VAR \
-   -out_stat diag_conv_anl.2015060100_cnt.txt -set_hdr OBTYPE ALL_TYPES \
-   -n_boot_rep 0 -rank_corr_flag FALSE -v 4
+      1. Instead of using "-fcst_var u", use "-by fcst_var". This will
+         compute statistics separately for each unique entry found in the
+         FCST_VAR column.
+
+      2. Instead of using "-out" to write the output to a text file,
+         use "-out_stat", which will write a full STAT output file,
+         including all the header columns. This will create a long list of
+         values in the OBTYPE column. To avoid the long OBTYPE column value,
+         manually set it using "-set_hdr OBTYPE ALL_TYPES",
+         or set its value to whatever is needed.
+
+      .. code-block:: none

-Adding the "-by FCST_VAR" option to compute stats for all variables and
-runs quickly.
+
+         stat_analysis \
+         -lookin diag_conv_anl.2015060100.stat \
+         -job aggregate_stat -line_type MPR -out_line_type CNT -by FCST_VAR \
+         -out_stat diag_conv_anl.2015060100_cnt.txt -set_hdr OBTYPE ALL_TYPES \
+         -n_boot_rep 0 -rank_corr_flag FALSE -v 4
+
+      Adding the "-by FCST_VAR" option computes stats for all variables in
+      a single job, and the job runs quickly.

TC-Stat
-------

@@ -1269,64 +1322,67 @@ TC-Stat
Q. How do I use the “-by” flag to stratify results?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-To perform tropical cyclone evaluations for multiple models use the
-"-by AMODEL" option with the tc_stat tool. Here is an example.
+   .. dropdown:: Answer
+
+      To perform tropical cyclone evaluations for multiple models, use the
+      "-by AMODEL" option with the tc_stat tool. Here is an example.

-In this case the tc_stat job looked at the 48 hour lead time for the HWRF
-and H3HW models. Without the “-by AMODEL” option, the output would be
-all grouped together.
+      In this case the tc_stat job looked at the 48 hour lead time for the
+      HWRF and H3HW models. Without the “-by AMODEL” option, the output
+      would all be grouped together.

-.. code-block:: none
+      .. code-block:: none

-   tc_stat \
-   -lookin d2014_vx_20141117_reset/al/tc_pairs/tc_pairs_H3WI_* \
-   -lookin d2014_vx_20141117_reset/al/tc_pairs/tc_pairs_HWFI_* \
-   -job summary -lead 480000 -column TRACK -amodel HWFI,H3WI \
-   -by AMODEL -out sample.out
+         tc_stat \
+         -lookin d2014_vx_20141117_reset/al/tc_pairs/tc_pairs_H3WI_* \
+         -lookin d2014_vx_20141117_reset/al/tc_pairs/tc_pairs_HWFI_* \
+         -job summary -lead 480000 -column TRACK -amodel HWFI,H3WI \
+         -by AMODEL -out sample.out

-This will result in all 48 hour HWFI and H3WI track forecasts to be
-aggregated (statistics and scores computed) for each model separately.
+      This results in all 48 hour HWFI and H3WI track forecasts being
+      aggregated (statistics and scores computed) for each model separately.

Q. How do I use rapid intensification verification?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-To get the most output, run something like this:
+   .. dropdown:: Answer

-.. code-block:: none
+      To get the most output, run something like this:

-   tc_stat \
-   -lookin path/to/tc_pairs/output \
-   -job rirw -dump_row test \
-   -out_line_type CTC,CTS,MPR
+      .. code-block:: none

-By default, rapid intensification (RI) is defined as a 24-hour exact
-change exceeding 30kts. To define RI differently, modify that definition
-using the ADECK, BDECK, or both using -rirw_time, -rirw_exact,
-and -rirw_thresh options. Set -rirw_window to something larger than 0
-to enable false alarms to be considered hits when they were "close enough"
-in time.
+         tc_stat \
+         -lookin path/to/tc_pairs/output \
+         -job rirw -dump_row test \
+         -out_line_type CTC,CTS,MPR

-.. code-block:: none
+      By default, rapid intensification (RI) is defined as a 24-hour exact
+      change exceeding 30kts. To define RI differently, modify that
+      definition for the ADECK, BDECK, or both using the -rirw_time,
+      -rirw_exact, and -rirw_thresh options. Set -rirw_window to something
+      larger than 0 to enable false alarms to be considered hits when they
+      were "close enough" in time.

-   tc_stat \
-   -lookin path/to/tc_pairs/output \
-   -job rirw -dump_row test \
-   -rirw_time 36 -rirw_window 12 \
-   -out_line_type CTC,CTS,MPR
+      .. code-block:: none

-To evaluate Rapid Weakening (RW) by setting "-rirw_thresh <=-30".
-To stratify your results by lead time, you could add the "-by LEAD" option.
+
+         tc_stat \
+         -lookin path/to/tc_pairs/output \
+         -job rirw -dump_row test \
+         -rirw_time 36 -rirw_window 12 \
+         -out_line_type CTC,CTS,MPR

+      To evaluate Rapid Weakening (RW), set "-rirw_thresh <=-30".
+      To stratify your results by lead time, you could add the
+      "-by LEAD" option.

-.. code-block:: none
+      .. code-block:: none

-   tc_stat \
-   -lookin path/to/tc_pairs/output \
-   -job rirw -dump_row test \
-   -rirw_time 36 -rirw_window 12 \
-   -rirw_thresh <=-30 -by LEAD \
-   -out_line_type CTC,CTS,MPR
+
+         tc_stat \
+         -lookin path/to/tc_pairs/output \
+         -job rirw -dump_row test \
+         -rirw_time 36 -rirw_window 12 \
+         -rirw_thresh <=-30 -by LEAD \
+         -out_line_type CTC,CTS,MPR

Utilities
---------

@@ -1334,131 +1390,136 @@ Utilities
Q. What would be an example of scripting to call MET?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-The following is an example of how to call MET from a bash script
-including passing in variables. This shell script is listed below to run
-Grid-Stat, call Plot-Data-Plane to plot the resulting difference field,
-and call convert to reformat from PostScript to PNG.
-
-.. code-block:: none
-
-   #!/bin/sh
-   for case in `echo "FCST OBS"`; do
-   export TO_GRID=${case}
-   grid_stat gfs.t00z.pgrb2.0p25.f000 \
-   nam.t00z.conusnest.hiresf00.tm00.grib2 GridStatConfig
-   plot_data_plane \
-   *TO_GRID_${case}*_pairs.nc TO_GRID_${case}.ps 'name="DIFF_TMP_P500_TMP_P500_FULL"; \
-   level="(*,*)";'
-   convert -rotate 90 -background white -flatten TO_GRID_${case}.ps
-   TO_GRID_${case}.png
-   done
-
+   .. dropdown:: Answer
+
+      The following is an example of how to call MET from a bash script,
+      including passing in variables. The shell script below runs Grid-Stat,
+      calls Plot-Data-Plane to plot the resulting difference field, and
+      calls convert to reformat from PostScript to PNG.
+
+      .. code-block:: none
+
+         #!/bin/sh
+         for case in `echo "FCST OBS"`; do
+         export TO_GRID=${case}
+         grid_stat gfs.t00z.pgrb2.0p25.f000 \
+         nam.t00z.conusnest.hiresf00.tm00.grib2 GridStatConfig
+         plot_data_plane \
+         *TO_GRID_${case}*_pairs.nc TO_GRID_${case}.ps 'name="DIFF_TMP_P500_TMP_P500_FULL"; \
+         level="(*,*)";'
+         convert -rotate 90 -background white -flatten \
+         TO_GRID_${case}.ps TO_GRID_${case}.png
+         done

Q. How do I convert TRMM data files?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Here is an example of NetCDF that the MET software is not expecting. Here
-is an option for accessing that same TRMM data, following links from the
-MET website:
-http://dtcenter.org/community-code/model-evaluation-tools-met/input-data
-
-.. code-block:: none
-
-   # Pull binary 3-hourly TRMM data file
-   wget
-   ftp://disc2.nascom.nasa.gov/data/TRMM/Gridded/3B42_V7/201009/3B42.100921.00z.7.
-   precipitation.bin
-   # Pull Rscript from MET website
-   wget http://dtcenter.org/sites/default/files/community-code/met/r-scripts/trmmbin2nc.R
-   # Edit that Rscript by setting
-   out_lat_ll = -50
-   out_lon_ll = 0
-   out_lat_ur = 50
-   out_lon_ur = 359.75
-   # Run the Rscript
-   Rscript trmmbin2nc.R 3B42.100921.00z.7.precipitation.bin \
-   3B42.100921.00z.7.precipitation.nc
-   # Plot the result
-   plot_data_plane 3B42.100921.00z.7.precipitation.nc \
-   3B42.100921.00z.7.precipitation.ps 'name="APCP_03"; level="(*,*)";'
-
-It may be possible that the domain of the data is smaller. Here are some options:
-
-1. In that Rscript, choose different boundaries (i.e. out_lat/lon_ll/ur)
-   to specify the tile of data to be selected.
-
-2. As of version 5.1, MET includes support for regridding the data it reads.
-   Keep TRMM on it's native domain and use the MET tools to do the regridding.
-   For example, the Regrid-Data-Plane" tool reads a NetCDF file, regrids
-   the data, and writes a NetCDF file. Alternatively, the "regrid" section
-   of the configuration files for the MET tools may be used to do the
-   regridding on the fly. For example, run Grid-Stat to compare to the model
-   output to TRMM and say
-
-.. code-block:: none
-
-   "regrid = { field = FCST;
-   ...}"
-
-That tells Grid-Stat to automatically regrid the TRMM observations to
-the model domain.
+   .. dropdown:: Answer
+
+      Here is an example of NetCDF that the MET software is not expecting.
+      Here is an option for accessing that same TRMM data, following links
+      from the MET website:
+      http://dtcenter.org/community-code/model-evaluation-tools-met/input-data
+
+      .. code-block:: none
+
+         # Pull binary 3-hourly TRMM data file
+         wget ftp://disc2.nascom.nasa.gov/data/TRMM/Gridded/3B42_V7/201009/3B42.100921.00z.7.precipitation.bin
+         # Pull Rscript from MET website
+         wget http://dtcenter.org/sites/default/files/community-code/met/r-scripts/trmmbin2nc.R
+         # Edit that Rscript by setting
+         out_lat_ll = -50
+         out_lon_ll = 0
+         out_lat_ur = 50
+         out_lon_ur = 359.75
+         # Run the Rscript
+         Rscript trmmbin2nc.R 3B42.100921.00z.7.precipitation.bin \
+         3B42.100921.00z.7.precipitation.nc
+         # Plot the result
+         plot_data_plane 3B42.100921.00z.7.precipitation.nc \
+         3B42.100921.00z.7.precipitation.ps 'name="APCP_03"; level="(*,*)";'
+
+      It may be possible that the domain of the data is smaller.
+      Here are some options:
+
+      1. In that Rscript, choose different boundaries (i.e. out_lat/lon_ll/ur)
+         to specify the tile of data to be selected.
+
+      2. As of version 5.1, MET includes support for regridding the
+         data it reads. Keep TRMM on its native domain and use the
+         MET tools to do the regridding.
+         For example, the "Regrid-Data-Plane" tool reads a NetCDF file,
+         regrids the data, and writes a NetCDF file. Alternatively, the
+         "regrid" section of the configuration files for the MET tools may
+         be used to do the regridding on the fly. For example, run Grid-Stat
+         to compare the model output to TRMM and set
+
+      .. code-block:: none
+
+         "regrid = { to_grid = FCST;
+         ...}"
+
+      That tells Grid-Stat to automatically regrid the TRMM observations to
+      the model domain.
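+
+      If more control over the regridding is needed, the "regrid"
+      dictionary accepts additional entries, such as the interpolation
+      method and width. A minimal sketch only; the method and width values
+      shown here are illustrative choices, not a recommendation:
+
+      .. code-block:: none
+
+         regrid = {
+            to_grid = FCST;
+            method  = BILIN;
+            width   = 2;
+         }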

Q. How do I convert a PostScript to png?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A.
-Use the linux “convert” tool to convert a Plot-Data-Plane PostScript
-file to a png:
-
-.. code-block:: none
-
-   convert -rotate 90 -background white plot_dbz.ps plot_dbz.png
-
-To convert a MODE PostScript to png
-
-.. code-block:: none
-
-   convert mode_out.ps mode_out.png
-
-Will result in all 6-7 pages in the PostScript file be written out to a
-seperate .png with the following naming convention:
-
-mode_out-0.png, mode_out-1.png, mode_out-2.png, etc.
+   .. dropdown:: Answer
+
+      Use the Linux “convert” tool to convert a Plot-Data-Plane PostScript
+      file to a png:
+
+      .. code-block:: none
+
+         convert -rotate 90 -background white plot_dbz.ps plot_dbz.png
+
+      To convert a MODE PostScript to png:
+
+      .. code-block:: none
+
+         convert mode_out.ps mode_out.png
+
+      This will result in all 6-7 pages in the PostScript file being written
+      out to separate .png files with the following naming convention:
+
+      mode_out-0.png, mode_out-1.png, mode_out-2.png, etc.

Q. How does pairwise differences using plot_tcmpr.R work?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-One necessary step in computing pairwise differences is "event equalizing"
-the data. This means extracting a subset of cases that are common to
-both models.

-While the tc_stat tool does not compute pairwise differences, it can apply
-the "event_equalization" logic to extract the cases common to two models.
-This is done using the config file "event_equal = TRUE;" option or
-setting "-event_equal true" on the command line.
+   .. dropdown:: Answer
+
+      One necessary step in computing pairwise differences is
+      "event equalizing" the data. This means extracting a subset of cases
+      that are common to both models.

-Most of the hurricane track analysis and plotting is done using the
-plot_tcmpr.R Rscript. It makes a call to the tc_stat tool to track
-data down to the desired subset, compute pairwise differences if needed,
-and then plot the result.
+      While the tc_stat tool does not compute pairwise differences, it can
+      apply the "event_equalization" logic to extract the cases common to
+      two models. This is done using the config file "event_equal = TRUE;"
+      option or setting "-event_equal true" on the command line.

-.. code-block:: none
+      Most of the hurricane track analysis and plotting is done using the
+      plot_tcmpr.R Rscript. It makes a call to the tc_stat tool to filter
+      the track data down to the desired subset, compute pairwise
+      differences if needed, and then plot the result.

-   Rscript ${MET_BUILD_BASE}/scripts/Rscripts/plot_tcmpr.R \
-   -lookin tc_pairs_output.tcst \
-   -filter '-amodel AHWI,GFSI' \
-   -series AMODEL AHWI,GFSI,AHWI-GFSI \
-   -plot MEAN,BOXPLOT
+      .. code-block:: none
+
+         Rscript ${MET_BUILD_BASE}/scripts/Rscripts/plot_tcmpr.R \
+         -lookin tc_pairs_output.tcst \
+         -filter '-amodel AHWI,GFSI' \
+         -series AMODEL AHWI,GFSI,AHWI-GFSI \
+         -plot MEAN,BOXPLOT

-The resulting plots include three series - one for AHWI, one for GFSI,
-and one for their pairwise difference.
+      The resulting plots include three series - one for AHWI, one for GFSI,
+      and one for their pairwise difference.

-It's a bit cumbersome to understand all the options available, but this may
-be really useful. If nothing else, it could be adapted to dump out the
-pairwise differences that are needed.
+      It's a bit cumbersome to understand all the options available, but
+      this may be really useful. If nothing else, it could be adapted to
+      dump out the pairwise differences that are needed.
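+
+      As an illustration of the event equalization option mentioned above,
+      a tc_stat filter job might look like the following. This is a sketch
+      only; the input file and output file names are placeholders:
+
+      .. code-block:: none
+
+         tc_stat \
+         -lookin tc_pairs_output.tcst \
+         -job filter -amodel AHWI,GFSI \
+         -event_equal true \
+         -dump_row common_cases.tcst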

Miscellaneous

@@ -1466,266 +1527,281 @@ Miscellaneous
Q. Regrid-Data-Plane - How do I define a LatLon grid?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A.
-Here is an example of the NetCDF variable attributes that MET uses to
-define a LatLon grid:
-
-.. code-block:: none
-
-   :Projection = "LatLon" ;
-   :lat_ll = "25.063000 degrees_north" ;
-   :lon_ll = "-124.938000 degrees_east" ;
-   :delta_lat = "0.125000 degrees" ;
-   :delta_lon = "0.125000 degrees" ;
-   :Nlat = "224 grid_points" ;
-   :Nlon = "464 grid_points" ;
-
-This can be created by running the Regrid-Data-Plane" tool to regrid
-some GFS data to a LatLon grid:
-
-.. code-block:: none
+   .. dropdown:: Answer
+
+      Here is an example of the NetCDF variable attributes that MET uses to
+      define a LatLon grid:
+
+      .. code-block:: none
+
+         :Projection = "LatLon" ;
+         :lat_ll = "25.063000 degrees_north" ;
+         :lon_ll = "-124.938000 degrees_east" ;
+         :delta_lat = "0.125000 degrees" ;
+         :delta_lon = "0.125000 degrees" ;
+         :Nlat = "224 grid_points" ;
+         :Nlon = "464 grid_points" ;
+
+      This can be created by running the "Regrid-Data-Plane" tool to regrid
+      some GFS data to a LatLon grid:
+
+      .. code-block:: none

-   regrid_data_plane \
-   gfs_2012040900_F012.grib G110 \
-   gfs_g110.nc -field 'name="TMP"; level="Z2";'
+
+         regrid_data_plane \
+         gfs_2012040900_F012.grib G110 \
+         gfs_g110.nc -field 'name="TMP"; level="Z2";'

-Use ncdump to look at the attributes. As an exercise, try defining
-these global attributes (and removing the other projection-related ones)
-and then try again.
+      Use ncdump to look at the attributes. As an exercise, try defining
+      these global attributes (and removing the other projection-related
+      ones) and then try again.

Q. Pre-processing - How do I use wgrib2, pcp_combine regrid and reformat to format NetCDF files?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-If you are extracting only one or two fields from a file, using MET's
-Regrid-Data-Plane can be used to generate a Lat-Lon projection. If
-regridding all fields, the wgrib2 utility may be more useful. Here's an
-example of using wgrib2 and pcp_combine to generate NetCDF files
-MET can read:
-
-.. code-block:: none
-
-   wgrib2 gfsrain06.grb -new_grid latlon 112:131:0.1 \
-   25:121:0.1 gfsrain06_regrid.grb2
-
-And then run that GRIB2 file through pcp_combine using the "-add" option
-with only one file provided:
-
-.. code-block:: none
-
-   pcp_combine -add gfsrain06_regrid.grb2 'name="APCP"; \
-   level="A6";' gfsrain06_regrid.nc
-
-Then the output NetCDF file does not have this problem:
-
-.. code-block:: none
-
-   ncdump -h 2a_wgrib2_regrid.nc | grep "_ll"
-   :lat_ll = "25.000000 degrees_north" ;
-   :lon_ll = "112.000000 degrees_east" ;
+   .. dropdown:: Answer
+
+      If you are extracting only one or two fields from a file, MET's
+      Regrid-Data-Plane tool can be used to generate a Lat-Lon projection.
+      If regridding all fields, the wgrib2 utility may be more useful.
+      Here's an example of using wgrib2 and pcp_combine to generate NetCDF
+      files MET can read:

+      .. code-block:: none

+         wgrib2 gfsrain06.grb -new_grid latlon 112:131:0.1 \
+         25:121:0.1 gfsrain06_regrid.grb2

+      And then run that GRIB2 file through pcp_combine using the "-add"
+      option with only one file provided:

+      .. code-block:: none

+         pcp_combine -add gfsrain06_regrid.grb2 'name="APCP"; \
+         level="A6";' gfsrain06_regrid.nc

+      The output NetCDF file then has the expected lower-left latitude and
+      longitude attributes:

+      .. code-block:: none
+
+         ncdump -h 2a_wgrib2_regrid.nc | grep "_ll"
+         :lat_ll = "25.000000 degrees_north" ;
+         :lon_ll = "112.000000 degrees_east" ;

Q. TC-Pairs - How do I get rid of WARNING: TrackInfo Using Specify Model Suffix?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Below is a command example to run:
-
-.. code-block:: none
-
-   tc_pairs \
-   -adeck aep142014.h4hw.dat \
-   -bdeck bep142014.dat \
-   -config TCPairsConfig_v5.0 \
-   -out tc_pairs_v5.0_patch \
-   -log tc_pairs_v5.0_patch.log \
-   -v 3
-
-Below is a warning message:
-
-.. code-block:: none
-
-   WARNING: TrackInfo::add(const ATCFLine &) ->
-   skipping ATCFLine since the valid time is not
-   increasing (20140801_000000 < 20140806_060000):
-   WARNING: AL, 03, 2014080100, 03, H4HW, 000,
-   120N, 547W, 38, 1009, XX, 34, NEQ, 0084, 0000,
-   0000, 0083, -99, -99, 59, 0, 0, , 0, , 0, 0,
-
-As a sanity check, the MET-TC code makes sure that the valid time of
-the track data doesn't go backwards in time. This warning states that
-this is
-occurring. The very likely reason for this is that the data being used
-are probably passing tc_pairs duplicate track data.
-
-Using grep, notice that the same track data shows up in
-"aal032014.h4hw.dat" and "aal032014_hfip_d2014_BERTHA.dat". Try this:
-
-.. code-block:: none
-
-   grep H4HW aal*.dat | grep 2014080100 | grep ", 000,"
-   aal032014.h4hw.dat:AL, 03, 2014080100, 03, H4HW, 000,
-   120N, 547W, 38, 1009, XX, 34, NEQ, 0084,
-   0000, 0000, 0083, -99, -99, 59, 0, 0, ,
-   0, , 0, 0, , , , , 0, 0, 0, 0, THERMO PARAMS,
-   -9999, -9999, -9999, Y, 10, DT, -999
-   aal032014_hfip_d2014_BERTHA.dat:AL, 03, 2014080100,
-   03, H4HW, 000, 120N, 547W, 38, 1009, XX, 34, NEQ,
-   0084, 0000, 0000, 0083, -99, -99, 59, 0, 0, , 0, , 0,
-   0, , , , , 0, 0, 0, 0, THERMOPARAMS, -9999 ,-9999 ,
-   -9999 ,Y ,10 ,DT ,-999
-
-Those 2 lines are nearly identical, except for the spelling of
-"THERMO PARAMS" with a space vs "THERMOPARAMS" with no space.
-
-Passing tc_pairs duplicate track data results in this sort of warning.
-The DTC had the same sort of problem when setting up a real-time
-verification system. The same track data was making its way into
-multiple ATCF files.
-
-If this really is duplicate track data, work on the logic for where/how
-to store the track data. However, if the H4HW data in the first file
-actually differs from that in the second file, there is another option.
-You can specify a model suffix to be used for each ADECK source, as in
-this example (suffix=_EXP):
-
-.. code-block:: none
-
-   tc_pairs \
-   -adeck aal032014.h4hw.dat suffix=_EXP \
-   -adeck aal032014_hfip_d2014_BERTHA.dat \
-   -bdeck bal032014.dat \
-   -config TCPairsConfig_match \
-   -out tc_pairs_v5.0_patch \
-   -log tc_pairs_v5.0_patch.log -v 3
-
-Any model names found in "aal032014.h4hw.dat" will now have _EXP tacked
-onto the end. Note that if a list of model names in the TCPairsConfig file
-needs specifying, include the _EXP variants to get them to show up in
-the output or it won’t show up.
-
-That'll get rid of the warnings because they will be storing the track
-data from the first source using a slightly different model name. This
-feature was added for users who are testing multiple versions of a
-model on the same set of storms. They might be using the same ATCF ID
-in all their output. But this enables them to distinguish the output
-in tc_pairs.
+   .. dropdown:: Answer
+
+      Below is a command example to run:
+
+      .. code-block:: none
+
+         tc_pairs \
+         -adeck aep142014.h4hw.dat \
+         -bdeck bep142014.dat \
+         -config TCPairsConfig_v5.0 \
+         -out tc_pairs_v5.0_patch \
+         -log tc_pairs_v5.0_patch.log \
+         -v 3
+
+      Below is a warning message:
+
+      .. code-block:: none
+
+         WARNING: TrackInfo::add(const ATCFLine &) ->
+         skipping ATCFLine since the valid time is not
+         increasing (20140801_000000 < 20140806_060000):
+         WARNING: AL, 03, 2014080100, 03, H4HW, 000,
+         120N, 547W, 38, 1009, XX, 34, NEQ, 0084, 0000,
+         0000, 0083, -99, -99, 59, 0, 0, , 0, , 0, 0,
+
+      As a sanity check, the MET-TC code makes sure that the valid time of
+      the track data doesn't go backwards in time, and this warning states
+      that it is. The very likely reason is that duplicate track data are
+      being passed to tc_pairs.
+
+      Using grep, notice that the same track data shows up in
+      "aal032014.h4hw.dat" and "aal032014_hfip_d2014_BERTHA.dat". Try this:
+
+      .. code-block:: none
+
+         grep H4HW aal*.dat | grep 2014080100 | grep ", 000,"
+         aal032014.h4hw.dat:AL, 03, 2014080100, 03, H4HW, 000,
+         120N, 547W, 38, 1009, XX, 34, NEQ, 0084,
+         0000, 0000, 0083, -99, -99, 59, 0, 0, ,
+         0, , 0, 0, , , , , 0, 0, 0, 0, THERMO PARAMS,
+         -9999, -9999, -9999, Y, 10, DT, -999
+         aal032014_hfip_d2014_BERTHA.dat:AL, 03, 2014080100,
+         03, H4HW, 000, 120N, 547W, 38, 1009, XX, 34, NEQ,
+         0084, 0000, 0000, 0083, -99, -99, 59, 0, 0, , 0, , 0,
+         0, , , , , 0, 0, 0, 0, THERMOPARAMS, -9999 ,-9999 ,
+         -9999 ,Y ,10 ,DT ,-999
+
+      Those 2 lines are nearly identical, except for the spelling of
+      "THERMO PARAMS" with a space vs "THERMOPARAMS" with no space.
+
+      Passing tc_pairs duplicate track data results in this sort of warning.
+      The DTC had the same sort of problem when setting up a real-time
+      verification system. The same track data was making its way into
+      multiple ATCF files.
+
+      If this really is duplicate track data, work on the logic for where/how
+      to store the track data. However, if the H4HW data in the first file
+      actually differs from that in the second file, there is another option.
+      You can specify a model suffix to be used for each ADECK source, as in
+      this example (suffix=_EXP):
+
+      .. code-block:: none
+
+         tc_pairs \
+         -adeck aal032014.h4hw.dat suffix=_EXP \
+         -adeck aal032014_hfip_d2014_BERTHA.dat \
+         -bdeck bal032014.dat \
+         -config TCPairsConfig_match \
+         -out tc_pairs_v5.0_patch \
+         -log tc_pairs_v5.0_patch.log -v 3
+
+      Any model names found in "aal032014.h4hw.dat" will now have _EXP tacked
+      onto the end. Note that if a list of model names is specified in the
+      TCPairsConfig file, include the _EXP variants, or those models will
+      not show up in the output.
+
+      That'll get rid of the warnings because tc_pairs will store the track
+      data from the first source using a slightly different model name. This
+      feature was added for users who are testing multiple versions of a
+      model on the same set of storms. They might be using the same ATCF ID
+      in all their output. But this enables them to distinguish the output
+      in tc_pairs.

Q. Why is the grid upside down?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-The user provides a gridded data file to MET and it runs without error,
-but the data is packed upside down.
-
-Try using the "file_type" entry. The "file_type" entry specifies the
-input file type (e.g. GRIB1, GRIB2, NETCDF_MET, NETCDF_PINT, NETCDF_NCCF)
-rather than letting the code determine it itself. For valid file_type
-values, see "File types" in the *data/config/ConfigConstants* file. This
-entry should be defined within the "fcst" or "obs" dictionaries.
-Sometimes, directly specifying the type of file will help MET figure
-out what to properly do with the data.
-
-Another option is to use the Regrid-Data-Plane tool. The Regrid-Data-Plane
-tool may be run to read data from any gridded data file MET supports
-(i.e. GRIB1, GRIB2, and a variety of NetCDF formats), interpolate to a
-user-specified grid, and write the field(s) out in NetCDF format. See the
-Regrid-Data-Plane tool :numref:`regrid-data-plane` in the MET
-User's Guide for more
-detailed information. While the Regrid-Data-Plane tool is useful as a
-stand-alone tool, the capability is also included to automatically regrid
-data in most of the MET tools that handle gridded data. This "regrid"
-entry is a dictionary containing information about how to handle input
-gridded data files. The "regird" entry specifies regridding logic and
-has a "to_grid" entry that can be set to NONE, FCST, OBS, a named grid,
-the path to a gridded data file defining the grid, or an explicit grid
-specification string. See the :ref:`regrid` entry in
-the Configuration File Overview in the MET User's Guide for a more detailed
-description of the configuration file entries that control automated
-regridding.
-
-A single model level can be plotted using the plot_data_plane utility.
-This tool can assist the user by showing the data to be verified to
-ensure that times and locations matchup as expected.
+   .. dropdown:: Answer
+
+      The user provides a gridded data file to MET and it runs without
+      error, but the data is packed upside down.
+
+      Try using the "file_type" entry. The "file_type" entry specifies the
+      input file type (e.g. GRIB1, GRIB2, NETCDF_MET, NETCDF_PINT,
+      NETCDF_NCCF) rather than letting the code determine it itself. For
+      valid file_type values, see "File types" in the
+      *data/config/ConfigConstants* file. This entry should be defined
+      within the "fcst" or "obs" dictionaries. Sometimes, directly
+      specifying the type of file will help MET figure out what to properly
+      do with the data.
+
+      Another option is to use the Regrid-Data-Plane tool. The
+      Regrid-Data-Plane tool may be run to read data from any gridded data
+      file MET supports (i.e. GRIB1, GRIB2, and a variety of NetCDF
+      formats), interpolate to a user-specified grid, and write the field(s)
+      out in NetCDF format. See the Regrid-Data-Plane tool
+      :numref:`regrid-data-plane` in the MET User's Guide for more detailed
+      information. While the Regrid-Data-Plane tool is useful as a
+      stand-alone tool, the capability is also included to automatically
+      regrid data in most of the MET tools that handle gridded data. This
+      "regrid" entry is a dictionary containing information about how to
+      handle input gridded data files. The "regrid" entry specifies
+      regridding logic and has a "to_grid" entry that can be set to NONE,
+      FCST, OBS, a named grid, the path to a gridded data file defining the
+      grid, or an explicit grid specification string. See the :ref:`regrid`
+      entry in the Configuration File Overview in the MET User's Guide for
+      a more detailed description of the configuration file entries that
+      control automated regridding.
+
+      A single model level can be plotted using the plot_data_plane utility.
+      This tool can assist the user by showing the data to be verified to
+      ensure that times and locations match up as expected.
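+
+      For example, a single field can be plotted as follows. This is a
+      sketch only; the input file and field are placeholders borrowed from
+      examples elsewhere in this guide:
+
+      .. code-block:: none
+
+         plot_data_plane \
+         gfs_2012040900_F012.grib gfs_tmp_z2.ps \
+         'name="TMP"; level="Z2";'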

Q. Why was the MET written largely in C++ instead of FORTRAN?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-MET relies upon the object-oriented aspects of C++, particularly in
-using the MODE tool. Due to time and budget constraints, it also makes
-use of a pre-existing forecast verification library that was developed
-at NCAR.
+   .. dropdown:: Answer
+
+      MET relies upon the object-oriented aspects of C++, particularly in
+      using the MODE tool. Due to time and budget constraints, it also makes
+      use of a pre-existing forecast verification library that was developed
+      at NCAR.

Q. How does MET differ from the previously mentioned existing verification packages?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-MET is an actively maintained, evolving software package that is being
-made freely available to the public through controlled version releases.
+   .. dropdown:: Answer
+
+      MET is an actively maintained, evolving software package that is being
+      made freely available to the public through controlled version
+      releases.

Q. Will the MET work on data in native model coordinates?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-No - it will not. In the future, we may add options to allow additional
-model grid coordinate systems.
+   .. dropdown:: Answer
+
+      No - it will not. In the future, we may add options to allow
+      additional model grid coordinate systems.

Q. How do I get help if my questions are not answered in the User's Guide?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-First, look on our
-`MET User's Guide website `_.
-If that doesn't answer your question, create a post in the
-`METplus GitHub Discussions Forum `_.
+   .. dropdown:: Answer
+
+      First, look on our
+      `MET User's Guide website `_.
+      If that doesn't answer your question, create a post in the
+      `METplus GitHub Discussions Forum `_.

Q. What graphical features does MET provide?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-MET provides some :ref:`plotting and graphics support`. The plotting
-tools, including plot_point_obs, plot_data_plane, and plot_mode_field, can
-help users visualize the data.
-
-MET is intended to be a set of command line tools for evaluating forecast
-quality. So, the development effort is focused on providing the latest,
-state of the art verification approaches, rather than on providing nice
-plotting features. However, the ASCII output statistics of MET may be plotted
-with a wide variety of plotting packages, including R, NCL, IDL, and GNUPlot.
-METViewer is also currently being developed and used by the DTC and NOAA
-It creates basic plots of MET output verification statistics. The types of
-plots include series plots with confidence intervals, box plots, x-y scatter
-plots and histograms.
-
-R is a language and environment for statistical computing and graphics.
-It's a free package that runs on most operating systems and provides nice
-plotting features and a wide array of powerful statistical analysis tools.
-There are sample scripts on the
-`MET website `_
-that you can use and modify to perform the type of analysis you need. If
-you create your own scripts, we encourage you to submit them to us through the
-`METplus GitHub Discussions Forum `_
-so that we can post them for other users.
+   .. dropdown:: Answer
+
+      MET provides some :ref:`plotting and graphics support`.
+      The plotting tools, including plot_point_obs, plot_data_plane, and
+      plot_mode_field, can help users visualize the data.
+
+      MET is intended to be a set of command line tools for evaluating
+      forecast quality. So, the development effort is focused on providing
+      the latest, state-of-the-art verification approaches, rather than on
+      providing nice plotting features. However, the ASCII output statistics
+      of MET may be plotted with a wide variety of plotting packages,
+      including R, NCL, IDL, and GNUPlot.
+      METViewer is also currently being developed and used by the DTC and
+      NOAA. It creates basic plots of MET output verification statistics.
+      The types of plots include series plots with confidence intervals,
+      box plots, x-y scatter plots and histograms.
+
+      R is a language and environment for statistical computing and
+      graphics. It's a free package that runs on most operating systems and
+      provides nice plotting features and a wide array of powerful
+      statistical analysis tools.
+      There are sample scripts on the
+      `MET website `_
+      that you can use and modify to perform the type of analysis you need.
+      If you create your own scripts, we encourage you to submit them to us
+      through the
+      `METplus GitHub Discussions Forum `_
+      so that we can post them for other users.
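+
+      As a quick illustration of the plotting tools mentioned above, a
+      NetCDF point observation file produced by PB2NC can be plotted with a
+      single call. This is a sketch only; the file names are placeholders:
+
+      .. code-block:: none
+
+         plot_point_obs sample_pb.nc sample_pb.ps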

Q. How do I find the version of the tool I am using?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-Type the name of the tool followed by **-version**. For example,
-type “pb2nc **-version**”.
+   .. dropdown:: Answer
+
+      Type the name of the tool followed by **--version**. For example,
+      type “pb2nc **--version**”.

Q. What are MET's conventions for latitude, longitude, azimuth and bearing angles?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-A.
-MET considers north latitude and east longitude positive. Latitudes
-have range from :math:`-90^\circ` to :math:`+90^\circ`. Longitudes have
-range from :math:`-180^\circ` to :math:`+180^\circ`. Plane angles such
-as azimuths and bearing (example: horizontal wind direction) have
-range :math:`0^\circ` to :math:`360^\circ` and are measured clockwise
-from the north.
+   .. dropdown:: Answer
+
+      MET considers north latitude and east longitude positive. Latitudes
+      range from :math:`-90^\circ` to :math:`+90^\circ`. Longitudes range
+      from :math:`-180^\circ` to :math:`+180^\circ`. Plane angles such as
+      azimuths and bearings (for example, horizontal wind direction) range
+      from :math:`0^\circ` to :math:`360^\circ` and are measured clockwise
+      from the north.

.. _Troubleshooting:

@@ -1743,142 +1819,157 @@ on other things to check if you are having problems installing or running MET.

MET won't compile
-----------------

-* Have you specified the locations of NetCDF, GNU Scientific Library,
-  and BUFRLIB, and optional additional libraries using corresponding
-  MET\_ environment variables prior to running configure?
+   .. dropdown:: Troubleshooting Help

-* Have these libraries been compiled and installed using the same set
-  of compilers used to build MET?
+      * Have you specified the locations of NetCDF, GNU Scientific Library,
+        and BUFRLIB, and optional additional libraries using corresponding
+        MET\_ environment variables prior to running configure?

-* Are you using NetCDF version 3.4 or version 4? Currently, only NetCDF
-  version 3.6 can be used with MET.
+      * Have these libraries been compiled and installed using the same set
+        of compilers used to build MET?

BUFRLIB Errors during MET installation
--------------------------------------

-.. code-block:: none
+   .. dropdown:: Troubleshooting Help
+
+      .. code-block:: none

-   error message: /usr/bin/ld: cannot find -lbufr
-   The linker can not find the BUFRLIB library archive file it needs.
+         error message: /usr/bin/ld: cannot find -lbufr
+         The linker can not find the BUFRLIB library archive file it needs.

-   export MET_BUFRLIB=/home/username/BUFRLIB_v10.2.3:$MET_BUFRLIB
+         export MET_BUFRLIB=/home/username/BUFRLIB_v11.3.0:$MET_BUFRLIB

-It isn't making it's way into the configuration because BUFRLIB_v10.2.3
-isn't showing up in the output of make. This may indicate the wrong shell
-type. The .bashrc file sets the environment for the Bourne shell, but
-the above error could indicate that the c- shell is being used instead.
+      It isn't making its way into the configuration because BUFRLIB_v11.3.0
+      isn't showing up in the output of make. This may indicate the wrong
+      shell type. The .bashrc file sets the environment for the Bourne
+      shell, but the above error could indicate that the C shell is being
+      used instead.
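+
+      One quick, generic way to check which login shell is in use
+      (not MET-specific):
+
+      .. code-block:: none
+
+         echo $SHELL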
-Try the following 2 things:
+
+      Try the following 2 things:

-1. Check to make sure this file exists:
+      1. Check to make sure this file exists:

-   .. code-block:: none
+         .. code-block:: none

-      ls /home/username/BUFRLIB_v10.2.3/libbufr.a
+            ls /home/username/BUFRLIB_v11.3.0/libbufr.a

-2. Rerun the MET configure command using the following option on the
-   command line:
+      2. Rerun the MET configure command using the following option on the
+         command line:

-   .. code-block:: none
-
-      MET_BUFRLIB=/home/username/BUFRLIB_v10.2.3
+         .. code-block:: none
+
+            MET_BUFRLIB=/home/username/BUFRLIB_v11.3.0

-After doing that, please try recompiling MET. If it fails, please submit the following log files: "make_install.log" as well as "config.log" with a new post in the `METplus GitHub Discussions Forum `_.
+      After doing that, please try recompiling MET. If it fails, please
+      submit the following log files: "make_install.log" as well as
+      "config.log" with a new post in the
+      `METplus GitHub Discussions Forum `_.

Command line double quotes
--------------------------

-Single quotes, double quotes, and escape characters can be difficult for
-MET to parse. If there are problems, especially in Python code, try
-breaking the command up like the below example.
+   .. dropdown:: Troubleshooting Help
+
+      Single quotes, double quotes, and escape characters can be difficult
+      for MET to parse. If there are problems, especially in Python code,
+      try breaking the command up like the below example.

-.. code-block:: none
+      .. code-block:: none

-   ['regrid_data_plane',
-   '/h/data/global/WXQC/data/umm/1701150006',
-   'G003', '/h/data/global/WXQC/data/met/nc_mdl/umm/1701150006', '- field',
-   '\'name="HGT"; level="P500";\'', '-v', '6']
+         ['regrid_data_plane',
+         '/h/data/global/WXQC/data/umm/1701150006',
+         'G003', '/h/data/global/WXQC/data/met/nc_mdl/umm/1701150006', '- field',
+         '\'name="HGT"; level="P500";\'', '-v', '6']

Environment variable settings
-----------------------------

-In the below incorrect example for many environment variables have both
-the main variable set and the INC and LIB variables set:
+   .. dropdown:: Troubleshooting Help
+
+      In the incorrect example below, many environment variables have both
+      the main variable set and the INC and LIB variables set:

-.. code-block:: none
+      .. code-block:: none

-   export MET_GSL=$MET_LIB_DIR/gsl
-   export MET_GSLINC=$MET_LIB_DIR/gsl/include/gsl
-   export MET_GSLLIB=$MET_LIB_DIR/gsl/lib
-
-**only MET_GSL *OR *MET_GSLINC *AND *MET_GSLLIB need to be set.**
-So, for example, either set:
+         export MET_GSL=$MET_LIB_DIR/gsl
+         export MET_GSLINC=$MET_LIB_DIR/gsl/include/gsl
+         export MET_GSLLIB=$MET_LIB_DIR/gsl/lib
+
+      **Only MET_GSL OR both MET_GSLINC AND MET_GSLLIB need to be set.**
+      So, for example, either set:

-.. code-block:: none
-
-   export MET_GSL=$MET_LIB_DIR/gsl
+      .. code-block:: none
+
+         export MET_GSL=$MET_LIB_DIR/gsl

-or set:
+      or set:

-.. code-block:: none
-
-   export MET_GSLINC=$MET_LIB_DIR/gsl/include/gsl export MET_GSLLIB=$MET_LIB_DIR/gsl/lib
+      .. code-block:: none
+
+         export MET_GSLINC=$MET_LIB_DIR/gsl/include/gsl
+         export MET_GSLLIB=$MET_LIB_DIR/gsl/lib

-Additionally, MET does not use MET_HDF5INC and MET_HDF5LIB.
-It only uses MET_HDF5.
+      Additionally, MET does not use MET_HDF5INC and MET_HDF5LIB.
+      It only uses MET_HDF5.
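+
+      For example (the install path shown here is a placeholder):
+
+      .. code-block:: none
+
+         export MET_HDF5=/home/username/local/hdf5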
-Our online tutorial can help figure out what should be set and what the
-value should be:
-https://met.readthedocs.io/en/latest/Users_Guide/installation.html
+
+      Our online tutorial can help figure out what should be set and what
+      the value should be:
+      https://met.readthedocs.io/en/latest/Users_Guide/installation.html

NetCDF install issues
---------------------

-This example shows a problem with NetCDF in the make_install.log file:
+   .. dropdown:: Troubleshooting Help
+
+      This example shows a problem with NetCDF in the make_install.log file:

-.. code-block:: none
+      .. code-block:: none

-   /usr/bin/ld: warning: libnetcdf.so.11,
-   needed by /home/zzheng25/metinstall/lib/libnetcdf_c++4.so,
-   may conflict with libnetcdf.so.7
+         /usr/bin/ld: warning: libnetcdf.so.11,
+         needed by /home/zzheng25/metinstall/lib/libnetcdf_c++4.so,
+         may conflict with libnetcdf.so.7

-Below are examples of too many MET_NETCDF options:
+      Below are examples of too many MET_NETCDF options:

-.. code-block:: none
+      .. code-block:: none

-   MET_NETCDF='/home/username/metinstall/'
-   MET_NETCDFINC='/home/username/local/include'
-   MET_NETCDFLIB='/home/username/local/lib'
+         MET_NETCDF='/home/username/metinstall/'
+         MET_NETCDFINC='/home/username/local/include'
+         MET_NETCDFLIB='/home/username/local/lib'

-Either MET_NETCDF **OR** MET_NETCDFINC **AND** MET_NETCDFLIB need to be set.
-If the NetCDF include files are in */home/username/local/include* and the
-NetCDF library files are in */home/username/local/lib*, unset the
-MET_NETCDF environment variable, then run "make clean", reconfigure,
-and then run "make install" and "make test" again.
+      Either MET_NETCDF **OR** MET_NETCDFINC **AND** MET_NETCDFLIB
+      need to be set.
+      If the NetCDF include files are in */home/username/local/include* and
+      the NetCDF library files are in */home/username/local/lib*, unset the
+      MET_NETCDF environment variable, then run "make clean", reconfigure,
+      and then run "make install" and "make test" again.

Error while loading shared libraries
------------------------------------

-* Add the lib dir to your LD_LIBRARY_PATH. For example, if you receive
-  the following error: "./mode_analysis: error while loading shared
-  libraries: libgsl.so.19: cannot open shared object file:
-  No such file or directory", you should add the path to the
-  gsl lib (for example, */home/user/MET/gsl-2.1/lib*)
-  to your LD_LIBRARY_PATH.
+   .. dropdown:: Troubleshooting Help
+
+      * Add the lib dir to your LD_LIBRARY_PATH. For example, if you receive
+        the following error: "./mode_analysis: error while loading shared
+        libraries: libgsl.so.19: cannot open shared object file:
+        No such file or directory", you should add the path to the
+        gsl lib (for example, */home/user/MET/gsl-2.1/lib*)
+        to your LD_LIBRARY_PATH.

General troubleshooting
-----------------------

-* For configuration files used, make certain to use empty square brackets
-  (e.g. [ ]) to indicate no stratification is desired. Do NOT use empty
-  double quotation marks inside square brackets (e.g. [""]).
+   .. dropdown:: Troubleshooting Help
+
+      * In any configuration files used, make certain to use empty square
+        brackets (e.g. [ ]) to indicate no stratification is desired. Do
+        NOT use empty double quotation marks inside square brackets
+        (e.g. [""]).

-* Have you designated all the required command line arguments?
+      * Have you designated all the required command line arguments?

-* Try rerunning with a higher verbosity level. Increasing the verbosity
-  level to 4 or 5 prints much more diagnostic information to the screen.
+ * Try rerunning with a higher verbosity level. Increasing the verbosity + level to 4 or 5 prints much more diagnostic information to the screen. Where to get help ================= @@ -1890,8 +1981,7 @@ is available through the How to contribute code ====================== - + If you have code you would like to contribute, we will gladly consider your contribution. Please create a post in the `METplus GitHub Discussions Forum `_. - diff --git a/docs/Users_Guide/appendixB.rst b/docs/Users_Guide/appendixB.rst index e6f343bca4..ea3120c10e 100644 --- a/docs/Users_Guide/appendixB.rst +++ b/docs/Users_Guide/appendixB.rst @@ -25,6 +25,8 @@ The following map projections are currently supported in MET: * Semi Lat/Lon +.. _App_B-grid_specification_strings: + Grid Specification Strings ========================== diff --git a/docs/Users_Guide/appendixF.rst b/docs/Users_Guide/appendixF.rst index c490cc07e3..4a8fe86cae 100644 --- a/docs/Users_Guide/appendixF.rst +++ b/docs/Users_Guide/appendixF.rst @@ -7,118 +7,182 @@ Appendix F Python Embedding Introduction ============ -MET includes the ability to embed Python to a limited degree. Users may use Python scripts and whatever associated Python packages they wish in order to prepare 2D gridded data fields, point observations, and matched pairs as input to the MET tools. We fully expect that this degree of embedding will increase in the future. In addition, plans are in place to extend Python with MET in upcoming releases, allowing users to invoke MET tools directly from their Python script. While MET version 8.0 was built on Python 2.x, MET versions 9.0 and beyond are built on Python 3.6+. +MET includes the ability to embed Python to a limited degree. Users may use their own Python scripts and any associated Python packages they wish in order to prepare 2D gridded data fields, point observations, and matched pairs as input to the MET tools. We fully expect that this degree of embedding will increase in the future. In addition, plans are in place to extend Python with MET in upcoming releases, allowing users to invoke MET tools directly from their Python script. While MET version 8.0 was built on Python 2.x, MET versions 9.0 and beyond are built on Python 3.6+. .. _compiling_python_support: -Compiling Python Support -======================== +Compiling MET for Python Embedding +================================== -In order to use Python embedding, the user's local Python installation must have the C-language Python header files and libraries. Sometimes when Python is installed locally, these header files and libraries are deleted at the end of the installation process, leaving only the binary executable and run-time shared object files. But the Python header files and libraries must be present to compile support in MET for Python embedding. Assuming the requisite Python files are present, and that Python embedding is enabled when building MET (which is done by passing the **--enable-python** option to the **configure** command line), the MET C++ code will use these in the compilation process to link directly to the Python libraries. +In order to use Python embedding, a local Python installation must be available when compiling the MET software with the following requirements: -The local Python installation must also support a minimum set of required packages. The MET build includes some python wrapper scripts to facilitate the passing of data in memory as well as the reading and writing of temporary files. 
The packages required by those wrapper scripts are **sys, os, argparse, importlib, numpy and netCDF4**. While most of these are standard packages and readily available, numpy and netCDF4 may not be. Users are advised to confirm their availability prior to compiling MET with python embedding support. +1. Python version 3.10.4+ -In addition to the **configure** option mentioned above, three variables, **MET_PYTHON_BIN_EXE**, **MET_PYTHON_CC**, and **MET_PYTHON_LD**, must also be set for the configuration process. These may either be set as environment variables or as command line options to **configure**. These constants are passed as compiler command line options when building MET to enable the compiler to find the requisite Python executable, header files, and libraries in the user's local filesystem. Fortunately, Python provides a way to set these variables properly. This frees the user from the necessity of having any expert knowledge of the compiling and linking process. Along with the **Python** executable, there should be another executable called **python3-config**, whose output can be used to set these environment variables as follows: +2. C-language Python header files and libraries -• Set **MET_PYTHON_BIN_EXE** to the full path of the desired python executable. +3. **NumPy** Python package -• On the command line, run "**python3-config --cflags**". Set the value of **MET_PYTHON_CC** to the output of that command. +4. **netCDF4** Python package -• Again on the command line, run "**python3-config --ldflags**". Set the value of **MET_PYTHON_LD** to the output of that command. +5. **Pandas** Python package -Make sure that these are set as environment variables or that you have included them on the command line prior to running **configure**. +6. **Xarray** Python package +Users should be aware that in some cases, the C-language Python header files and libraries may be deleted at the end of the Python installation process, and they may need to confirm their availability prior to compiling MET. Once the user has confirmed the above requirements are satisfied, they can compile the MET software for Python embedding by passing the **\-\-enable-python** option to the **configure** script on the command line. This will link the MET C++ code directly to the Python libraries. The **NumPy** and **netCDF4** Python packages are required by the Python scripts included with the MET software that facilitate the passing of data in memory and the reading and writing of temporary files when Python embedding is used. -MET_PYTHON_EXE -============== +In addition to using **\-\-enable-python** with **configure** as mentioned above, the following environment variables must also be set prior to executing **configure**: **MET_PYTHON_BIN_EXE**, **MET_PYTHON_CC**, and **MET_PYTHON_LD**. These may either be set as environment variables or as command line options to **configure**. These environment variables are used when building MET to enable the compiler to find the requisite Python executable, header files, and libraries in the user's local filesystem. Fortunately, Python provides a way to set these variables properly. This frees the user from the necessity of having any expert knowledge of the compiling and linking process. 
Along with the **Python** executable in the user's local Python installation, there should be another executable called **python3-config**, whose output can be used to set these environment variables as follows:

-When Python embedding support is compiled, MET instantiates the Python interpreter directly. However, for users of highly configurable Conda environments, the Python instance set at compilation time may not be sufficient. Users may want to switch between Conda environments for which different packages are available. MET version 9.0 has been enhanced to address this need.
+• Set **MET_PYTHON_BIN_EXE** to the full path of the desired Python executable.

-The types of Python embedding supported in MET are described below. In all cases, by default, the compiled Python instance is used to execute the Python script. If the packages that script imports are not available for the compiled Python instance, users will encounter a runtime error. In the event of a runtime error, users are advised to set the **MET_PYTHON_EXE** environment variable and rerun. This environment variable should be set to the full path to the version of Python you would like to use. See an example below.
+• On the command line, run "**python3-config \-\-cflags**". Set the value of **MET_PYTHON_CC** to the output of that command.
+
+• Again on the command line, run "**python3-config \-\-ldflags \-\-embed**". Set the value of **MET_PYTHON_LD** to the output of that command.
+
+Make sure that these are set as environment variables or that you have included them on the command line prior to running **configure**.
+
+If a user attempts to invoke Python embedding with a version of MET that was not compiled with Python, MET will return an ERROR:
+
+.. code-block:: none
+   :caption: MET Errors Without Python Enabled
+
+   ERROR : Met2dDataFileFactory::new_met_2d_data_file() -> Support for Python has not been compiled!
+   ERROR : To run Python scripts, recompile with the --enable-python option.
+
+   - or -
+
+   ERROR : process_point_obs() -> Support for Python has not been compiled!
+   ERROR : To run Python scripts, recompile with the --enable-python option.
+
+Controlling Which Python MET Uses When Running
+==============================================
+
+When MET is compiled with Python embedding support, MET uses the Python executable in that Python installation by default when Python embedding is used. However, for users of highly configurable Python environments, the Python instance set at compilation time may not be sufficient. Users may want to use an alternate Python installation if they need additional packages not available in the Python installation used when compiling MET. In MET versions 9.0+, users have the ability to use a different Python executable when running MET than the version used when compiling MET by setting the environment variable **MET_PYTHON_EXE**.
+
+If a user's Python script requires packages that are not available in the Python installation used when compiling the MET software, they will encounter a runtime error when using MET. In this instance, the user will need to point MET at a different Python installation that provides the required packages for their script. It is the responsibility of the user to manage this Python installation, and one popular approach is to use a custom Anaconda (Conda) Python environment. 
Once the Python installation meeting the user's requirements is available, the user can force MET to use it by setting the **MET_PYTHON_EXE** environment variable to the full path of the Python executable in that installation. For example: .. code-block:: none + :caption: Setting MET_PYTHON_EXE - export MET_PYTHON_EXE=/usr/local/python3/bin/python3 + export MET_PYTHON_EXE=/usr/local/python3/bin/python3 -Setting this environment variable triggers slightly different processing logic in MET. Rather than executing the user-specified script with compiled Python instance directly, MET does the following: +Setting this environment variable triggers slightly different processing logic in MET than when MET uses the Python installation that was used when compiling MET. When using the Python installation that was used when compiling MET, Python is called directly and data are passed in memory from Python to the MET tools. When the user sets **MET_PYTHON_EXE**, MET does the following: 1. Wrap the user's Python script and arguments with a wrapper script (write_tmp_mpr.py, write_tmp_point.py, or write_tmp_dataplane.py) and specify the name of a temporary file to be written. 2. Use a system call to the **MET_PYTHON_EXE** Python instance to execute these commands and write the resulting data objects to a temporary ASCII or NetCDF file. -3. Use the compiled Python instance to run a wrapper script (read_tmp_ascii.py or read_tmp_dataplane.py) to read data from that temporary file. +3. Use the Python instance that MET was compiled with to run a wrapper script (read_tmp_ascii.py or read_tmp_dataplane.py) to read data from that temporary file. -With this approach, users should be able to execute Python scripts in their own custom environments. +With this approach, users are able to execute Python scripts using their own custom Python installations. -.. _pyembed-2d-data: +.. _pyembed-data-structures: -Python Embedding for 2D data -============================ +Data Structures Supported by Python Embedding +============================================= -We now describe how to write Python scripts so that the MET tools may extract 2D gridded data fields from them. Currently, MET offers two ways to interact with Python scripts: by using NumPy N-dimensional arrays (ndarrays) or by using Xarray DataArrays. The interface to be used (NumPy or Xarray) is specified on the command line (more on this later). The user's scripts can use any Python libraries that are supported by the local Python installation, or any personal or institutional libraries or code that are desired in order to implement the Python script, so long as the data has been loaded into either a NumPy ndarray or an Xarray DataArray by the end of the script. This offers advantages when using data file formats that MET does not directly support. If there is Python code to read the data format, the user can use those tools to read the data, and then copy the data into a NumPy ndarray or an Xarray DataArray. MET can then ingest the data via the Python script. Note that whether a NumPy ndarray or an Xarray DataArray is used, the data should be stored as double precision floating point numbers. Using different data types, such as integers or single precision floating point numbers, will lead to unexpected results in MET. +Python embedding with MET tools offers support for three different types of data structures: -**Using NumPy N-dimensional Arrays** +1. Two-dimensional (2D) gridded dataplanes -The data must be loaded into a 2D NumPy ndarray named **met_data**. 
In addition there must be a Python dictionary named **attrs** which contains metadata such as timestamps, grid projection and other information. Here is an example **attrs** dictionary: +2. Point data conforming to the :ref:`MET 11-column format` -.. code-block:: none +3. Matched-pair data conforming to the :ref:`MET MPR Line Type` - attrs = { - - 'valid': '20050807_120000', - 'init': '20050807_000000', - 'lead': '120000', - 'accum': '120000', - - 'name': 'Foo', - 'long_name': 'FooBar', - 'level': 'Surface', - 'units': 'None', - - # Define 'grid' as a string or a dictionary - - 'grid': { - 'type': 'Lambert Conformal', - 'hemisphere': 'N', - 'name': 'FooGrid', - 'scale_lat_1': 25.0, - 'scale_lat_2': 25.0, - 'lat_pin': 12.19, - 'lon_pin': -135.459, - 'x_pin': 0.0, - 'y_pin': 0.0, - 'lon_orient': -95.0, - 'd_km': 40.635, - 'r_km': 6371.2, - 'nx': 185, - 'ny': 129, - } - - } - -In the **attrs** dictionary, valid time, initialization time, lead time and accumulation time (if any) must be indicated by strings. Valid and initialization times must be given in YYYYMMDD[_HH[MMSS]] format, and lead and accumulation times must be given in HH[MMSS] format, where the square brackets indicate optional elements. The dictionary must also include strings for the name, long_name, level, and units to describe the data. The rest of the **attrs** dictionary gives the grid size and projection information in the same format that is used in the netCDF files written out by the MET tools. Those entries are also listed below. Note that the **grid** entry in the **attrs** dictionary can either be defined as a string or as a dictionary itself. - -If specified as a string, **grid** can be defined as follows: - -• As a named grid: +Details for each of these data structures are provided below. + +.. note:: + + All sample commands and directories listed below are relative to the top level of the MET source code directory. + +.. _pyembed-2d-data: + +Python Embedding for 2D Gridded Dataplanes +------------------------------------------ + +Currently, MET supports two different types of Python objects for two-dimensional gridded dataplanes: NumPy N-dimensional arrays (ndarrays) and Xarray DataArrays. The keyword **PYTHON_NUMPY** is used on the command line when using ndarrays, and **PYTHON_XARRAY** when using Xarray DataArrays. Example commands are included at the end of this section. + +Python Script Requirements for 2D Gridded Dataplanes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +1. The data must be stored in a variable with the name **met_data** + +2. The **met_data** variable must be of type **Xarray DataArray** or **NumPy N-D Array** + +3. The data inside the **met_data** variable must be **double precision floating point** type + +4. A Python dictionary named **attrs** must be defined in the user's script and contain the :ref:`required attributes` + +.. _pyembed-2d-attrs: + +Required Attributes for 2D Gridded Dataplanes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The **attrs** dictionary must contain the following information: + +.. 
list-table:: 2D Dataplane Attributes + :widths: 5 5 10 + :header-rows: 1 + + * - key + - description + - data type/format + * - valid + - valid time + - string (YYYYMMDD_HHMMSS) + * - init + - initialization time + - string (YYYYMMDD_HHMMSS) + * - lead + - forecast lead + - string (HHMMSS) + * - accum + - accumulation interval + - string (HHMMSS) + * - name + - variable name + - string + * - long_name + - variable long name + - string + * - level + - variable level + - string + * - units + - variable units + - string + * - grid + - grid information + - string or dict + +.. note:: + + Oftentimes, Xarray DataArray objects come with their own set of attributes available as a property. To avoid conflict with the required attributes + for MET, it is advised to strip these attributes and rely on the **attrs** dictionary defined in your script. + +The grid entry in the **attrs** dictionary must contain the grid size and projection information in the same format that is used in the netCDF files written out by the MET tools. The value of this item in the dictionary can be either a string or another dictionary. Examples of the **grid** entry defined as a string are: + +• Using a named grid supported by MET: .. code-block:: none + :caption: Named Grid - 'grid': 'G212' + 'grid': 'G212' • As a grid specification string, as described in :ref:`appendixB`: .. code-block:: none + :caption: Grid Specification String - 'grid': 'lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N' + 'grid': 'lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N' • As the path to an existing gridded data file: .. code-block:: none + :caption: Grid From File - 'grid': '/path/to/sample_data.grib' + 'grid': '/path/to/sample_data.grib' -When specified as a dictionary, the contents of the **grid** dictionary vary based on the grid **type** string. The entries for the supported grid types are described below: +When specified as a dictionary, the contents of the **grid** entry vary based upon the grid **type**. The required elements for supported grid types are: • **Lambert Conformal** grid dictionary entries: @@ -188,103 +252,331 @@ When specified as a dictionary, the contents of the **grid** dictionary vary bas Additional information about supported grids can be found in :ref:`appendixB`. -**Using Xarray DataArrays** +Finally, an example **attrs** dictionary is shown below: -To use Xarray DataArrays, a similar procedure to the NumPy case is followed. The Xarray DataArray can be represented as a NumPy N-dimensional array (ndarray) via the **values** property of the DataArray, and an **attrs** property that contains a dictionary of attributes. The user must name the Xarray DataArray to be **met_data**. When one of the MET tools runs the Python script, it will look for an Xarray DataArray named **met_data**, and will retrieve the data and metadata from the **values** and **attrs** properties, respectively, of the Xarray DataArray. The Xarray DataArray **attrs** dictionary is populated in the same way as for the NumPy interface (please see :ref:`pyembed-2d-data` for requirements of each entry in the **attrs** dictionary). The **values** NumPy ndarray property of the Xarray DataArray is also populated in the same way as the NumPy case. +..
code-block:: none + :caption: Sample Attrs Dictionary + + attrs = { + + 'valid': '20050807_120000', + 'init': '20050807_000000', + 'lead': '120000', + 'accum': '120000', + + 'name': 'Foo', + 'long_name': 'FooBar', + 'level': 'Surface', + 'units': 'None', + + # Define 'grid' as a string or a dictionary + + 'grid': { + 'type': 'Lambert Conformal', + 'hemisphere': 'N', + 'name': 'FooGrid', + 'scale_lat_1': 25.0, + 'scale_lat_2': 25.0, + 'lat_pin': 12.19, + 'lon_pin': -135.459, + 'x_pin': 0.0, + 'y_pin': 0.0, + 'lon_orient': -95.0, + 'd_km': 40.635, + 'r_km': 6371.2, + 'nx': 185, + 'ny': 129, + } + } + +Running Python Embedding for 2D Gridded Dataplanes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +On the command line for any of the MET tools that will obtain its data from a Python script rather than directly from a data file, the user should specify either **PYTHON_NUMPY** or **PYTHON_XARRAY** wherever a (forecast or observation) data file would normally be given. Then in the **name** entry of the config file dictionaries for the forecast or observation data (typically used to specify the field name from the input data file), the user should list the **full path** to the Python script to be run followed by any command line arguments for that script. Note that for tools like MODE that take two data files, it is entirely possible to use **PYTHON_NUMPY** for one file and **PYTHON_XARRAY** for the other. + +Listed below is an example of running the Plot-Data-Plane tool to call a Python script for data that is included with the MET release tarball. Assuming the MET executables are in your path, this example may be run from the top-level MET source code directory: -.. note:: - Currently, MET does not support Xarray Dataset structures. If you have a Dataset in Xarray, you can create a DataArray of a single variable using: +.. code-block:: none + :caption: plot_data_plane Python Embedding - met_data = xr.DataArray(ds.varname,attrs=ds.attrs) + plot_data_plane PYTHON_NUMPY fcst.ps \ + 'name="scripts/python/examples/read_ascii_numpy.py data/python/fcst.txt FCST";' \ + -title "Python enabled plot_data_plane" + +The first argument for the Plot-Data-Plane tool is the gridded data file to be read. When calling a Python script that has a two-dimensional gridded dataplane stored in a NumPy N-D array object, set this to the constant string **PYTHON_NUMPY**. The second argument is the name of the output PostScript file to be written. The third argument is a string describing the data to be plotted. When calling a Python script, set **name** to the full path of the Python script to be run along with any command line arguments for that script. Lastly, the **-title** option is used to add a title to the plot. Note that any print statements included in the Python script will be printed to the screen. The above example results in the following log messages: - | ds = your Dataset name - | varname = variable name in the Dataset you'd like to use in MET +..
code-block:: none + + DEBUG 1: Opening data file: PYTHON_NUMPY + Input File: 'data/python/fcst.txt' + Data Name : 'FCST' + Data Shape: (129, 185) + Data Type: dtype('float64') + Attributes: {'name': 'FCST', 'long_name': 'FCST_word', + 'level': 'Surface', 'units': 'None', + 'init': '20050807_000000', 'valid': '20050807_120000', + 'lead': '120000', 'accum': '120000' + 'grid': {...} } + DEBUG 1: Creating postscript file: fcst.ps -Special Case for Ensemble-Stat, Series-Analysis, and MTD +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -It remains to discuss command lines and config files. Two methods for specifying the Python command and input file name are supported. +The Ensemble-Stat, Series-Analysis, MTD and Gen-Ens-Prod tools all have the ability to read multiple input files. Because of this feature, a different approach to Python embedding is required. A typical use of these tools is to provide a list of files on the command line. For example: -**Python Embedding Option 1:** +.. code-block:: + :caption: Gen-Ens-Prod Command Line -On the command line for any of the MET tools which will be obtaining its data from a Python script rather than directly from a data file, the user should specify either PYTHON_NUMPY or PYTHON_XARRAY wherever a (forecast or observation) data file name would normally be given. Then in the **name** entry of the config file dictionaries for the forecast or observation data, the user should list the Python script to be run followed by any command line arguments for that script. Note that for tools like MODE that take two data files, it would be entirely possible to use the NumPy interface for one file and the Xarray interface for the other. + gen_ens_prod ens1.nc ens2.nc ens3.nc ens4.nc -out ens_prod.nc -config GenEnsProd_config -___________________ +In this case, a user is passing 4 ensemble members to Gen-Ens-Prod to be evaluated, and each member is in a separate file. If a user wishes to use Python embedding to process the ensemble input files, then the exact same command is used; however, special modifications are needed inside the GenEnsProd_config file. In the config file dictionary, the user must set the **file_type** entry to either **PYTHON_NUMPY** or **PYTHON_XARRAY** to activate the Python embedding for these tools. Then, in the **name** entry of the config file dictionaries for the forecast or observation data, the user must list the **full path** to the Python script to be run. However, in the Python command, replace the name of the input gridded data file to the Python script with the constant string **MET_PYTHON_INPUT_ARG**. When looping over all of the input files, the MET tools will replace that constant **MET_PYTHON_INPUT_ARG** with the path to the input file currently being processed and, optionally, any command line arguments for the Python script. Here is what this looks like in the GenEnsProd_config file for the above example: -Listed below is an example of running the Plot-Data-Plane tool to call a Python script for data that is included with the MET release tarball. Assuming the MET executables are in your path, this example may be run from the top-level MET source code directory. +.. code-block:: + :caption: Gen-Ens-Prod MET_PYTHON_INPUT_ARG Config
code-block:: none + file_type = PYTHON_NUMPY; + field = [ { name = "gen_ens_prod_pyembed.py MET_PYTHON_INPUT_ARG"; } ]; - plot_data_plane PYTHON_NUMPY fcst.ps \ - 'name="scripts/python/read_ascii_numpy.py data/python/fcst.txt FCST";' \ - -title "Python enabled plot_data_plane" - -The first argument for the Plot-Data-Plane tool is the gridded data file to be read. When calling a NumPy Python script, set this to the constant string PYTHON_NUMPY. The second argument is the name of the output PostScript file to be written. The third argument is a string describing the data to be plotted. When calling a Python script, set **name** to the Python script to be run along with command line arguments. Lastly, the **-title** option is used to add a title to the plot. Note that any print statements included in the Python script will be printed to the screen. The above example results in the following log messages. +In the event the user requires command line arguments to their Python script, they must be included alongside the file names separated by a delimiter. For example, the above Gen-Ens-Prod command with command line arguments for Python would look like: -.. code-block:: none - - DEBUG 1: Opening data file: PYTHON_NUMPY - Input File: 'data/python/fcst.txt' - Data Name : 'FCST' - Data Shape: (129, 185) - Data Type: dtype('float64') - Attributes: {'name': 'FCST', 'long_name': 'FCST_word', - 'level': 'Surface', 'units': 'None', - 'init': '20050807_000000', 'valid': '20050807_120000', - 'lead': '120000', 'accum': '120000' - 'grid': {...} } - DEBUG 1: Creating postscript file: fcst.ps +.. code-block:: + :caption: Gen-Ens-Prod Command Line with Python Args + + gen_ens_prod ens1.nc,arg1,arg2 ens2.nc,arg1,arg2 ens3.nc,arg1,arg2 ens4.nc,arg1,arg2 \ + -out ens_prod.nc -config GenEnsProd_config + +In this case, the user's Python script will receive "ens1.nc,arg1,arg2" as a single command line argument for each execution of the Python script (i.e., once per file). The user must parse this argument inside their Python script to obtain **arg1** and **arg2** as separate arguments. The list of input files and, optionally, any command line arguments can be written to a single file called **file_list** that is substituted for the file names and command line arguments. For example: + +.. code-block:: + :caption: Gen-Ens-Prod File List -**Python Embedding Option 2 using MET_PYTHON_INPUT_ARG:** + echo "ens1.nc,arg1,arg2 ens2.nc,arg1,arg2 ens3.nc,arg1,arg2 ens4.nc,arg1,arg2" > file_list + gen_ens_prod file_list -out ens_prod.nc -config GenEnsProd_config -The second option was added to support the use of Python embedding in tools which read multiple input files. Option 1 reads a single field of data from a single source, whereas tools like Ensemble-Stat, Series-Analysis, and MTD read data from multiple input files. While option 2 can be used in any of the MET tools, it is required for Python embedding in Ensemble-Stat, Series-Analysis, and MTD. +Finally, the above tools do not require data files to be present on a local disk. If the user wishes, their Python script can obtain data from other sources based upon only the command line arguments to their Python script. For example: -On the command line for any of the MET tools, specify the path to the input gridded data file(s) as the usage statement for the tool indicates. Do **not** substitute in PYTHON_NUMPY or PYTHON_XARRAY on the command line.
In the config file dictionary set the **file_type** entry to either PYTHON_NUMPY or PYTHON_XARRAY to activate the Python embedding logic. Then, in the **name** entry of the config file dictionaries for the forecast or observation data, list the Python script to be run followed by any command line arguments for that script. However, in the Python command, replace the name of the input gridded data file with the constant string MET_PYTHON_INPUT_ARG. When looping over multiple input files, the MET tools will replace that constant **MET_PYTHON_INPUT_ARG** with the path to the file currently being processed. The example plot_data_plane command listed below yields the same result as the example shown above, but using the option 2 logic instead. +.. code-block:: + :caption: Gen-Ens-Prod Python Args Only -The Ensemble-Stat, Series-Analysis, and MTD tools support the use of file lists on the command line, as do some other MET tools. Typically, the ASCII file list contains a list of files which actually exist on your machine and should be read as input. For Python embedding, these tools loop over the ASCII file list entries, set MET_PYTHON_INPUT_ARG to that string, and execute the Python script. This only allows a single command line argument to be passed to the Python script. However multiple arguments may be concatenated together using some delimiter, and the Python script can be defined to parse arguments using that delimiter. When file lists are constructed in this way, the entries will likely not be files which actually exist on your machine. In this case, users should place the constant string "file_list" on the first line of their ASCII file lists. This will ensure that the MET tools will parse the file list properly. + gen_ens_prod 20230101,0 20230102,0 20230103,0 -out ens_prod.nc -config GenEnsProd_config + +In the above command, each of the arguments "20230101,0", "20230102,0", and "20230103,0" is provided to the user's Python script in separate calls. Then, inside the Python script, these arguments are used to construct a filename, a query to a data server, or some other mechanism to retrieve the desired data, and the script formats those data the way MET expects before they are passed to Gen-Ens-Prod. + +Examples of Python Embedding for 2D Gridded Dataplanes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**Grid-Stat with Python embedding for forecast and observations** .. code-block:: none + :caption: GridStat Command with Dual Python Embedding + + grid_stat 'PYTHON_NUMPY' 'PYTHON_NUMPY' GridStat_config -outdir /path/to/output + +.. code-block:: none + :caption: GridStat Config with Dual Python Embedding + + fcst = { + field = [ + { + name = "/path/to/fcst/python/script.py python_arg1 python_arg2"; + } + ]; + } + + obs = { + field = [ + { + name = "/path/to/obs/python/script.py python_arg1 python_arg2"; + } + ]; + } .. _pyembed-point-obs-data: Python Embedding for Point Observations -======================================= - -The ASCII2NC tool supports the "-format python" option. With this option, point observations may be passed as input. An example of this is shown below: +--------------------------------------- + +MET also supports point observation data supplied in the :ref:`MET 11-column format`.
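To make the mapping concrete before the formal requirements are listed, below is a minimal, hypothetical sketch of a Python script that builds a single observation in the 11-column format. The station and observation values are illustrative placeholders only; the variable name **point_data** and the column layout are defined in the requirements and table that follow.

.. code-block:: Python
   :caption: Hypothetical 11-Column Point Observation Sketch

   import pandas as pd

   # One placeholder surface temperature observation; each key matches a
   # column of the MET 11-column point observation format described below.
   df = pd.DataFrame({
       'typ': ['ADPSFC'],           # Message Type
       'sid': ['KDEN'],             # Station ID (placeholder)
       'vld': ['20230101_120000'],  # Valid Time (YYYYMMDD_HHMMSS)
       'lat': [39.85],              # Latitude (Degrees North)
       'lon': [-104.66],            # Longitude (Degrees East)
       'elv': [1656.0],             # Elevation (MSL)
       'var': ['TMP'],              # Variable name
       'lvl': [2.0],                # Level
       'hgt': [2.0],                # Height
       'qc':  ['NA'],               # QC string
       'obs': [273.15],             # Observation Value
   })

   # MET reads the observations from a variable named 'point_data',
   # a list-of-lists created from the DataFrame values.
   point_data = df.values.tolist()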
+ +Python Script Requirements for Point Observations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +1. The data must be stored in a variable with the name **point_data** + +2. The **point_data** variable must be a Python list representation of a NumPy N-D Array created from a Pandas DataFrame + +3. The **point_data** variable must have data in each of the 11 columns required for the MET tools even if the value is NA + +To provide the data that MET expects for point observations, the user is encouraged, when designing their Python script, to consider how to map their observations into the MET 11-column format. Then, the user can populate their observations into a Pandas DataFrame with the following column names and dtypes: + +.. list-table:: Point Observation DataFrame Columns and Dtypes + :widths: 5 5 10 + :header-rows: 1 + + * - column name + - data type (dtype) + - description + * - typ + - string + - Message Type + * - sid + - string + - Station ID + * - vld + - string + - Valid Time (YYYYMMDD_HHMMSS) + * - lat + - numeric + - Latitude (Degrees North) + * - lon + - numeric + - Longitude (Degrees East) + * - elv + - numeric + - Elevation (MSL) + * - var + - string + - Variable name (or GRIB code) + * - lvl + - numeric + - Level + * - hgt + - numeric + - Height (MSL or AGL) + * - qc + - string + - QC string + * - obs + - numeric + - Observation Value + +To create the variable for MET, use the **.values** property of the Pandas DataFrame and the **.tolist()** method of the NumPy N-D Array. For example: + +.. code-block:: Python + :caption: Convert Pandas DataFrame to MET variable + + import pandas as pd + + # Pandas DataFrame holding the user's point observations + my_dataframe = pd.DataFrame() + + # Convert to MET variable + point_data = my_dataframe.values.tolist() + +Running Python Embedding for Point Observations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Point2Grid, Plot-Point-Obs, Ensemble-Stat, and Point-Stat tools support Python embedding for point observations. Python embedding for these tools can be invoked directly on the command line by replacing the input MET NetCDF point observation file name with the **full path** to the Python script and any arguments. The Python command must begin with the prefix **PYTHON_NUMPY=**. The full command should be enclosed in quotes to prevent embedded whitespace from causing parsing errors. An example of this is shown below for Plot-Point-Obs: .. code-block:: none + :caption: plot_point_obs with Python Embedding - ascii2nc -format python \ - "MET_BASE/python/read_ascii_point.py sample_ascii_obs.txt" \ - sample_ascii_obs_python.nc + plot_point_obs \ + "PYTHON_NUMPY=scripts/python/examples/read_ascii_point.py data/sample_obs/ascii/sample_ascii_obs.txt" \ + output_image.ps -The Point2Grid, Plot-Point-Obs, Ensemble-Stat, and Point-Stat tools also process point observations. They support Python embedding of point observations directly on the command line by replacing the input MET NetCDF point observation file name with the Python command to be run. The Python command must begin with the prefix 'PYTHON_NUMPY=' and be followed by the path to the User's Python script and any arguments. The full command should be enclosed in single quotes to prevent embedded whitespace from causing parsing errors. An example of this is shown below: +The ASCII2NC tool also supports Python embedding; however, invoking it differs slightly from other MET tools. For ASCII2NC, Python embedding is used by providing the "-format python" option on the command line. With this option, point observations may be passed as input.
An example of this is shown below: .. code-block:: none + :caption: ascii2nc with Python Embedding + + ascii2nc -format python \ + "scripts/python/examples/read_ascii_point.py data/sample_obs/ascii/sample_ascii_obs.txt" \ + sample_ascii_obs_python.nc + +Both of the above examples use the **read_ascii_point.py** example script which is included with the MET code. It reads ASCII data in MET's 11-column point observation format and stores it in a Pandas DataFrame to be read by the MET tools using Python embedding for point data. The **read_ascii_point.py** example script can be found in: - plot_point_obs \ - "PYTHON_NUMPY=MET_BASE/python/read_ascii_point.py sample_ascii_obs.txt" \ - output_image.ps -Both of the above examples use the **read_ascii_point.py** sample script which is included with the MET code. It reads ASCII data in MET's 11-column point observation format and stores it in a Pandas DataFrame to be read by the MET tools using Python embedding for point data. The **read_ascii_point.py** sample script can be found in: • MET installation directory in *scripts/python/examples*. • `MET GitHub repository `_ in *scripts/python/examples*. -• MET installation directory in *MET_BASE/python*. -• `MET GitHub repository `_ in *met/scripts/python*. +Examples of Python Embedding for Point Observations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +**Point-Stat with Python embedding for forecast and observations** + +.. code-block:: none + :caption: PointStat Command with Dual Python Embedding + + point_stat 'PYTHON_NUMPY' 'PYTHON_NUMPY=/path/to/obs/python/script.py python_arg1 python_arg2' PointStat_config -outdir /path/to/output + +.. code-block:: none + :caption: PointStat Config with Dual Python Embedding + + fcst = { + field = [ + { + name = "/path/to/fcst/python/script.py python_arg1 python_arg2"; + } + ]; + } .. _pyembed-mpr-data: Python Embedding for MPR Data -============================= +----------------------------- + +The MET Stat-Analysis tool also supports Python embedding. By using the command line option **-lookin python**, Stat-Analysis can read matched pair (MPR) data formatted in the MET MPR line-type format via Python. + +.. note:: + + This functionality assumes you are passing only the MPR line type information, and not other statistical line types. Sometimes users configure MET tools to write the MPR line type to the STAT file (along with all other line types). The example below will not work for those files; it works only for files from MET tools containing just the MPR line type information or, optionally, for data in another format that the user adapts to the MPR line type format. + +Python Script Requirements for MPR Data +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +1. The data must be stored in a variable with the name **mpr_data** + +2. The **mpr_data** variable must be a Python list representation of a NumPy N-D Array created from a Pandas DataFrame + +3. The **mpr_data** variable must have data in **exactly** 36 columns, corresponding to the combination of the :ref:`common STAT output` and the :ref:`MPR line type output`. + +If a user does not have an existing MPR line type file created by the MET tools, they will need to map their data into the 36 columns expected by Stat-Analysis for the MPR line type data. If a user already has MPR line type files, the most direct way for a user to read MPR line type data is to model their Python script after the sample **read_ascii_mpr.py** script.
Sample code is included here for convenience: + +.. code-block:: Python + :caption: Reading MPR line types with Pandas + + import pandas as pd + + # Open the MPR line type file, where input_mpr_file holds the path to + # a file containing only MPR line type output. Skip the header row and + # the first (VERSION) column so that exactly 36 columns are read. + mpr_dataframe = pd.read_csv(input_mpr_file, + header=None, + delim_whitespace=True, + keep_default_na=False, + skiprows=1, + usecols=range(1,37), + dtype=str) + + # Convert to the variable MET expects + mpr_data = mpr_dataframe.values.tolist() + +Running Python Embedding for MPR Data +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The Stat-Analysis tool supports the "-lookin python" option. With this option, matched pair (MPR) data may be passed as input. An example of this is provided in :numref:`StA-pyembed`. That example uses the **read_ascii_mpr.py** sample script which is included with the MET code. It reads MPR data and stores it in a Pandas dataframe to be read by the Stat-Analysis tool with Python. +Stat-Analysis can be run using the **-lookin python** command line option: + +.. code-block:: none + :caption: Stat-Analysis with Python Embedding of MPR Data + + stat_analysis \ + -lookin python scripts/python/examples/read_ascii_mpr.py point_stat_mpr.txt \ + -job aggregate_stat -line_type MPR -out_line_type CNT \ + -by FCST_VAR,FCST_LEV + +In this example, rather than passing the MPR output lines from Point-Stat directly into Stat-Analysis (which is the typical approach), the **read_ascii_mpr.py** Python embedding script reads that file and passes the data to Stat-Analysis. The aggregate_stat job is defined on the command line and CNT statistics are derived from the MPR input data. Separate CNT statistics are computed for each unique combination of FCST_VAR and FCST_LEV present in the input. The **read_ascii_mpr.py** sample script can be found in: -• MET installation directory in *MET_BASE/python*. +• MET installation directory in *scripts/python/examples*. + +• `MET GitHub repository `_ in *scripts/python/examples*. +MET Python Package +================== + +MET comes with a Python package that provides core functionality for the Python embedding capability. In rare cases, advanced users may find the classes and functions included with this Python package useful. + +To utilize the MET Python package **standalone** when NOT using it with Python embedding, users must add the following to their **PYTHONPATH** environment variable: + +.. code-block:: + :caption: MET Python Module PYTHONPATH + + export PYTHONPATH={MET_INSTALL_DIR}/share/met/python -• `MET GitHub repository `_ in *met/scripts/python*. +where {MET_INSTALL_DIR} is the top level directory where MET is installed, for example **/usr/local/met**. diff --git a/docs/Users_Guide/config_options.rst b/docs/Users_Guide/config_options.rst index 93a7a96229..3239becefc 100644 --- a/docs/Users_Guide/config_options.rst +++ b/docs/Users_Guide/config_options.rst @@ -1541,6 +1541,8 @@ Point-Stat and Ensemble-Stat, the reference time is the forecast valid time. end = 5400; } +.. _config_options-mask: + mask ^^^^ @@ -1562,14 +1564,26 @@ in the following ways: * The "poly" entry contains a comma-separated list of files that define verification masking regions. These masking regions may be specified in - two ways: as a lat/lon polygon or using a gridded data file such as the - NetCDF output of the Gen-Vx-Mask tool. + two ways: in an ASCII file containing lat/lon points defining the mask polygon, + or using a gridded data file such as the NetCDF output of the Gen-Vx-Mask tool.
+ Some details for each of these options are described below: + + * If providing an ASCII file containing the lat/lon points defining the mask + polygon, the file must contain a name for the region followed by the latitude + (degrees north) and longitude (degrees east) for each vertex of the polygon. + The values are separated by whitespace (e.g. spaces or newlines), and the + first and last polygon points are connected. + The general form is "poly_name lat1 lon1 lat2 lon2... latn lonn". + Here is an example of a rectangle consisting of 4 points: + + .. code-block:: none + :caption: ASCII Rectangle Polygon Mask - * An ASCII file containing a lat/lon polygon. - Latitude in degrees north and longitude in degrees east. - The first and last polygon points are connected. - For example, "MET_BASE/poly/EAST.poly" which consists of n points: - "poly_name lat1 lon1 lat2 lon2... latn lonn" + RECTANGLE + 25 -120 + 55 -120 + 55 -70 + 25 -70 Several masking polygons used by NCEP are predefined in the installed *share/met/poly* directory. Creating a new polygon is as @@ -1582,7 +1596,8 @@ in the following ways: observation point falls within the polygon defined is done in x/y grid space. - * The NetCDF output of the gen_vx_mask tool. + * The NetCDF output of the gen_vx_mask tool. Please see :numref:`masking` + for more details. * Any gridded data file that MET can read may be used to define a verification masking region. Users must specify a description of the @@ -1591,7 +1606,7 @@ in the following ways: applied, any grid point where the resulting field is 0, the mask is turned off. Any grid point where it is non-zero, the mask is turned on. - For example, "sample.grib {name = \"TMP\"; level = \"Z2\";} >273" + For example, "sample.grib {name = \"TMP\"; level = \"Z2\";} >273" * The "sid" entry is an array of strings which define groups of observation station ID's over which to compute statistics. Each entry diff --git a/docs/Users_Guide/installation.rst b/docs/Users_Guide/installation.rst index 9db4d6993f..a198fb8fab 100644 --- a/docs/Users_Guide/installation.rst +++ b/docs/Users_Guide/installation.rst @@ -21,6 +21,8 @@ Programming Languages The MET package, including MET-TC, is written primarily in C/C++ in order to be compatible with an extensive verification code base in C/C++ already in existence. In addition, the object-based MODE and MODE-TD verification tools rely heavily on the object-oriented aspects of C++. Knowledge of C/C++ is not necessary to use the MET package. The MET package has been designed to be highly configurable through the use of ASCII configuration files, enabling a great deal of flexibility without the need for source code modifications. +With the release of MET-11.1.0, C++11 is now the minimum required version of the C++ programming language standard. + NCEP's BUFRLIB is written entirely in Fortran. The portion of MET that handles the interface to the BUFRLIB for reading PrepBUFR point observation files is also written in Fortran. The MET package is intended to be a tool for the modeling community to use and adapt. As users make upgrades and improvements to the tools, they are encouraged to offer those upgrades to the broader community by offering feedback to the developers. @@ -122,7 +124,7 @@ MET Directory Structure The top-level MET directory consists of Makefiles, configuration files, and several subdirectories. The top-level Makefile and configuration files control how the entire toolkit is built. 
Instructions for using these files to build MET can be found in :numref:`Install_Building-the-MET`. -When MET has been successfully built and installed, the installation directory contains two subdirectories. The *bin/* directory contains executables for each module of MET as well as several plotting utilities. The *share/met/* directory contains many subdirectories with data required at runtime and a subdirectory of sample R scripts utilities. The *colortables/*, *map/*, and *ps/* subdirectories contain data used in creating PostScript plots for several MET tools. The *poly/* subdirectory contains predefined lat/lon polyline regions for use in selecting regions over which to verify. The polylines defined correspond to verification regions used by NCEP as described in :numref:`Appendix B, Section %s `. The *config/* directory contains default configuration files for the MET tools. The *python/* subdirectory contains sample scripts used in Python embedding (:numref:`Appendix F, Section %s `). The *table_files/* and *tc_data/* subdirectories contain GRIB table definitions and tropical cyclone data, respectively. The *Rscripts/* subdirectory contains a handful of plotting graphic utilities for MET-TC. These are the same Rscripts that reside under the top-level MET *scripts/Rscripts* directory, other than it is the installed location. The *wrappers/* subdirectory contains code used in Python embedding (:numref:`Appendix F, Section %s `). +When MET has been successfully built and installed, the installation directory contains two subdirectories. The *bin/* directory contains executables for each module of MET as well as several plotting utilities. The *share/met/* directory contains many subdirectories with data required at runtime and a subdirectory of sample R script utilities. The *colortables/*, *map/*, and *ps/* subdirectories contain data used in creating PostScript plots for several MET tools. The *poly/* subdirectory contains predefined lat/lon polyline regions for use in selecting regions over which to verify. The polylines defined correspond to verification regions used by NCEP as described in :numref:`Appendix B, Section %s `. The *config/* directory contains default configuration files for the MET tools. The *python/* subdirectory contains Python scripts. The *python/examples* subdirectory contains sample scripts used in Python embedding (:numref:`Appendix F, Section %s `). The *python/pyembed/* subdirectory contains code used in Python embedding (:numref:`Appendix F, Section %s `). The *table_files/* and *tc_data/* subdirectories contain GRIB table definitions and tropical cyclone data, respectively. The *Rscripts/* subdirectory contains a handful of plotting graphic utilities for MET-TC. These are the same Rscripts that reside under the top-level MET *scripts/Rscripts* directory, except that this is the installed location. The *data/* directory contains several configuration and static data files used by MET. The *sample_fcst/* and *sample_obs/* subdirectories contain sample data used by the test scripts provided in the *scripts/* directory. @@ -209,7 +211,7 @@ The following environment variables should also be set: MET_PYTHON_CC='-I/usr/include/python3.6' MET_PYTHON_LD='-L/usr/lib/python3.6/config-x86_64-linux-gnu -lpython3.6m' - Note that this version of Python must include support for a minimum set of required pacakges. For more information about Python support in MET, including the list of required packages, please refer to :numref:`Appendix F, Section %s `.
+ Note that this version of Python must include support for a minimum set of required packages. For more information about Python support in MET, including the list of required packages, please refer to :numref:`Appendix F, Section %s `. * If compiling MODIS-Regrid and/or lidar2nc, set $MET_HDF to point to the main HDF4 directory, or set $MET_HDFINC to point to the directory with the HDF4 include files and set $MET_HDFLIB to point to the directory with the HDF4 library files. Also, set $MET_HDFEOS to point to the main HDF EOS directory, or set $MET_HDFEOSINC to point to the directory with the HDF EOS include files and set $MET_HDFEOSLIB to point to the directory with the HDF EOS library files. diff --git a/docs/Users_Guide/masking.rst b/docs/Users_Guide/masking.rst index 4289128f43..a4e7345643 100644 --- a/docs/Users_Guide/masking.rst +++ b/docs/Users_Guide/masking.rst @@ -31,22 +31,23 @@ The usage statement for the Gen-Vx-Mask tool is shown below: [-height n] [-width n] [-shapeno n] + [-shape_str name string] [-value n] [-name string] [-log file] [-v level] [-compress level] -gen_vx_mask has four required arguments and can take optional ones. Note, -type string (masking type) was previously optional but is now required. +gen_vx_mask has four required arguments and can take optional ones. Note that **-type string** (masking type) was previously optional but is now required. Required arguments for gen_vx_mask ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1. The **input_file** argument is a gridded data file which specifies the grid definition for the domain over which the masking bitmap is to be defined. If output from gen_vx_mask, automatically read mask data as the **input_field**. +1. The **input_grid** argument is a named grid, the path to a gridded data file, or an explicit grid specification string (see :numref:`App_B-grid_specification_strings`) which defines the grid for which a mask is to be defined. If set to a gen_vx_mask output file, the mask data is automatically read and used as the **input_field**. 2. The **mask_file** argument defines the masking information, see below. -• For "poly", "poly_xy", "box", "circle", and "track" masking, specify an ASCII Lat/Lon file. +• For "poly", "poly_xy", "box", "circle", and "track" masking, specify an ASCII Lat/Lon file. Refer to :ref:`Types_of_masking_gen_vx_mask` for details on how to construct the ASCII Lat/Lon file for each type of mask. • For "grid" and "data" masking, specify a gridded data file. @@ -58,7 +59,7 @@ Required arguments for gen_vx_mask 3. The **out_file** argument is the output NetCDF mask file to be written. -4. The **-type string** is required to set the masking type. The application will give an error message and exit if "-type string" is not specified on the command line. See description of supported types below. +4. The **-type string** is required to set the masking type. The application will give an error message and exit if "-type string" is not specified on the command line. See the description of supported types below. Optional arguments for gen_vx_mask ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -83,18 +84,24 @@ Optional arguments for gen_vx_mask 10. The **-height n** and **-width n** options set the size in grid units for "box" masking. -11. The **-shapeno n** option is only used for shapefile masking. (See description of shapefile masking below). +11. The **-shapeno n** option is only used for shapefile masking. See the description of shapefile masking below. -12.
The **-value n** option can be used to override the default output mask data value (1). +12. The **-shape_str name string** option is only used for shapefile masking. See the description of shapefile masking below. -13. The **-name string** option can be used to specify the output variable name for the mask. +13. The **-value n** option can be used to override the default output mask data value (1). -14. The **-log file** option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file. +14. The **-name string** option can be used to specify the output variable name for the mask. -15. The **-v level** option indicates the desired level of verbosity. The value of "level" will override the default setting of 2. Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity will increase the amount of logging. +15. The **-log file** option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file. -16. The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. +16. The **-v level** option indicates the desired level of verbosity. The value of "level" will override the default setting of 2. Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity will increase the amount of logging. +17. The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 results in no compression for the NetCDF output. Lower numbers provide faster compression, while higher numbers provide better compression. + +.. _Types_of_masking_gen_vx_mask: + +Types of masking available in gen_vx_mask +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The Gen-Vx-Mask tool supports the following types of masking region definition selected using the **-type** command line option: 1. Polyline (**poly**) masking reads an input ASCII file containing Lat/Lon locations, connects the first and last points, and selects grid points whose Lat/Lon location falls inside that polyline in Lat/Lon space. This option is useful when defining geographic subregions of a domain. @@ -115,7 +122,11 @@ The Gen-Vx-Mask tool supports the following types of masking region definition s 9. Latitude (**lat**) and longitude (**lon**) masking computes the latitude and longitude value at each grid point. This logic only requires the definition of the grid, specified by the **input_file**. Technically, the **mask_file** is not needed, but a value must be specified for the command line to parse correctly. Users are advised to simply repeat the **input_file** setting twice.
If the **-thresh** command line option is not used, the raw latitude or longitude values for each grid point will be written to the output. This option is useful when defining latitude or longitude bands over which to compute statistics. -10. Shapefile (**shape**) masking uses a closed polygon taken from an ESRI shapefile to define the masking region. Gen-Vx-Mask reads the shapefile with the ".shp" suffix and extracts the latitude and longitudes of the vertices. The other types of shapefiles (index file, suffix ".shx", and dBASE file, suffix ".dbf") are not currently used. The shapefile must consist of closed polygons rather than polylines, points, or any of the other data types that shapefiles support. Shapefiles usually contain more than one polygon, and the **-shape n** command line option enables the user to select one polygon from the shapefile. The integer **n** tells which shape number to use from the shapefile. Note that this value is zero-based, so that the first polygon in the shapefile is polygon number 0, the second polygon in the shapefile is polygon number 1, etc. For the user's convenience, some utilities that perform human-readable screen dumps of shapefile contents are provided. The gis_dump_shp, gis_dump_shx and gis_dump_dbf tools enable the user to examine the contents of her shapefiles. As an example, if the user knows the name of the particular polygon but not the number of the polygon in the shapefile, the user can use the gis_dump_dbf utility to examine the names of the polygons in the shapefile. The information written to the screen will display the corresponding polygon number. +10. Shapefile (**shape**) masking uses closed polygons taken from an ESRI shapefile to define the masking region. Gen-Vx-Mask reads the shapefile with the ".shp" suffix and extracts the latitudes and longitudes of the vertices. The shapefile must consist of closed polygons rather than polylines, points, or any of the other data types that shapefiles support. When the **-shape_str** command line option is used, Gen-Vx-Mask also reads metadata from the corresponding dBASE file with the ".dbf" suffix. + + Shapefiles usually contain more than one polygon, and the user must select which of these shapes should be used. The **-shapeno n** and **-shape_str name string** command line options enable the user to select one or more polygons from the shapefile. For **-shapeno n**, **n** is a comma-separated list of integer shape indices to be used. Note that these values are zero-based. So the first polygon in the shapefile is shape number 0, the second polygon in the shapefile is shape number 1, etc. For example, **-shapeno 0,1,2** uses the first three shapes in the shapefile. When multiple shapes are specified, the mask is defined as their union. So all grid points falling inside at least one of the specified shapes are included in the mask. + + For the user's convenience, some utilities that perform human-readable screen dumps of shapefile contents are provided with MET. The **gis_dump_shp**, **gis_dump_shx**, and **gis_dump_dbf** tools enable the user to examine the contents of these shapefiles. In particular, the **gis_dump_dbf** tool prints the name and values of the metadata for each record. The **-shape_str** command line option filters the shapes using the attributes listed in the **gis_dump_dbf** output, and requires two arguments. The **name** argument is set to any valid shapefile attribute, and the **string** argument is a comma-separated list of values to be matched.
An example of using **-shape_str** is **-shape_str CONTINENT Europe**, which will match all "CONTINENT" attributes that have the string "Europe" in them. Strings that contain embedded whitespace should be enclosed in single quotes. Also note that case-insensitive matching is used. For example, when using a global country outline shapefile, **-shape_str NAME 'united kingdom,united states of america'** matches the "NAME" attributes that contain either "United Kingdom" or "United States of America". If **-shape_str** is used multiple times, only shapes matching all the named attributes will be used. For example, **-shape_str CONTINENT Europe -shape_str NAME Spain,Portugal** will only match shapes where the "CONTINENT" attribute contains "Europe" and the "NAME" attribute contains "Spain" or "Portugal". If a user wishes, they can combine both the **-shape_str** and **-shapeno** options. In this case, the union of all matches from the shapefile will be used. The polyline, polyline XY, box, circle, and track masking methods all read an ASCII file containing Lat/Lon locations. Those files must contain a string, which defines the name of the masking region, followed by a series of whitespace-separated latitude (degrees north) and longitude (degrees east) values. diff --git a/docs/Users_Guide/plotting.rst b/docs/Users_Guide/plotting.rst index 1db3b4be91..1ac44e2f7e 100644 --- a/docs/Users_Guide/plotting.rst +++ b/docs/Users_Guide/plotting.rst @@ -71,7 +71,7 @@ An equivalent command using python embedding for point observations is shown bel .. code-block:: none - plot_point_obs 'PYTHON_NUMPY=MET_BASE/python/read_met_point_obs.py sample_pb.nc' sample_data.ps + plot_point_obs 'PYTHON_NUMPY=MET_BASE/python/examples/read_met_point_obs.py sample_pb.nc' sample_data.ps Please see section :numref:`pyembed-point-obs-data` for more details about Python embedding in MET. diff --git a/docs/Users_Guide/reformat_point.rst b/docs/Users_Guide/reformat_point.rst index 1cd9b4705d..809639c249 100644 --- a/docs/Users_Guide/reformat_point.rst +++ b/docs/Users_Guide/reformat_point.rst @@ -1042,7 +1042,7 @@ Required arguments for point2grid 1. The **input_filename** argument indicates the name of the input file to be processed. The input can be a MET NetCDF point observation file generated by other MET tools or a NetCDF AOD dataset from GOES16/17. Python embedding for point observations is also supported, as described in :numref:`pyembed-point-obs-data`. -The MET point observation NetCDF file name as **input_filename** argument is equivalent with "PYTHON_NUMPY=MET_BASE/python/read_met_point_obs.py netcdf_file name'. +Using the MET point observation NetCDF file name as the **input_filename** argument is equivalent to "PYTHON_NUMPY=MET_BASE/python/examples/read_met_point_obs.py netcdf_filename". 2. The **to_grid** argument defines the output grid as: (1) a named grid, (2) the path to a gridded data file, or (3) an explicit grid specification string. @@ -1100,7 +1100,7 @@ Listed below is an example of processing the same set of observations but using .. code-block:: none point2grid \ - 'PYTHON_NUMPY=MET_BASE/python/read_met_point_obs.py ascii2nc_edr_hourly.20130827.nc' \ + 'PYTHON_NUMPY=MET_BASE/python/examples/read_met_point_obs.py ascii2nc_edr_hourly.20130827.nc' \ G212 python_gridded_ascii_python.nc -config Point2GridConfig_edr \ -field 'name="200"; level="*"; valid_time="20130827_205959";' -method MAX -v 1 @@ -1191,10 +1191,10 @@ The script can be found at: ..
code-block:: none - MET_BASE/utility/print_pointnc2ascii.py + MET_BASE/python/utility/print_pointnc2ascii.py For how to use the script, issue the command: .. code-block:: none - python3 MET_BASE/utility/print_pointnc2ascii.py -h + python3 MET_BASE/python/utility/print_pointnc2ascii.py -h diff --git a/docs/Users_Guide/release-notes.rst b/docs/Users_Guide/release-notes.rst index 840af44bf6..979fdbfaf6 100644 --- a/docs/Users_Guide/release-notes.rst +++ b/docs/Users_Guide/release-notes.rst @@ -9,6 +9,33 @@ When applicable, release notes are followed by the GitHub issue number which des enhancement, or new feature (`MET GitHub issues `_). Important issues are listed **in bold** for emphasis. +MET Version 11.1.0-beta2 release notes (20230505) +------------------------------------------------- + +**Note** that the 11.1.0-beta2 release was originally created on 20230423 but was recreated on 20230428 and 20230505 to include critical bugfixes. + + .. dropdown:: Documentation + + * Improve documentation on Python Embedding for point observations (`#2303 `_). + * Create dropdown menus for Appendix A (`#2460 `_). + * Clarify MET Compile Time Python requirements (`#2490 `_). + + .. dropdown:: Enhancements + + * Enhance the MET point processing tools to read the Python 'point_data' variable instead of just 'met_point_data' (`#2285 `_). + * SonarQube: Further reduce bugs for MET-11.1.0-beta2 (`#2474 `_). + * SonarQube: Replace all instances of NULL with nullptr (`#2504 `_). + * SonarQube: Remove code that will never be executed (`#2506 `_). + + .. dropdown:: Bugfixes + + * Bugfix: Correct the branch name for the SonarQube scanning nightly (`#2401 `_). + * Bugfix: Fix support for the YYYYMMDD format in NetCDF level timestrings (`#2482 `_). + * Bugfix: AERONET the lat/lon is not changed with different station ID (`#2493 `_). + * Bugfix: dtype in Python embedding example script and appendixF correction (`#2518 `_). + * Bugfix: write_tmp_dataplane uses fill_value unrecognized by MET (`#2525 `_). + * **Bugfix: Resolve compilation problems due to need for \-std=c++11** (`#2531 `_). + MET Version 11.1.0-beta1 release notes (20230228) ------------------------------------------------- diff --git a/docs/Users_Guide/stat-analysis.rst b/docs/Users_Guide/stat-analysis.rst index 1c1f1db4c0..c1973ed681 100644 --- a/docs/Users_Guide/stat-analysis.rst +++ b/docs/Users_Guide/stat-analysis.rst @@ -9,7 +9,7 @@ Introduction The Stat-Analysis tool ties together results from the Point-Stat, Grid-Stat, Ensemble-Stat, Wavelet-Stat, and TC-Gen tools by providing summary statistical information and a way to filter their STAT output files. It processes the STAT output created by the other MET tools in a variety of ways which are described in this section. -MET version 9.0 adds support for the passing matched pair data (MPR) into Stat-Analysis using a Python script with the "-lookin python ..." option. An example of running Stat-Analysis with Python embedding is shown in :numref:`stat_analysis-usage`. +MET version 9.0 adds support for passing matched pair data (MPR) into Stat-Analysis using a Python script with the "-lookin python ..." option. An example of running Stat-Analysis with Python embedding can be found in :numref:`Appendix F, Section %s `. Scientific and statistical aspects ================================== @@ -282,12 +282,12 @@ The usage statement for the Stat-Analysis tool is shown below: stat_analysis has two required arguments and accepts several optional ones.
-In the usage statement for the Stat-Analysis tool, some additional terminology is introduced. In the Stat-Analysis tool, the term "job" refers to a set of tasks to be performed after applying user-specified options (i.e., "filters"). The filters are used to pare down a collection of output from the MET statistics tools to only those lines that are desired for the analysis. The job and its filters together comprise the "job command line". The "job command line" may be specified either on the command line to run a single analysis job or within the configuration file to run multiple analysis jobs at the same time. If jobs are specified in both the configuration file and the command line, only the jobs indicated in the configuration file will be run. The various jobs types are described in :numref:`table_WS_format_info_ISC` and the filtering options are described in :numref:`wavelet_stat-configuration-file`. +In the usage statement for the Stat-Analysis tool, some additional terminology is introduced. In the Stat-Analysis tool, the term "job" refers to a set of tasks to be performed after applying user-specified options (i.e., "filters"). The filters are used to pare down a collection of output from the MET statistics tools to only those lines that are desired for the analysis. The job and its filters together comprise the "job command line". The "job command line" may be specified either on the command line to run a single analysis job or within the configuration file to run multiple analysis jobs at the same time. If jobs are specified in both the configuration file and the command line, only the jobs indicated in the configuration file will be run. The various job types are described in :numref:`Des_components_STAT_analysis_tool` and the filtering options are described in :numref:`stat_analysis-configuration-file`. Required arguments for stat_analysis ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1. The **-lookin path** specifies the name of a directory to be searched recursively for STAT files (ending in ".stat") or any explicit file name with any suffix (such as "_ctc.txt") to be read. This option may be used multiple times to specify multiple directories and/or files to be read. If "-lookin python" is used, it must be followed by a Python embedding script and any command line arguments it takes. Python embedding can be used to pass matched pair (MPR) lines as input to Stat-Analysis. +1. The **-lookin path** specifies the name of a directory to be searched recursively for STAT files (ending in ".stat") or any explicit file name with any suffix (such as "_ctc.txt") to be read. This option may be used multiple times to specify multiple directories and/or files to be read. If "-lookin python" is used, it must be followed by a Python embedding script and any command line arguments it takes. Python embedding can be used to pass **only** matched pair (MPR) lines as input to Stat-Analysis. 2. Either a configuration file must be specified with the **-config** option, or a **JOB COMMAND LINE** must be denoted. The **JOB COMMAND LINE** is described in :numref:`stat_analysis-configuration-file` @@ -313,22 +313,6 @@ An example of the stat_analysis calling sequence is shown below. In this example, the Stat-Analysis tool will search for valid STAT lines located in the *../out/point_stat* directory that meet the options specified in the configuration file, *config/STATAnalysisConfig*. -..
_StA-pyembed: - -Python Embedding for Matched Pairs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The example below uses Python embedding. - -.. code-block:: none - - stat_analysis \ - -lookin python MET_BASE/python/read_ascii_mpr.py point_stat_mpr.txt \ - -job aggregate_stat -line_type MPR -out_line_type CNT \ - -by FCST_VAR,FCST_LEV - -In this example, rather than passing the MPR output lines from Point-Stat directly into Stat-Analysis (which is the typical approach), the read_ascii_mpr.py Python embedding script reads that file and passes the data to Stat-Analysis. The aggregate_stat job is defined on the command line and CNT statistics are derived from the MPR input data. Separate CNT statistics are computed for each unique combination of FCST_VAR and FCST_LEV present in the input. Please refer to :numref:`Appendix F, Section %s ` for more details about Python embedding in MET. - .. _stat_analysis-configuration-file: stat_analysis configuration file diff --git a/docs/Users_Guide/tc-pairs.rst b/docs/Users_Guide/tc-pairs.rst index dbacd34839..556f7358c2 100644 --- a/docs/Users_Guide/tc-pairs.rst +++ b/docs/Users_Guide/tc-pairs.rst @@ -421,35 +421,38 @@ TC-Pairs produces output in TCST format. The default output file name can be ove - BMODEL - User provided text string designating model name * - 4 + - DESC + - User provided description text string + * - 5 - STORM_ID - BBCCYYYY designation of storm - * - 5 + * - 6 - BASIN - Basin (BB in STORM_ID) - * - 6 + * - 7 - CYCLONE - Cyclone number (CC in STORM_ID) - * - 7 + * - 8 - STORM_NAME - Name of Storm - * - 8 + * - 9 - INIT - Initialization time of forecast in YYYYMMDD_HHMMSS format. - * - 9 + * - 10 - LEAD - Forecast lead time in HHMMSS format. - * - 10 + * - 11 - VALID - Forecast valid time in YYYYMMDD_HHMMSS format. - * - 11 + * - 12 - INIT_MASK - Initialization time masking grid applied - * - 12 + * - 13 - VALID_MASK - Valid time masking grid applied - * - 13 + * - 14 - LINE_TYPE - - Output line type (TCMPR or PROBRIRW) + - Output line types described below .. _TCMPR Line Type: @@ -463,151 +466,151 @@ TC-Pairs produces output in TCST format. 
The default output file name can be ove * - Column Number - Header Column Name - Description - * - 13 + * - 14 - TCMPR - Tropical Cyclone Matched Pair line type - * - 14 + * - 15 - TOTAL - Total number of pairs in track - * - 15 + * - 16 - INDEX - Index of the current track pair - * - 16 + * - 17 - LEVEL - Level of storm classification - * - 17 + * - 18 - WATCH_WARN - HU or TS watch or warning in effect - * - 18 + * - 19 - INITIALS - Forecaster initials - * - 19 + * - 20 - ALAT - Latitude position of adeck model - * - 20 + * - 21 - ALON - Longitude position of adeck model - * - 21 + * - 22 - BLAT - Latitude position of bdeck model - * - 22 + * - 23 - BLON - Longitude position of bdeck model - * - 23 + * - 24 - TK_ERR - Track error of adeck relative to bdeck (nm) - * - 24 + * - 25 - X_ERR - X component position error (nm) - * - 25 + * - 26 - Y_ERR - Y component position error (nm) - * - 26 + * - 27 - ALTK_ERR - Along track error (nm) - * - 27 + * - 28 - CRTK_ERR - Cross track error (nm) - * - 28 + * - 29 - ADLAND - adeck distance to land (nm) - * - 29 + * - 30 - BDLAND - bdeck distance to land (nm) - * - 30 + * - 31 - AMSLP - adeck mean sea level pressure - * - 31 + * - 32 - BMSLP - bdeck mean sea level pressure - * - 32 + * - 33 - AMAX_WIND - adeck maximum wind speed - * - 33 + * - 34 - BMAX_WIND - bdeck maximum wind speed - * - 34, 35 + * - 35, 36 - A/BAL_WIND_34 - a/bdeck 34-knot radius winds in full circle - * - 36, 37 + * - 37, 38 - A/BNE_WIND_34 - a/bdeck 34-knot radius winds in NE quadrant - * - 38, 39 + * - 39, 40 - A/BSE_WIND_34 - a/bdeck 34-knot radius winds in SE quadrant - * - 40, 41 + * - 41, 42 - A/BSW_WIND_34 - a/bdeck 34-knot radius winds in SW quadrant - * - 42, 43 + * - 43, 44 - A/BNW_WIND_34 - a/bdeck 34-knot radius winds in NW quadrant - * - 44, 45 + * - 45, 46 - A/BAL_WIND_50 - a/bdeck 50-knot radius winds in full circle - * - 46, 47 + * - 47, 48 - A/BNE_WIND_50 - a/bdeck 50-knot radius winds in NE quadrant - * - 48, 49 + * - 49, 50 - A/BSE_WIND_50 - a/bdeck 50-knot radius winds in SE quadrant - * - 50, 51 + * - 51, 52 - A/BSW_WIND_50 - a/bdeck 50-knot radius winds in SW quadrant - * - 52, 53 + * - 53, 54 - A/BNW_WIND_50 - a/bdeck 50-knot radius winds in NW quadrant - * - 54, 55 + * - 55, 56 - A/BAL_WIND_64 - a/bdeck 64-knot radius winds in full circle - * - 56, 57 + * - 57, 58 - A/BNE_WIND_64 - a/bdeck 64-knot radius winds in NE quadrant - * - 58, 59 + * - 59, 60 - A/BSE_WIND_64 - a/bdeck 64-knot radius winds in SE quadrant - * - 60, 61 + * - 61, 62 - A/BSW_WIND_64 - a/bdeck 64-knot radius winds in SW quadrant - * - 62, 63 + * - 63, 64 - A/BNW_WIND_64 - a/bdeck 64-knot radius winds in NW quadrant - * - 64, 65 + * - 65, 66 - A/BRADP - pressure in millibars of the last closed isobar, 900 - 1050 mb - * - 66, 67 + * - 67, 68 - A/BRRP - radius of the last closed isobar in nm, 0 - 9999 nm - * - 68, 69 + * - 69, 70 - A/BMRD - radius of max winds, 0 - 999 nm - * - 70, 71 + * - 71, 72 - A/BGUSTS - gusts, 0 through 995 kts - * - 72, 73 + * - 73, 74 - A/BEYE - eye diameter, 0 through 999 nm - * - 74, 75 + * - 75, 76 - A/BDIR - storm direction in compass coordinates, 0 - 359 degrees - * - 76, 77 + * - 77, 78 - A/BSPEED - storm speed, 0 - 999 kts - * - 78, 79 + * - 79, 80 - A/BDEPTH - system depth, D-deep, M-medium, S-shallow, X-unknown - * - 80 + * - 81 - NUM_MEMBERS - consensus variable: number of models (or ensemble members) that were used to build the consensus track - * - 81 + * - 82 - TRACK_SPREAD - consensus variable: the mean of the distances from the member location to the 
consensus track location (nm) - * - 82 + * - 83 - TRACK_STDEV - consensus variable: the standard deviation of the distances from the member locations to the consensus track location (nm) - * - 83 + * - 84 - MSLP_STDEV - consensus variable: the standard deviation of the member's mean sea level pressure values - * - 84 + * - 85 - MAX_WIND_STDEV - consensus variable: the standard deviation of the member's maximum wind speed values @@ -623,31 +626,31 @@ TC-Pairs produces output in TCST format. The default output file name can be ove * - Column Number - Header Column Name - Description - * - 13 + * - 14 - TCDIAG - Tropical Cyclone Diagnostics line type - * - 14 + * - 15 - TOTAL - Total number of pairs in track - * - 15 + * - 16 - INDEX - Index of the current track pair - * - 16 + * - 17 - DIAG_SOURCE - Diagnostics data source indicated by the `-diag` command line option - * - 17 + * - 18 - TRACK_SOURCE - ATCF ID of the track data used to define the diagnostics - * - 18 + * - 19 - FIELD_SOURCE - Description of gridded field data source used to define the diagnostics - * - 19 + * - 20 - N_DIAG - Number of storm diagnostic name and value columns to follow - * - 20 + * - 21 - DIAG_i - Name of the ith storm diagnostic (repeated) - * - 21 + * - 22 - VALUE_i - Value of the ith storm diagnostic (repeated) @@ -663,75 +666,75 @@ TC-Pairs produces output in TCST format. The default output file name can be ove * - Column Number - Header Column Name - Description - * - 13 + * - 14 - PROBRIRW - Probability of Rapid Intensification/Weakening line type - * - 14 + * - 15 - ALAT - Latitude position of edeck model - * - 15 + * - 16 - ALON - Longitude position of edeck model - * - 16 + * - 17 - BLAT - Latitude position of bdeck model - * - 17 + * - 18 - BLON - Longitude position of bdeck model - * - 18 + * - 19 - INITIALS - Forecaster initials - * - 19 + * - 20 - TK_ERR - Track error of adeck relative to bdeck (nm) - * - 20 + * - 21 - X_ERR - X component position error (nm) - * - 21 + * - 22 - Y_ERR - Y component position error (nm) - * - 22 + * - 23 - ADLAND - adeck distance to land (nm) - * - 23 + * - 24 - BDLAND - bdeck distance to land (nm) - * - 24 + * - 25 - RI_BEG - Start of RI time window in HH format - * - 25 + * - 26 - RI_END - End of RI time window in HH format - * - 26 + * - 27 - RI_WINDOW - Width of RI time window in HH format - * - 27 + * - 28 - AWIND_END - Forecast maximum wind speed at RI end - * - 28 + * - 29 - BWIND_BEG - Best track maximum wind speed at RI begin - * - 29 + * - 30 - BWIND_END - Best track maximum wind speed at RI end - * - 30 + * - 31 - BDELTA - Exact Best track wind speed change in RI window - * - 31 + * - 32 - BDELTA_MAX - Maximum Best track wind speed change in RI window - * - 32 + * - 33 - BLEVEL_BEG - Best track storm classification at RI begin - * - 33 + * - 34 - BLEVEL_END - Best track storm classification at RI end - * - 34 + * - 35 - N_THRESH - Number of probability thresholds - * - 35 + * - 36 - THRESH_i - The ith probability threshold value (repeated) - * - 36 + * - 37 - PROB_i - The ith probability value (repeated) diff --git a/docs/Users_Guide/tc-rmw.rst b/docs/Users_Guide/tc-rmw.rst index 766d9a4d7b..29e0e3be07 100644 --- a/docs/Users_Guide/tc-rmw.rst +++ b/docs/Users_Guide/tc-rmw.rst @@ -166,12 +166,12 @@ tc_rmw output file The NetCDF output file contains the following dimensions: -1. *range* - the radial dimension of the range-azimuth grid +1. *track_point* - the track points corresponding to the model output valid times -2. 
*azimuth* - the azimuthal dimension of the range-azimuth grid +2. *pressure* - if any pressure levels are specified in the data variable list, they will be sorted and combined into a 3D NetCDF variable, with pressure as the vertical dimension and range and azimuth as the horizontal dimensions -3. *pressure* - if any pressure levels are specified in the data variable list, they will be sorted and combined into a 3D NetCDF variable, which pressure as the vertical dimension and range and azimuth as the horizontal dimensions +3. *range* - the radial dimension of the range-azimuth grid -4. *track_point* - the track points corresponding to the model output valid times +4. *azimuth* - the azimuthal dimension of the range-azimuth grid For each data variable specified in the data variable list, a corresponding NetCDF variable will be created with the same name and units. diff --git a/docs/Users_Guide/tc-stat.rst b/docs/Users_Guide/tc-stat.rst index 1902330f8b..0d9e824837 100644 --- a/docs/Users_Guide/tc-stat.rst +++ b/docs/Users_Guide/tc-stat.rst @@ -59,6 +59,8 @@ The TC-Stat tool can be used to read TCMPR lines and compare the occurrence of r Users may specify several job command options to configure the behavior of this job. Using these configurable options, the TC-Stat tool analyzes paired tracks and for each track point (i.e. each TCMPR line) determines whether rapid intensification or weakening occurred. For each point in time, it uses the forecast and BEST track event occurrence to populate a 2x2 contingency table. The job may be configured to require that forecast and BEST track events occur at exactly the same time to be considered a hit. Alternatively, the job may be configured to define a hit as long as the forecast and BEST track events occurred within a configurable time window. Using these relaxed matching criteria, false alarms may be considered hits and misses may be considered correct negatives, as long as the adeck and bdeck events were close enough in time. Each rirw job applies a single intensity change threshold. Therefore, assessing a model's performance with rapid intensification and weakening requires that two separate jobs be run. +The RIRW job supports the **-out_stat** option to write the contingency table counts and statistics to a STAT output file. + Probability of Rapid Intensification ------------------------------------ @@ -383,6 +385,7 @@ _________________________ e.g.: -job filter -line_type TCMPR -amodel HWFI -dump_row ./tc_filter_job.tcst -job summary -line_type TCMPR -column TK_ERR -dump_row ./tc_summary_job.tcst -job rirw -line_type TCMPR -rirw_time 24 -rirw_exact false -rirw_thresh ge20 + -job rirw -line_type TCMPR -rirw_time 24 -rirw_exact false -rirw_thresh ge20 -out_stat ./tc_rirw.stat -job probrirw -line_type PROBRIRW -column_thresh RI_WINDOW ==24 \ -probrirw_thresh 30 -probrirw_prob_thresh ==0.25 @@ -470,6 +473,8 @@ The RIRW job produces contingency table counts and statistics defined by identif Users may also specify the **-out_alpha** option to define the alpha value for the confidence intervals in the CTS output line type. In addition, the **-by column_name** option is a convenient way of running the same job across multiple stratifications of data. For example, **-by AMODEL** runs the same job for each unique AMODEL name in the data. +Users may also specify the **-out_stat** option to write the contingency table counts and statistics (for the CTC and CTS output line types) to an output STAT file.
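A minimal sketch of such a job, adapted from the example job list above (the input track pair file name is illustrative):

.. code-block:: none

   tc_stat \
      -lookin tc_pairs_output.tcst \
      -job rirw -line_type TCMPR -rirw_time 24 -rirw_exact false \
         -rirw_thresh ge20 -out_stat ./tc_rirw.stat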
Information about the RIRW timing and filtering criteria is written to the STAT header columns, while the contingency table counts and/or statistics are written to the CTC and/or CTS output columns. + **Job: PROBRIRW** The PROBRIRW job produces probabilistic contingency table counts and statistics defined by placing forecast probabilities and BEST track rapid intensification events into an Nx2 contingency table. Users may specify several job command options to configure the behavior of this job: diff --git a/docs/conf.py b/docs/conf.py index fb7f5b4268..2f917f4079 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,11 +20,11 @@ project = 'MET' author = 'UCAR/NCAR, NOAA, CSU/CIRA, and CU/CIRES' author_list = 'Jensen, T., J. Prestopnik, H. Soh, L. Goodrich, B. Brown, R. Bullock, J. Halley Gotway, K. Newman, J. Opatz' -version = '11.1.0-beta1' +version = '11.1.0-beta2' verinfo = version release = f'{version}' release_year = '2023' -release_date = f'{release_year}-02-28' +release_date = f'{release_year}-05-05' copyright = f'{release_year}, {author}' # -- General configuration --------------------------------------------------- diff --git a/docs/requirements.txt b/docs/requirements.txt index b0b0957e2a..0b266552df 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,3 @@ sphinx-gallery sphinxcontrib-bibtex -sphinx-design +sphinx-design==0.3.0 diff --git a/internal/scripts/installation/compile_MET_all.sh b/internal/scripts/installation/compile_MET_all.sh index 0ac540d147..2eb0ff62e6 100755 --- a/internal/scripts/installation/compile_MET_all.sh +++ b/internal/scripts/installation/compile_MET_all.sh @@ -118,8 +118,15 @@ if [ ! -e $TAR_DIR ]; then exit 1 fi -# Update library linker path -export LD_LIBRARY_PATH=${TEST_BASE}/external_libs/lib${MET_PYTHON:+:$MET_PYTHON/lib}${MET_NETCDF:+:$MET_NETCDF/lib}${MET_HDF5:+:$MET_HDF5/lib}${MET_BUFRLIB:+:$MET_BUFRLIB}${MET_GRIB2CLIB:+:$MET_GRIB2CLIB}${LIB_JASPER:+:$LIB_JASPER}${LIB_LIBPNG:+:$LIB_LIBPNG}${LIB_Z:+:$LIB_Z}${MET_GSL:+:$MET_GSL/lib}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} +# If MET_PYTHON_LIB is not set in the environment file, set it to the +# lib directory so it can be used to install MET with Python Embedding +# support +if [[ -z "$MET_PYTHON_LIB" ]]; then + MET_PYTHON_LIB=${MET_PYTHON}/lib +fi + + +# Print library linker path echo "LD_LIBRARY_PATH = ${LD_LIBRARY_PATH}" # if LIB_Z is not set in the environment file, set it to the @@ -430,17 +437,17 @@ if [ $COMPILE_G2CLIB -eq 1 ]; then rm -rf ${LIB_DIR}/g2clib/g2clib* tar -xf ${TAR_DIR}/g2clib*.tar -C ${LIB_DIR}/g2clib cd ${LIB_DIR}/g2clib/g2clib* - sed -i 's|INC=.*|INC=-I${LIB_DIR}/include -I${LIB_DIR}/include/jasper|g' makefile + # Sed commands use double-quotes to support variable expansion. + sed -i "s|INC=.*|INC=-I${LIB_DIR}/include -I${LIB_DIR}/include/jasper|g" makefile - # allow other compilers besides gcc - sed -i 's/CC=gcc/CC=${CC_COMPILER}/g' makefile + # Allow other compilers besides gcc + sed -i "s|CC=gcc|CC=${CC}|g" makefile # remove -D__64BIT__ flag because compiling with it has # shown issues with GRIB/GRIB2 files that are over 2GB in size # This flag was removed in g2clib 1.6.4 # so this can be removed if the version is updated sed -i 's/-D__64BIT__//g' makefile - export CC_COMPILER=${CC} echo "cd `pwd`" # g2clib appears to compile but causes failure compiling MET if -j argument is used # so exclude it from this call @@ -450,6 +457,7 @@ if [ $COMPILE_G2CLIB -eq 1 ]; then cp *.h ${LIB_DIR}/include/. 
fi + # Compile HDF # Depends on jpeg # Edit 'mfhdf/hdiff/Makefile' as follows: @@ -628,7 +636,7 @@ export LDFLAGS="-Wl,--disable-new-dtags" # https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html # ${parameter:+word} # If parameter is null or unset, nothing is substituted, otherwise the expansion of word is substituted. -export LDFLAGS="${LDFLAGS} -Wl,-rpath,${LIB_DIR}/lib${ADDTL_DIR:+:$ADDTL_DIR}${LIB_DIR}/lib${MET_NETCDF:+:$MET_NETCDF/lib}${MET_HDF5:+:$MET_HDF5/lib}${MET_BUFRLIB:+:$MET_BUFRLIB}${MET_GRIB2CLIB:+:$MET_GRIB2CLIB}${MET_PYTHON:+:$MET_PYTHON/lib}${MET_GSL:+:$MET_GSL/lib}" +export LDFLAGS="${LDFLAGS} -Wl,-rpath,${LIB_DIR}/lib${ADDTL_DIR:+:$ADDTL_DIR}${LIB_DIR}/lib${MET_NETCDF:+:$MET_NETCDF/lib}${MET_HDF5:+:$MET_HDF5/lib}${MET_BUFRLIB:+:$MET_BUFRLIB}${MET_GRIB2CLIB:+:$MET_GRIB2CLIB}${MET_PYTHON_LIB:+:$MET_PYTHON_LIB}${MET_GSL:+:$MET_GSL/lib}" export LDFLAGS="${LDFLAGS} -Wl,-rpath,${LIB_JASPER:+$LIB_JASPER}${LIB_LIBPNG:+:$LIB_PNG}${LIB_Z:+$LIB_Z}" export LDFLAGS="${LDFLAGS} ${LIB_JASPER:+-L$LIB_JASPER} ${LIB_LIBPNG:+-L$LIB_LIBPNG} ${MET_HDF5:+-L$MET_HDF5/lib} ${ADDTL_DIR:+-L$ADDTL_DIR}" export LIBS="${LIBS} -lhdf5_hl -lhdf5 -lz" diff --git a/internal/scripts/installation/config/install_met_env.hera b/internal/scripts/installation/config/install_met_env.hera index 803d5e1aef..1b938cddfd 100755 --- a/internal/scripts/installation/config/install_met_env.hera +++ b/internal/scripts/installation/config/install_met_env.hera @@ -1,28 +1,26 @@ -module use -a /contrib/anaconda/modulefiles module load intel/2022.1.2 -module load anaconda/latest -export TEST_BASE=/contrib/met/11.0.0 +export PATH=/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin:${PATH} +export TEST_BASE=/contrib/met/11.1.0 export COMPILER=intel_2022.1.2 export MET_SUBDIR=${TEST_BASE} -export MET_TARBALL=v11.0.0.tar.gz +export MET_TARBALL=v11.1.0.tar.gz export USE_MODULES=TRUE -export PYTHON_MODULE=anaconda_latest -export MET_PYTHON=/contrib/anaconda/anaconda3/latest/ -export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.7m -export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.7/config-3.7m-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.7m\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -Xlinker\ -export-dynamic +export MET_PYTHON=/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10 +export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10 +export MET_PYTHON_LD=`python3-config --ldflags --embed` export SET_D64BIT=FALSE -export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/ +export EXTERNAL_LIBS=/contrib/met/11.1.0/external_libs/ #export MET_NETCDF=${EXTERNAL_LIBS} -export MET_GSL=${EXTERNAL_LIBS} -export MET_BUFRLIB=${EXTERNAL_LIBS} -export BUFRLIB_NAME=-lbufr +#export MET_GSL=${EXTERNAL_LIBS} +#export MET_BUFRLIB=${EXTERNAL_LIBS} +#export BUFRLIB_NAME=-lbufr #export MET_HDF5=${EXTERNAL_LIBS} -export MET_GRIB2CLIB=${EXTERNAL_LIBS}/lib -export MET_GRIB2CINC=${EXTERNAL_LIBS}/include -export GRIB2CLIB_NAME=-lgrib2c -export LIB_JASPER=${EXTERNAL_LIBS}/lib -export LIB_LIBPNG=${EXTERNAL_LIBS}/lib -export LIB_Z=${EXTERNAL_LIBS}/lib -#export SET_D64BIT=FALSE +#export MET_GRIB2CLIB=${EXTERNAL_LIBS}/lib +#export MET_GRIB2CINC=${EXTERNAL_LIBS}/include +#export GRIB2CLIB_NAME=-lgrib2c +#export LIB_JASPER=${EXTERNAL_LIBS}/lib +#export LIB_LIBPNG=${EXTERNAL_LIBS}/lib +#export LIB_Z=${EXTERNAL_LIBS}/lib #export CFLAGS="-Wall -g" #export CXXFLAGS="-Wall -g -lcurl" +export MAKE_ARGS=-j \ No newline at end of file diff --git a/internal/scripts/installation/config/install_met_env.jet 
b/internal/scripts/installation/config/install_met_env.jet index b90839a982..0d88a2f0fc 100644 --- a/internal/scripts/installation/config/install_met_env.jet +++ b/internal/scripts/installation/config/install_met_env.jet @@ -2,17 +2,17 @@ module load intel/2022.1.2 module load netcdf/4.7.0 module load hdf5/1.10.5 -export TEST_BASE=/contrib/met/11.0.0 +export TEST_BASE=/contrib/met/11.1.0 export COMPILER=intel_18.0.5.274 export MET_SUBDIR=${TEST_BASE} -export MET_TARBALL=v11.0.0.tar.gz +export MET_TARBALL=v11.1.0.tar.gz export USE_MODULES=TRUE -export MET_PYTHON=/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8 -export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8 -export MET_PYTHON_LD=-L${MET_PYTHON}/lib\ -L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -lpython3.8\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -Xlinker\ -export-dynamic +export MET_PYTHON=/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.1_py3.10 +export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10 +export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm export MET_NETCDF=/apps/netcdf/4.7.0/intel/18.0.5.274 export MET_HDF5=/apps/hdf5/1.10.5/intel/18.0.5.274 -export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/ +export EXTERNAL_LIBS=/contrib/met/11.1.0/external_libs/ #export MET_GSL=${EXTERNAL_LIBS} #export MET_BUFRLIB=${EXTERNAL_LIBS} #export BUFRLIB_NAME=-lbufr @@ -22,4 +22,5 @@ export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/ #export LIB_JASPER=${EXTERNAL_LIBS}/lib #export LIB_LIBPNG=${EXTERNAL_LIBS}/lib #export LIB_Z=${EXTERNAL_LIBS}/lib +export MAKE_ARGS=-j export SET_D64BIT=FALSE diff --git a/internal/scripts/installation/config/install_met_env.orion b/internal/scripts/installation/config/install_met_env.orion index a144d8c492..4a25f75b57 100644 --- a/internal/scripts/installation/config/install_met_env.orion +++ b/internal/scripts/installation/config/install_met_env.orion @@ -1,14 +1,14 @@ module load intel/2020.2 -export TEST_BASE=/apps/contrib/MET/11.0.0 +export TEST_BASE=/apps/contrib/MET/11.1.0 export COMPILER=intel_2020 export MET_SUBDIR=${TEST_BASE}/ -export MET_TARBALL=v11.0.0.tar.gz +export MET_TARBALL=v11.1.0.tar.gz export USE_MODULES=TRUE -export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.0_py3.8 -export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8 -export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.8\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm -export EXTERNAL_LIBS=/apps/contrib/MET/11.0.0/external_libs +export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10 +export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10 +export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm +export EXTERNAL_LIBS=/apps/contrib/MET/11.1.0/external_libs export LIB_Z=${EXTERNAL_LIBS}/lib #export MET_GSL=${EXTERNAL_LIBS} #export MET_BUFRLIB=${EXTERNAL_LIBS} @@ -21,6 +21,7 @@ export LIB_Z=${EXTERNAL_LIBS}/lib #export LIB_JASPER=${EXTERNAL_LIBS}/lib #export LIB_LIBPNG=${EXTERNAL_LIBS}/lib #export SET_D64BIT=FALSE +export MAKE_ARGS=-j #export CFLAGS="-Wall -g" #export CXXFLAGS="-Wall -g" diff --git a/internal/scripts/installation/config/install_met_env_met_only.hera b/internal/scripts/installation/config/install_met_env_met_only.hera index 
645ca9acab..b4ac247f75 100644 --- a/internal/scripts/installation/config/install_met_env_met_only.hera +++ b/internal/scripts/installation/config/install_met_env_met_only.hera @@ -1,17 +1,15 @@ -module use -a /contrib/anaconda/modulefiles module load intel/2022.1.2 -module load anaconda/latest -export TEST_BASE=/contrib/met/11.0.0 +export PATH=/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin:${PATH} +export TEST_BASE=/contrib/met/11.1.0 export COMPILER=intel_2022.1.2 export MET_SUBDIR=${TEST_BASE} -export MET_TARBALL=v11.0.0.tar.gz +export MET_TARBALL=v11.1.0.tar.gz export USE_MODULES=TRUE -export PYTHON_MODULE=anaconda_latest -export MET_PYTHON=/contrib/anaconda/anaconda3/latest -export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.7m -export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.7/config-3.7m-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.7m\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -Xlinker\ -export-dynamic +export MET_PYTHON=/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10 +export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10 +export MET_PYTHON_LD=`python3-config --ldflags --embed` export SET_D64BIT=FALSE -export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/ +export EXTERNAL_LIBS=/contrib/met/11.1.0/external_libs/ export MET_NETCDF=${EXTERNAL_LIBS} export MET_GSL=${EXTERNAL_LIBS} export MET_BUFRLIB=${EXTERNAL_LIBS} @@ -23,6 +21,6 @@ export GRIB2CLIB_NAME=-lgrib2c export LIB_JASPER=${EXTERNAL_LIBS}/lib export LIB_LIBPNG=${EXTERNAL_LIBS}/lib export LIB_Z=${EXTERNAL_LIBS}/lib -export SET_D64BIT=FALSE #export CFLAGS="-Wall -g" #export CXXFLAGS="-Wall -g -lcurl" +export MAKE_ARGS=-j \ No newline at end of file diff --git a/internal/scripts/installation/config/install_met_env_met_only.jet b/internal/scripts/installation/config/install_met_env_met_only.jet index e642880266..c4c66ebfc3 100644 --- a/internal/scripts/installation/config/install_met_env_met_only.jet +++ b/internal/scripts/installation/config/install_met_env_met_only.jet @@ -7,9 +7,9 @@ export COMPILER=intel_18.0.5.274 export MET_SUBDIR=${TEST_BASE} export MET_TARBALL=v11.0.0.tar.gz export USE_MODULES=TRUE -export MET_PYTHON=/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8 -export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8 -export MET_PYTHON_LD=-L${MET_PYTHON}/lib\ -L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -lpython3.8\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -Xlinker\ -export-dynamic +export MET_PYTHON=/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.1_py3.10 +export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10 +export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm export MET_NETCDF=/apps/netcdf/4.7.0/intel/18.0.5.274 export MET_HDF5=/apps/hdf5/1.10.5/intel/18.0.5.274 export EXTERNAL_LIBS=/contrib/met/11.0.0/external_libs/ @@ -22,4 +22,5 @@ export GRIB2CLIB_NAME=-lgrib2c export LIB_JASPER=${EXTERNAL_LIBS}/lib export LIB_LIBPNG=${EXTERNAL_LIBS}/lib export LIB_Z=${EXTERNAL_LIBS}/lib +export MAKE_ARGS=-j export SET_D64BIT=FALSE diff --git a/internal/scripts/installation/config/install_met_env_met_only.orion b/internal/scripts/installation/config/install_met_env_met_only.orion index ece8d88aa3..49ab2ebd68 100644 --- a/internal/scripts/installation/config/install_met_env_met_only.orion +++ b/internal/scripts/installation/config/install_met_env_met_only.orion @@ -1,14 +1,14 @@ module load intel/2020.2 -export 
TEST_BASE=/apps/contrib/MET/11.0.0 +export TEST_BASE=/apps/contrib/MET/11.1.0 export COMPILER=intel_2020 export MET_SUBDIR=${TEST_BASE}/ -export MET_TARBALL=v11.0.0.tar.gz +export MET_TARBALL=v11.1.0.tar.gz export USE_MODULES=TRUE -export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.0_py3.8 -export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8 -export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.8\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm -export EXTERNAL_LIBS=/apps/contrib/MET/11.0.0/external_libs +export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10 +export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10 +export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm +export EXTERNAL_LIBS=/apps/contrib/MET/11.1.0/external_libs export MET_GSL=${EXTERNAL_LIBS} export MET_BUFRLIB=${EXTERNAL_LIBS} export BUFRLIB_NAME=-lbufr @@ -21,5 +21,6 @@ export LIB_JASPER=${EXTERNAL_LIBS}/lib export LIB_LIBPNG=${EXTERNAL_LIBS}/lib export LIB_Z=${EXTERNAL_LIBS}/lib export SET_D64BIT=FALSE +export MAKE_ARGS=-j #export CFLAGS="-Wall -g" #export CXXFLAGS="-Wall -g" diff --git a/internal/scripts/installation/modulefiles/11.0.0.lua.wcoss2 b/internal/scripts/installation/modulefiles/11.0.0.lua.wcoss2 deleted file mode 100644 index c4afdd9fda..0000000000 --- a/internal/scripts/installation/modulefiles/11.0.0.lua.wcoss2 +++ /dev/null @@ -1,27 +0,0 @@ -help([[ -]]) - -local pkgName = myModuleName() -local pkgVersion = myModuleVersion() -local pkgNameVer = myModuleFullName() - -local hierA = hierarchyA(pkgNameVer,1) -local compNameVer = hierA[1] - - -conflict(pkgName) - -local opt = os.getenv("HPC_OPT") or os.getenv("OPT") or "/opt/modules" - -local base = pathJoin(opt,compNameVer,pkgName,pkgVersion) - -prepend_path("PATH", pathJoin(base,"bin")) - -setenv("MET_ROOT", base) -setenv("MET_BASE", pathJoin(base, "share", "met")) -setenv("MET_VERSION", pkgVersion) - -whatis("Name: ".. pkgName) -whatis("Version: " .. 
pkgVersion) -whatis("Category: applications") -whatis("Description: Model Evaluation Tools (MET)") diff --git a/internal/scripts/installation/modulefiles/11.0.0_acorn b/internal/scripts/installation/modulefiles/11.0.0_acorn deleted file mode 100644 index 117c0aa323..0000000000 --- a/internal/scripts/installation/modulefiles/11.0.0_acorn +++ /dev/null @@ -1,34 +0,0 @@ -#%Module###################################################################### -## -## Model Evaluation Tools -## -proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v10.1.0 - *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" -} - -# The intel compiler is required to run MET - -module use /apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304/ -module load intel -module load python/3.8.6 -module load netcdf/4.7.4 -module load hdf5/1.12.2 -module load bufr/11.5.0 -module load zlib/1.2.11 -module load jasper/2.0.25 -module load libpng/1.6.37 -module load gsl/2.7 -module load g2c/1.6.4 - -set base /apps/sw_review/emc/MET/11.0.0 -set ver 11.0.0 -set share $base/share/met -set lib_base $base - -prepend-path PATH $base/exec - -setenv METversion V$ver -setenv MET_ROOT $base - - diff --git a/internal/scripts/installation/modulefiles/11.0.0_casper b/internal/scripts/installation/modulefiles/11.0.0_casper deleted file mode 100644 index b0d007db53..0000000000 --- a/internal/scripts/installation/modulefiles/11.0.0_casper +++ /dev/null @@ -1,28 +0,0 @@ -#%Module###################################################################### -## -## Model Evaluation Tools -## -proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0 - *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" -} - -# If they exist, remove ncdump and ncgen from /glade/p/ral/jntp/MET/MET_releases/casper/11.0.0/external_libs/bin - -# The intel compiler is required to run MET -module load ncarenv/1.3 -module load intel/2021.2 -module load netcdf/4.8.0 - -set base /glade/p/ral/jntp/MET/MET_releases/casper/11.0.0 -set ver 11.0.0 -set share $base/share/met - -prepend-path PATH $base/bin:/glade/p/ral/jntp/MET/MET_releases/casper/11.0.0/external_libs/bin:/glade/p/ral/jntp/MET/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin - - -setenv METversion V$ver - -# setenv MET_BUFRLIB /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs/libs -# setenv MET_GRIB2C /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs -# setenv MET_GSL /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs diff --git a/internal/scripts/installation/modulefiles/11.0.0_cheyenne b/internal/scripts/installation/modulefiles/11.0.0_cheyenne deleted file mode 100644 index d6c23de79c..0000000000 --- a/internal/scripts/installation/modulefiles/11.0.0_cheyenne +++ /dev/null @@ -1,27 +0,0 @@ -#%Module###################################################################### -## -## Model Evaluation Tools -## -proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0 - *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" -} - -# If they exist, remove ncdump and ncgen from /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs/bin - -# The intel compiler is required to run MET -module load ncarenv/1.3 -module load intel/2021.2 -module load netcdf/4.8.0 - -set base /glade/p/ral/jntp/MET/MET_releases/11.0.0 -set ver 
11.0.0 -set share $base/share/met - -prepend-path PATH $base/bin:/glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs/bin:/glade/p/ral/jntp/MET/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin - -setenv METversion V$ver - -# setenv MET_BUFRLIB /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs/libs -# setenv MET_GRIB2C /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs -# setenv MET_GSL /glade/p/ral/jntp/MET/MET_releases/11.0.0/external_libs diff --git a/internal/scripts/installation/modulefiles/11.0.0_frontera b/internal/scripts/installation/modulefiles/11.0.0_frontera deleted file mode 100644 index ebd3cb9c5f..0000000000 --- a/internal/scripts/installation/modulefiles/11.0.0_frontera +++ /dev/null @@ -1,25 +0,0 @@ -#%Module###################################################################### -## -## Model Evaluation Tools -## -proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0 - *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" -} - -# If they exist, remove ncdump and ncgen from /work2/06612/tg859120/frontera/MET/11.0.0/external_libs/bin - -module unload python3 -module load intel/19.1.1 -module load hdf5/1.12.0 -module load netcdf/4.7.4 - -set base /work2/06612/tg859120/frontera/MET/11.0.0 -set ver 11.0.0 -set share $base/share/met -set lib_base $base - -prepend-path PATH $base/bin:/work2/06612/tg859120/frontera/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin - -setenv METversion V$ver -setenv MET_ROOT $base diff --git a/internal/scripts/installation/modulefiles/11.0.0_gaea b/internal/scripts/installation/modulefiles/11.0.0_gaea deleted file mode 100644 index c91a2e74bb..0000000000 --- a/internal/scripts/installation/modulefiles/11.0.0_gaea +++ /dev/null @@ -1,17 +0,0 @@ -#%Module###################################################################### -## -## Model Evaluation Tools -## -proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v10.1.2 - *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" -} - -module load intel/19.0.5.281 - -set base /usw/met -set ver 11.0.0 -set share $base/$ver/share/met -set lib_base $base/11.0.0 - -prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/lustre/f2/dev/esrl/Julie.Prestopnik/projects/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin diff --git a/internal/scripts/installation/modulefiles/11.0.1.lua.wcoss2 b/internal/scripts/installation/modulefiles/11.0.1.lua.wcoss2 deleted file mode 100644 index c4afdd9fda..0000000000 --- a/internal/scripts/installation/modulefiles/11.0.1.lua.wcoss2 +++ /dev/null @@ -1,27 +0,0 @@ -help([[ -]]) - -local pkgName = myModuleName() -local pkgVersion = myModuleVersion() -local pkgNameVer = myModuleFullName() - -local hierA = hierarchyA(pkgNameVer,1) -local compNameVer = hierA[1] - - -conflict(pkgName) - -local opt = os.getenv("HPC_OPT") or os.getenv("OPT") or "/opt/modules" - -local base = pathJoin(opt,compNameVer,pkgName,pkgVersion) - -prepend_path("PATH", pathJoin(base,"bin")) - -setenv("MET_ROOT", base) -setenv("MET_BASE", pathJoin(base, "share", "met")) -setenv("MET_VERSION", pkgVersion) - -whatis("Name: ".. pkgName) -whatis("Version: " .. 
pkgVersion) -whatis("Category: applications") -whatis("Description: Model Evaluation Tools (MET)") diff --git a/internal/scripts/installation/modulefiles/11.0.1_acorn b/internal/scripts/installation/modulefiles/11.0.1_acorn deleted file mode 100644 index 832194dc76..0000000000 --- a/internal/scripts/installation/modulefiles/11.0.1_acorn +++ /dev/null @@ -1,34 +0,0 @@ -#%Module###################################################################### -## -## Model Evaluation Tools -## -proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.1 - *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" -} - -# The intel compiler is required to run MET - -module use /apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304/ -module load intel python/3.10.4 -module load ve/evs/1.0 -module load netcdf/4.7.4 -module load hdf5/1.12.2 -module load bufr/11.5.0 -module load zlib/1.2.11 -module load jasper/2.0.25 -module load libpng/1.6.37 -module load gsl/2.7 -module load g2c/1.6.4 - -set base /apps/sw_review/emc/MET/11.0.1 -set ver 11.0.1 -set share $base/share/met -set lib_base $base - -prepend-path PATH $base/exec - -setenv METversion V$ver -setenv MET_ROOT $base - - diff --git a/internal/scripts/installation/modulefiles/11.0.0_hera b/internal/scripts/installation/modulefiles/11.1.0_hera similarity index 62% rename from internal/scripts/installation/modulefiles/11.0.0_hera rename to internal/scripts/installation/modulefiles/11.1.0_hera index 5a38c524bb..849d9c01b9 100644 --- a/internal/scripts/installation/modulefiles/11.0.0_hera +++ b/internal/scripts/installation/modulefiles/11.1.0_hera @@ -3,25 +3,21 @@ ## Model Evaluation Tools ## proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0 + puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.1.0 *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" } # The intel compiler is required to run MET prereq intel -prereq anaconda/latest set base /contrib/met -set ver 11.0.0 +set ver 11.1.0 set share $base/$ver/share/met -set lib_base $base/11.0.0 -setenv MET_ROOT $base/$ver/MET-11.0.0 +set lib_base $base/11.1.0 +setenv MET_ROOT $base/$ver -prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin - - -#prepend-path LD_LIBRARY_PATH $lib_base/external_libs/lib +prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin #setenv METversion $ver #setenv MET_CONFIG $share/config @@ -36,12 +32,11 @@ prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin #setenv F77 ifort #module load intel/2022.1.2 -#module load anaconda/latest -#setenv libdir /contrib/met/11.0.0/external_libs/lib -#setenv incdir /contrib/met/11.0.0/external_libs/include -#setenv iprefix /contrib/met/11.0.0/external_libs -#setenv basedir /contrib/met/11.0.0/met-11.0.0 +#setenv libdir /contrib/met/11.1.0/external_libs/lib +#setenv incdir /contrib/met/11.1.0/external_libs/include +#setenv iprefix /contrib/met/11.1.0/external_libs +#setenv basedir /contrib/met/11.1.0/met-11.1.0 #setenv MET_HDF5 $iprefix #setenv MET_NETCDF $incdir @@ -54,9 +49,9 @@ prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin #setenv MET_HDFLIB $libdir #setenv MET_HDFEOSINC $incdir #setenv MET_HDFEOSLIB $libdir -#setenv MET_PYTHON /contrib/anaconda3/latest -#setenv MET_PYTHON_CC 
-I/contrib/anaconda/anaconda3/latest/include/python3.7m -#setenv MET_PYTHON_LD -L/contrib/anaconda/anaconda3/latest/lib\ -lpython3.7m\ -lpthread\ -ldl\ -lutil\ -lm\ -Xlinker\ -export-dynamic +#setenv MET_PYTHON /scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10 +#setenv MET_PYTHON_CC -I${MET_PYTHON}/include/python3.10 +#setenv MET_PYTHON_LD `python3-config --ldflags --embed` #setenv MET_FONT_DIR $basedir/fonts/ # CAIRO and FREETYPE were not used @@ -64,5 +59,3 @@ prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin #setenv MET_CAIROINC $incdir/cairo #setenv MET_FREETYPELIB $libdir #setenv MET_FREETYPEINC $incdir/freetype2 - - diff --git a/internal/scripts/installation/modulefiles/11.0.0_jet b/internal/scripts/installation/modulefiles/11.1.0_jet similarity index 80% rename from internal/scripts/installation/modulefiles/11.0.0_jet rename to internal/scripts/installation/modulefiles/11.1.0_jet index 30a6fcc946..fb1190d8f7 100644 --- a/internal/scripts/installation/modulefiles/11.0.0_jet +++ b/internal/scripts/installation/modulefiles/11.1.0_jet @@ -3,7 +3,7 @@ ## Model Evaluation Tools ## proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0 + puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.1.0 *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" } @@ -11,9 +11,9 @@ prereq intel prereq netcdf/4.7.0 prereq hdf5/1.10.5 -set base /contrib/met/11.0.0 -set ver 11.0.0 +set base /contrib/met/11.1.0 +set ver 11.1.0 set share $base/share/met -setenv MET_ROOT $base/$ver/MET-11.0.0 +setenv MET_ROOT $base/$ver/MET-11.1.0 -prepend-path PATH $base/bin:$base/external_libs/bin:/apps/netcdf/4.7.0/intel/18.0.5.274/bin:/apps/hdf5/1.10.5/intel_seq/18.0.5.274/bin:/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.0_py3.8 \ No newline at end of file +prepend-path PATH $base/bin:$base/external_libs/bin:/apps/netcdf/4.7.0/intel/18.0.5.274/bin:/apps/hdf5/1.10.5/intel_seq/18.0.5.274/bin:/mnt/lfs1/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.1_py3.10 diff --git a/internal/scripts/installation/modulefiles/11.0.0_orion b/internal/scripts/installation/modulefiles/11.1.0_orion similarity index 58% rename from internal/scripts/installation/modulefiles/11.0.0_orion rename to internal/scripts/installation/modulefiles/11.1.0_orion index 655c4af0cb..03ebbc352a 100644 --- a/internal/scripts/installation/modulefiles/11.0.0_orion +++ b/internal/scripts/installation/modulefiles/11.1.0_orion @@ -3,29 +3,29 @@ ## Model Evaluation Tools ## proc ModulesHelp { } { - puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.0.0 + puts stderr "Sets up the paths and environment variables to use the Model Evaluation Tools v11.1.0 *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" } prereq intel/2020.2 set base /apps/contrib/MET -set ver 11.0.0 +set ver 11.1.0 set share $base/$ver/share/met -set lib_base $base/11.0.0 -setenv MET_ROOT $base/$ver/MET-11.0.0 +set lib_base $base/11.1.0 +setenv MET_ROOT $base/$ver/MET-11.1.0 -prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.0_py3.8/bin +prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin -#export CC=icc +export CC=icc #export CXX=icc #export F77=ifort #module load intel/2020.2 #module load 
intelpython3/2020.2 -#export libdir=/apps/contrib/MET/11.0.0/external_libs/lib -#export incdir=/apps/contrib/MET/11.0.0/external_libs/include -#export iprefix=/apps/contrib/MET/11.0.0/external_libs -#export basedir=/apps/contrib/MET/11.0.0/MET-11.0.0 +#export libdir=/apps/contrib/MET/11.1.0/external_libs/lib +#export incdir=/apps/contrib/MET/11.1.0/external_libs/include +#export iprefix=/apps/contrib/MET/11.1.0/external_libs +#export basedir=/apps/contrib/MET/11.1.0/MET-11.1.0 #export MET_HDF5=$iprefix #export MET_NETCDF=$incdir #export MET_GRIB2CINC=$incdir @@ -37,9 +37,9 @@ prepend-path PATH $base/$ver/bin:$lib_base/external_libs/bin:/work/noaa/ovp/mini #export MET_HDFLIB=$libdir #export MET_HDFEOSINC=$incdir #export MET_HDFEOSLIB=$libdir -#export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.0_py3.8 -#export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.8 -#export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.8/config-3.8-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.8\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm +#export MET_PYTHON=/work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10 +#export MET_PYTHON_CC=-I${MET_PYTHON}/include/python3.10 +#export MET_PYTHON_LD=-L${MET_PYTHON}/lib/python3.10/config-3.10-x86_64-linux-gnu\ -L${MET_PYTHON}/lib\ -lpython3.10\ -lcrypt\ -lpthread\ -ldl\ -lutil\ -lrt\ -lm\ -lm #export MET_FONT_DIR=$basedir/fonts/ #export LDFLAGS=-Wl,--disable-new-dtags -Wl,-rpath,${libdir}:${MET_PYTHON}/lib -#export CPPFLAGS=-I/apps/contrib/met/11.0.0/external_libs/include +#export CPPFLAGS=-I/apps/contrib/met/11.1.0/external_libs/include diff --git a/internal/test_unit/config/TCStatConfig_ALAL2010 b/internal/test_unit/config/TCStatConfig_ALAL2010 index 02360bf454..05bf63a7a1 100644 --- a/internal/test_unit/config/TCStatConfig_ALAL2010 +++ b/internal/test_unit/config/TCStatConfig_ALAL2010 @@ -211,7 +211,8 @@ jobs = [ "-job filter -amodel AHWI -rirw_track BDECK -rirw_thresh >=30 -rirw_exact FALSE -dump_row ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_AHWI_ri.tcst", "-job filter -amodel AHWI -rirw_track BDECK -rirw_thresh <=-30 -rirw_exact TRUE -dump_row ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_AHWI_rw.tcst", "-job rirw -rirw_window 00 -rirw_thresh <=-15 -out_line_type CTC,CTS,MPR", - "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS,MPR" + "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS,MPR", + "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS -out_stat ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_rirw.stat" ]; // diff --git a/internal/test_unit/xml/unit_gen_vx_mask.xml b/internal/test_unit/xml/unit_gen_vx_mask.xml index 342721af33..ca66cea64c 100644 --- a/internal/test_unit/xml/unit_gen_vx_mask.xml +++ b/internal/test_unit/xml/unit_gen_vx_mask.xml @@ -489,6 +489,44 @@ + + + + + + + &MET_BIN;/gen_vx_mask + \ + 'latlon 360 361 -90 -130 0.5 0.5' \ + &INPUT_DIR;/shapefile/ne_110m_admin_0_countries/ne_110m_admin_0_countries.shp \ + &OUTPUT_DIR;/gen_vx_mask/South_America_mask.nc \ + -type shape -shape_str Continent 'south america' \ + -name South_America -v 2 + + + &OUTPUT_DIR;/gen_vx_mask/South_America_mask.nc + + + + + + + + + + &MET_BIN;/gen_vx_mask + \ + &OUTPUT_DIR;/gen_vx_mask/South_America_mask.nc \ + &INPUT_DIR;/shapefile/ne_110m_admin_0_countries/ne_110m_admin_0_countries.shp \ + &OUTPUT_DIR;/gen_vx_mask/South_America_Spain_Portugal_mask.nc \ + -type shape -shape_str CONTINENT Europe -shape_str Name Spain,Portugal \ + -name South_America_Spain_Portugal -value 2 + + + 
&OUTPUT_DIR;/gen_vx_mask/South_America_Spain_Portugal_mask.nc + + + @@ -500,8 +538,8 @@ PYTHON_NUMPY \ &OUTPUT_DIR;/gen_vx_mask/PYTHON_FCST_or_OBS_mask.nc \ -type data \ - -input_field 'name="&MET_BASE;/python/read_ascii_numpy.py &MET_DATA;/python/fcst.txt FCST";' \ - -mask_field 'name="&MET_BASE;/python/read_ascii_numpy.py &MET_DATA;/python/obs.txt OBS";' \ + -input_field 'name="&MET_BASE;/python/examples/read_ascii_numpy.py &MET_DATA;/python/fcst.txt FCST";' \ + -mask_field 'name="&MET_BASE;/python/examples/read_ascii_numpy.py &MET_DATA;/python/obs.txt OBS";' \ -thresh gt0 -union -v 3 diff --git a/internal/test_unit/xml/unit_python.xml b/internal/test_unit/xml/unit_python.xml index 5a519d9212..051f709a62 100644 --- a/internal/test_unit/xml/unit_python.xml +++ b/internal/test_unit/xml/unit_python.xml @@ -31,7 +31,7 @@ \ PYTHON_NUMPY \ &OUTPUT_DIR;/python/letter_numpy_grid_name.ps \ - 'name = "&MET_BASE;/python/read_ascii_numpy_grid.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ + 'name = "&MET_BASE;/python/examples/read_ascii_numpy_grid.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ -plot_range 0.0 255.0 \ -title "Grid Name: 'G212'" \ -v 1 @@ -53,7 +53,7 @@ \ PYTHON_NUMPY \ &OUTPUT_DIR;/python/letter_numpy_grid_string.ps \ - 'name = "&MET_BASE;/python/read_ascii_numpy_grid.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ + 'name = "&MET_BASE;/python/examples/read_ascii_numpy_grid.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ -plot_range 0.0 255.0 \ -title "Grid String: 'lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N'" \ -v 1 @@ -74,7 +74,7 @@ \ PYTHON_NUMPY \ &OUTPUT_DIR;/python/letter_numpy_grid_data_file.ps \ - 'name = "&MET_BASE;/python/read_ascii_numpy_grid.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ + 'name = "&MET_BASE;/python/examples/read_ascii_numpy_grid.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ -plot_range 0.0 255.0 \ -title "Gridded Data File: 'wrfprs_ruc13_12.tm00_G212'" \ -v 1 @@ -90,7 +90,7 @@ \ PYTHON_NUMPY \ &OUTPUT_DIR;/python/letter_numpy.ps \ - 'name = "&MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ + 'name = "&MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ -plot_range 0.0 255.0 \ -title "Python enabled numpy plot_data_plane" \ -v 1 @@ -105,7 +105,7 @@ \ PYTHON_XARRAY \ &OUTPUT_DIR;/python/letter_xarray.ps \ - 'name = "&MET_BASE;/python/read_ascii_xarray.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ + 'name = "&MET_BASE;/python/examples/read_ascii_xarray.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ -plot_range 0.0 255.0 \ -title "Python enabled xarray plot_data_plane" \ -v 1 @@ -120,7 +120,7 @@ \ &DATA_DIR_PYTHON;/letter.txt \ &OUTPUT_DIR;/python/letter_file_type.ps \ - 'name = "&MET_BASE;/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG LETTER"; file_type=PYTHON_NUMPY;' \ + 'name = "&MET_BASE;/python/examples/read_ascii_numpy.py MET_PYTHON_INPUT_ARG LETTER"; file_type=PYTHON_NUMPY;' \ -plot_range 0.0 255.0 \ -title "Python enabled plot_data_plane using file_type option" \ -v 1 @@ -133,7 +133,7 @@ &MET_BIN;/mode - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS \ &OUTPUT_DIR;/pcp_combine/arw-tom-gep0_2012040912_F030_APCP06.nc \ @@ -152,8 +152,8 @@ &MET_BIN;/mode - FCST_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS + FCST_COMMAND 
&MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS \ PYTHON_NUMPY \ @@ -172,7 +172,7 @@ &MET_BIN;/grid_stat - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS \ &DATA_DIR_MODEL;/grib1/nam_st4/nam_2012040900_F012_gSt4.grib \ @@ -189,8 +189,8 @@ &MET_BIN;/grid_stat - FCST_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS + FCST_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS \ PYTHON_NUMPY \ @@ -206,8 +206,8 @@ &MET_BIN;/point_stat - FCST_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS + FCST_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS \ PYTHON_NUMPY \ @@ -224,8 +224,8 @@ &MET_BIN;/wavelet_stat - FCST_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS + FCST_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/obs.txt OBS \ PYTHON_NUMPY \ @@ -244,7 +244,7 @@ &MET_BIN;/wavelet_stat - FCST_COMMAND &MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST + FCST_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST \ PYTHON_NUMPY \ @@ -266,7 +266,7 @@ PYTHON_NUMPY \ G130 \ &OUTPUT_DIR;/python/regrid_data_plane.nc \ - -field 'name="&MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST";' \ + -field 'name="&MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST";' \ -v 1 @@ -279,7 +279,7 @@ \ PYTHON_NUMPY \ &OUTPUT_DIR;/python/shift_data_plane.nc \ - 'name="&MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST";' \ + 'name="&MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/fcst.txt FCST";' \ -from 30 -110 -to 35 -115 \ -v 1 @@ -293,7 +293,7 @@ \ &DATA_DIR_PYTHON;/fcst.txt \ &OUTPUT_DIR;/python/shift_data_plane_input_arg.nc \ - 'name="&MET_BASE;/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG FCST"; file_type=PYTHON_NUMPY;' \ + 'name="&MET_BASE;/python/examples/read_ascii_numpy.py MET_PYTHON_INPUT_ARG FCST"; file_type=PYTHON_NUMPY;' \ -from 30 -110 -to 35 -115 \ -v 1 @@ -305,8 +305,8 @@ &MET_BIN;/series_analysis - FCST_COMMAND &MET_BASE;/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG FCST - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG OBS + FCST_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py MET_PYTHON_INPUT_ARG FCST + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py MET_PYTHON_INPUT_ARG OBS \ -fcst &DATA_DIR_PYTHON;/fcst.txt &DATA_DIR_PYTHON;/fcst.txt \ @@ -324,8 +324,8 @@ &MET_BIN;/mtd - FCST_COMMAND &MET_BASE;/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG FCST - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG OBS + FCST_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py 
MET_PYTHON_INPUT_ARG FCST + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py MET_PYTHON_INPUT_ARG OBS OUTPUT_PREFIX PYTHON \ @@ -348,8 +348,8 @@ &MET_BIN;/ensemble_stat - FCST_COMMAND &MET_BASE;/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG FCST - OBS_COMMAND &MET_BASE;/python/read_ascii_numpy.py MET_PYTHON_INPUT_ARG OBS + FCST_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py MET_PYTHON_INPUT_ARG FCST + OBS_COMMAND &MET_BASE;/python/examples/read_ascii_numpy.py MET_PYTHON_INPUT_ARG OBS OUTPUT_PREFIX PYTHON \ @@ -369,7 +369,7 @@ &MET_BIN;/ascii2nc \ - "&MET_BASE;/python/read_ascii_point.py &MET_DATA;/sample_obs/ascii/sample_ascii_obs.txt" \ + "&MET_BASE;/python/examples/read_ascii_point.py &MET_DATA;/sample_obs/ascii/sample_ascii_obs.txt" \ &OUTPUT_DIR;/python/ascii2nc_python.nc \ -format python @@ -382,7 +382,7 @@ &MET_BIN;/ascii2nc \ - "&MET_BASE;/python/read_ascii_point.py &MET_DATA;/sample_obs/ascii/sample_ascii_obs_varname.txt" \ + "&MET_BASE;/python/examples/read_ascii_point.py &MET_DATA;/sample_obs/ascii/sample_ascii_obs_varname.txt" \ &OUTPUT_DIR;/python/ascii2nc_python_varname.nc \ -format python @@ -395,7 +395,7 @@ &MET_BIN;/stat_analysis \ - -lookin python &MET_BASE;/python/read_ascii_mpr.py &OUTPUT_DIR;/python/point_stat_120000L_20050807_120000V.stat \ + -lookin python &MET_BASE;/python/examples/read_ascii_mpr.py &OUTPUT_DIR;/python/point_stat_120000L_20050807_120000V.stat \ -job aggregate_stat -line_type MPR -out_line_type sl1l2 -by FCST_VAR \ -out_stat &OUTPUT_DIR;/python/stat_analysis_python_AGGR_MPR_to_SL1L2.stat @@ -415,7 +415,7 @@ \ PYTHON_NUMPY \ &OUTPUT_DIR;/python/letter_numpy_grid_name_user_python.ps \ - 'name = "&MET_BASE;/python/read_ascii_numpy_grid.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ + 'name = "&MET_BASE;/python/examples/read_ascii_numpy_grid.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ -plot_range 0.0 255.0 \ -title "Grid Name: 'G212'" \ -v 1 @@ -433,7 +433,7 @@ MET_PYTHON_EXE &MET_PYTHON_EXE; \ - "&MET_BASE;/python/read_ascii_point.py &MET_DATA;/sample_obs/ascii/sample_ascii_obs.txt" \ + "&MET_BASE;/python/examples/read_ascii_point.py &MET_DATA;/sample_obs/ascii/sample_ascii_obs.txt" \ &OUTPUT_DIR;/python/ascii2nc_user_python.nc \ -format python @@ -453,7 +453,7 @@ \ PYTHON_NUMPY \ &OUTPUT_DIR;/python/letter_user_python.ps \ - 'name = "&MET_BASE;/python/read_ascii_numpy.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ + 'name = "&MET_BASE;/python/examples/read_ascii_numpy.py &DATA_DIR_PYTHON;/letter.txt LETTER";' \ -plot_range 0.0 255.0 \ -title "Python enabled plot_data_plane" \ -v 1 @@ -471,7 +471,7 @@ MET_PYTHON_EXE &MET_PYTHON_EXE; \ - -lookin python &MET_BASE;/python/read_ascii_mpr.py &OUTPUT_DIR;/python/point_stat_120000L_20050807_120000V.stat \ + -lookin python &MET_BASE;/python/examples/read_ascii_mpr.py &OUTPUT_DIR;/python/point_stat_120000L_20050807_120000V.stat \ -job aggregate_stat -line_type MPR -out_line_type sl1l2 -by FCST_VAR \ -out_stat &OUTPUT_DIR;/python/stat_analysis_user_python_AGGR_MPR_to_SL1L2.stat @@ -483,7 +483,7 @@ &MET_BIN;/point2grid \ - 'PYTHON_NUMPY=&MET_BASE;/python/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc' \ + 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc' \ G212 \ &OUTPUT_DIR;/python/pb2nc_TMP.nc \ -field 'name="TMP"; level="*"; valid_time="20120409_120000"; censor_thresh=[ <0 ]; censor_val=[0];' \ @@ -502,7 +502,7 @@ MET_PYTHON_EXE &MET_PYTHON_EXE; \ - 
'PYTHON_NUMPY=&MET_BASE;/python/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc' \ + 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc' \ G212 \ &OUTPUT_DIR;/python/pb2nc_TMP_user_python.nc \ -field 'name="TMP"; level="*"; valid_time="20120409_120000"; censor_thresh=[ <0 ]; censor_val=[0];' \ @@ -520,7 +520,7 @@ TO_GRID NONE \ - 'PYTHON_NUMPY=&MET_BASE;/python/read_ascii_point.py &MET_DATA;/sample_obs/ascii/precip24_2010010112.ascii' \ + 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_ascii_point.py &MET_DATA;/sample_obs/ascii/precip24_2010010112.ascii' \ &OUTPUT_DIR;/python/precip24_2010010112.ps \ -config &CONFIG_DIR;/PlotPointObsConfig \ -plot_grid &DATA_DIR_MODEL;/grib2/nam/nam_2012040900_F012.grib2 \ @@ -538,9 +538,9 @@ TO_GRID NONE \ - 'PYTHON_NUMPY=&MET_BASE;/python/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc' \ + 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc' \ &OUTPUT_DIR;/python/nam_and_ndas.20120409.t12z.prepbufr_CONFIG.ps \ - -point_obs 'PYTHON_NUMPY=&MET_BASE;/python/read_met_point_obs.py &OUTPUT_DIR;/ascii2nc/trmm_2012040912_3hr.nc' \ + -point_obs 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_met_point_obs.py &OUTPUT_DIR;/ascii2nc/trmm_2012040912_3hr.nc' \ -plot_grid &DATA_DIR_MODEL;/grib2/nam/nam_2012040900_F012.grib2 \ -config &CONFIG_DIR;/PlotPointObsConfig \ -title "NAM 2012040900 F12 vs NDAS 500mb RH and TRMM 3h > 0" \ @@ -570,7 +570,7 @@ &OUTPUT_DIR;/python/ensemble_stat/input_file_list \ &CONFIG_DIR;/EnsembleStatConfig \ -grid_obs &DATA_DIR_OBS;/laps/laps_2012041012_F000.grib \ - -point_obs 'PYTHON_NUMPY=&MET_BASE;/python/read_met_point_obs.py &OUTPUT_DIR;/ascii2nc/gauge_2012041012_24hr.nc' \ + -point_obs 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_met_point_obs.py &OUTPUT_DIR;/ascii2nc/gauge_2012041012_24hr.nc' \ -outdir &OUTPUT_DIR;/python/ensemble_stat -v 1 @@ -595,7 +595,7 @@ \ &DATA_DIR_MODEL;/grib1/nam/nam_2012040900_F012.grib \ - 'PYTHON_NUMPY=&MET_BASE;/python/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/gdas1.20120409.t12z.prepbufr.nc' \ + 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/gdas1.20120409.t12z.prepbufr.nc' \ &CONFIG_DIR;/PointStatConfig_WINDS \ -outdir &OUTPUT_DIR;/python -v 1 @@ -609,7 +609,7 @@ \ PYTHON_NUMPY \ &OUTPUT_DIR;/python/wrfout_d01_2008-08-08_12_00_00_PLEV_ZONAL_MEAN.ps \ - 'name="&MET_BASE;/python/derive_WRF_semilatlon.py &DATA_DIR_MODEL;/p_interp/wrfout_d01_2008-08-08_12:00:00_PLEV TT lat";' \ + 'name="&MET_BASE;/python/examples/derive_WRF_semilatlon.py &DATA_DIR_MODEL;/p_interp/wrfout_d01_2008-08-08_12:00:00_PLEV TT lat";' \ -title "WRF Zonal Mean" \ -v 1 @@ -622,7 +622,7 @@ &MET_BIN;/pcp_combine \ -add PYTHON_NUMPY \ - 'name="&MET_BASE;/python/derive_WRF_semilatlon.py &DATA_DIR_MODEL;/p_interp/wrfout_d01_2008-08-08_12:00:00_PLEV TT lon";' \ + 'name="&MET_BASE;/python/examples/derive_WRF_semilatlon.py &DATA_DIR_MODEL;/p_interp/wrfout_d01_2008-08-08_12:00:00_PLEV TT lon";' \ &OUTPUT_DIR;/python/wrfout_d01_2008-08-08_12_00_00_PLEV_MERIDIONAL_MEAN.nc \ -name "TT_MERIDIONAL_MEAN" -v 1 diff --git a/scripts/python/Makefile.am b/scripts/python/Makefile.am index 689708e4c3..c3b7b20042 100644 --- a/scripts/python/Makefile.am +++ b/scripts/python/Makefile.am @@ -18,6 +18,11 @@ # SUBDIRS = include +SUBDIRS = \ + examples \ + met \ + pyembed \ + utility ## Example of how to Install outside of $(pkgdatadir) ## 
scriptsrootdir = $(prefix)/share/scripts @@ -25,17 +30,10 @@ pythonscriptsdir = $(pkgdatadir)/python -pythonscripts_DATA = \ - met_point_obs.py \ - met_point_obs_nc.py \ - read_ascii_numpy.py \ - read_ascii_numpy_grid.py \ - read_ascii_xarray.py \ - read_ascii_point.py \ - read_ascii_mpr.py \ - read_met_point_obs.py \ - derive_WRF_semilatlon.py - -EXTRA_DIST = ${pythonscripts_DATA} +#EXTRA_DIST = ${top_DATA} \ +# sample_fcst \ +# sample_obs \ +# python \ +# copyright_notice.txt MAINTAINERCLEANFILES = Makefile.in diff --git a/scripts/python/Makefile.in b/scripts/python/Makefile.in index 6d85ed81f9..5ff5daed23 100644 --- a/scripts/python/Makefile.in +++ b/scripts/python/Makefile.in @@ -15,7 +15,6 @@ @SET_MAKE@ # SUBDIRS = include - VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ @@ -114,43 +113,74 @@ am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pythonscriptsdir)" -DATA = $(pythonscripts_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. 
This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ @@ -296,21 +326,21 @@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ +SUBDIRS = \ + examples \ + met \ + pyembed \ + utility + pythonscriptsdir = $(pkgdatadir)/python -pythonscripts_DATA = \ - met_point_obs.py \ - met_point_obs_nc.py \ - read_ascii_numpy.py \ - read_ascii_numpy_grid.py \ - read_ascii_xarray.py \ - read_ascii_point.py \ - read_ascii_mpr.py \ - read_met_point_obs.py \ - derive_WRF_semilatlon.py - -EXTRA_DIST = ${pythonscripts_DATA} + +#EXTRA_DIST = ${top_DATA} \ +# sample_fcst \ +# sample_obs \ +# python \ +# copyright_notice.txt MAINTAINERCLEANFILES = Makefile.in -all: all-am +all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @@ -342,33 +372,105 @@ $(top_srcdir)/configure: $(am__configure_deps) $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): -install-pythonscriptsDATA: $(pythonscripts_DATA) - @$(NORMAL_INSTALL) - @list='$(pythonscripts_DATA)'; test -n "$(pythonscriptsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonscriptsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonscriptsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonscriptsdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonscriptsdir)" || exit $$?; \ - done - -uninstall-pythonscriptsDATA: - @$(NORMAL_UNINSTALL) - @list='$(pythonscripts_DATA)'; test -n "$(pythonscriptsdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonscriptsdir)'; $(am__uninstall_files_from_dir) -tags TAGS: -ctags CTAGS: - -cscope cscopelist: +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. 
+# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am @@ -403,22 +505,45 @@ distdir-am: $(DISTFILES) || exit 1; \ fi; \ done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pythonscriptsdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + 
echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am +check-am: all-am +check: check-recursive +all-am: Makefile +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am -installcheck: installcheck-am +installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ @@ -441,85 +566,85 @@ maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am +clean: clean-recursive clean-am: clean-generic mostlyclean-am -distclean: distclean-am +distclean: distclean-recursive -rm -f Makefile -distclean-am: clean-am distclean-generic +distclean-am: clean-am distclean-generic distclean-tags -dvi: dvi-am +dvi: dvi-recursive dvi-am: -html: html-am +html: html-recursive html-am: -info: info-am +info: info-recursive info-am: -install-data-am: install-pythonscriptsDATA +install-data-am: -install-dvi: install-dvi-am +install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: -install-html: install-html-am +install-html: install-html-recursive install-html-am: -install-info: install-info-am +install-info: install-info-recursive install-info-am: install-man: -install-pdf: install-pdf-am +install-pdf: install-pdf-recursive install-pdf-am: -install-ps: install-ps-am +install-ps: install-ps-recursive install-ps-am: installcheck-am: -maintainer-clean: maintainer-clean-am +maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic -mostlyclean: mostlyclean-am +mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic -pdf: pdf-am +pdf: pdf-recursive pdf-am: -ps: ps-am +ps: ps-recursive ps-am: -uninstall-am: uninstall-pythonscriptsDATA +uninstall-am: -.MAKE: install-am install-strip +.MAKE: $(am__recursive_targets) install-am install-strip -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-pythonscriptsDATA \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ - uninstall-am uninstall-pythonscriptsDATA + install-ps install-ps-am install-strip installcheck \ + installcheck-am 
installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile diff --git a/scripts/python/examples/Makefile.am b/scripts/python/examples/Makefile.am new file mode 100644 index 0000000000..e0461a3564 --- /dev/null +++ b/scripts/python/examples/Makefile.am @@ -0,0 +1,39 @@ +## Makefile.am -- Process this file with automake to produce Makefile.in +## Copyright (C) 2000, 2006 Gary V. Vaughan +## +## This program is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; either version 2, or (at your option) +## any later version. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, write to the Free Software +## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +## MA 02110-1301, USA. + +# SUBDIRS = include + + +## Example of how to Install outside of $(pkgdatadir) +## scriptsrootdir = $(prefix)/share/scripts +## pythonscriptsdir = ${scriptsrootdir}/python + +pythonexamplesdir = $(pkgdatadir)/python/examples + +pythonexamples_DATA = \ + derive_WRF_semilatlon.py \ + read_ascii_mpr.py \ + read_ascii_numpy_grid.py \ + read_ascii_numpy.py \ + read_ascii_point.py \ + read_ascii_xarray.py \ + read_met_point_obs.py + +EXTRA_DIST = ${pythonexamples_DATA} + +MAINTAINERCLEANFILES = Makefile.in diff --git a/scripts/python/examples/Makefile.in b/scripts/python/examples/Makefile.in new file mode 100644 index 0000000000..ad4832e5a0 --- /dev/null +++ b/scripts/python/examples/Makefile.in @@ -0,0 +1,527 @@ +# Makefile.in generated by automake 1.16.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2018 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SUBDIRS = include + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = scripts/python/examples +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed 
'$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pythonexamplesdir)" +DATA = $(pythonexamples_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BUFRLIB_NAME = @BUFRLIB_NAME@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FC_LIBS = @FC_LIBS@ +FFLAGS = @FFLAGS@ +FLIBS = @FLIBS@ +GREP = @GREP@ +GRIB2CLIB_NAME = @GRIB2CLIB_NAME@ +GRIB2_LIBS = @GRIB2_LIBS@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LEX = @LEX@ +LEXLIB = @LEXLIB@ +LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MET_BUFR = @MET_BUFR@ +MET_BUFRLIB = @MET_BUFRLIB@ +MET_CAIRO = @MET_CAIRO@ +MET_CAIROINC = @MET_CAIROINC@ +MET_CAIROLIB = @MET_CAIROLIB@ +MET_FREETYPE = @MET_FREETYPE@ +MET_FREETYPEINC = @MET_FREETYPEINC@ +MET_FREETYPELIB = @MET_FREETYPELIB@ +MET_GRIB2C = @MET_GRIB2C@ +MET_GRIB2CINC = @MET_GRIB2CINC@ +MET_GRIB2CLIB = @MET_GRIB2CLIB@ +MET_GSL = @MET_GSL@ +MET_GSLINC = @MET_GSLINC@ +MET_GSLLIB = @MET_GSLLIB@ +MET_HDF = @MET_HDF@ +MET_HDF5 = @MET_HDF5@ +MET_HDF5INC = @MET_HDF5INC@ +MET_HDF5LIB = @MET_HDF5LIB@ +MET_HDFEOS = @MET_HDFEOS@ +MET_HDFEOSINC = @MET_HDFEOSINC@ +MET_HDFEOSLIB = @MET_HDFEOSLIB@ +MET_HDFINC = @MET_HDFINC@ +MET_HDFLIB = @MET_HDFLIB@ +MET_NETCDF = @MET_NETCDF@ +MET_NETCDFINC = @MET_NETCDFINC@ +MET_NETCDFLIB = @MET_NETCDFLIB@ +MET_PYTHON_BIN_EXE = @MET_PYTHON_BIN_EXE@ +MET_PYTHON_CC = @MET_PYTHON_CC@ +MET_PYTHON_LD = @MET_PYTHON_LD@ +MKDIR_P = @MKDIR_P@ +OBJEXT = @OBJEXT@ +OPENMP_CFLAGS = @OPENMP_CFLAGS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PYTHON_LIBS = @PYTHON_LIBS@ +RANLIB = @RANLIB@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +YACC = @YACC@ +YFLAGS = @YFLAGS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = 
@exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +pythonexamplesdir = $(pkgdatadir)/python/examples +pythonexamples_DATA = \ + derive_WRF_semilatlon.py \ + read_ascii_mpr.py \ + read_ascii_numpy_grid.py \ + read_ascii_numpy.py \ + read_ascii_point.py \ + read_ascii_xarray.py \ + read_met_point_obs.py + +EXTRA_DIST = ${pythonexamples_DATA} +MAINTAINERCLEANFILES = Makefile.in +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign scripts/python/examples/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign scripts/python/examples/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-pythonexamplesDATA: $(pythonexamples_DATA) + @$(NORMAL_INSTALL) + @list='$(pythonexamples_DATA)'; test -n "$(pythonexamplesdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pythonexamplesdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pythonexamplesdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonexamplesdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonexamplesdir)" || exit $$?; \ + done + +uninstall-pythonexamplesDATA: + @$(NORMAL_UNINSTALL) + @list='$(pythonexamples_DATA)'; test -n "$(pythonexamplesdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pythonexamplesdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + 
dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: + for dir in "$(DESTDIR)$(pythonexamplesdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-pythonexamplesDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pythonexamplesDATA + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-pythonexamplesDATA \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am uninstall-pythonexamplesDATA + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/scripts/python/derive_WRF_semilatlon.py b/scripts/python/examples/derive_WRF_semilatlon.py similarity index 100% rename from scripts/python/derive_WRF_semilatlon.py rename to scripts/python/examples/derive_WRF_semilatlon.py diff --git a/scripts/python/examples/read_ascii_mpr.py b/scripts/python/examples/read_ascii_mpr.py new file mode 100644 index 0000000000..d166893c98 --- /dev/null +++ b/scripts/python/examples/read_ascii_mpr.py @@ -0,0 +1,34 @@ +import os +import sys +from met.mprbase import mpr_data + + +######################################################################## + +print("Python Script:\t" + repr(sys.argv[0])) + + ## + ## input file specified on the command line + ## load the data into the numpy array + ## + +if len(sys.argv) != 2: + print("ERROR: read_ascii_mpr.py -> Must specify exactly one input file.") + sys.exit(1) + +# Read the input file as the first argument +input_file = os.path.expandvars(sys.argv[1]) +try: + print("Input File:\t" + repr(input_file)) + + # Read MPR lines by using the Pandas Python package, + # skipping the header row and first column. + # Input should be a 36 column text data matching the MPR line-type + # output from MET tools. 
+ mpr_data = mpr_data.read_mpr(input_file, col_start=1, col_last=36, skiprows=1) + print("Data Length:\t" + repr(len(mpr_data))) + print("Data Type:\t" + repr(type(mpr_data))) +except NameError: + print("Can't find the input file") + +######################################################################## diff --git a/scripts/python/examples/read_ascii_numpy.py b/scripts/python/examples/read_ascii_numpy.py new file mode 100644 index 0000000000..a15fe17031 --- /dev/null +++ b/scripts/python/examples/read_ascii_numpy.py @@ -0,0 +1,85 @@ +import os +import sys +from met.dataplane import dataplane + +########################################### + +print("Python Script:\t" + repr(sys.argv[0])) + + ## + ## input file specified on the command line + ## load the data into the numpy array + ## + +if len(sys.argv) != 3: + print("ERROR: read_ascii_numpy.py -> Must specify exactly one input file and a name for the data.") + sys.exit(1) + +# Read the input file as the first argument +input_file = os.path.expandvars(sys.argv[1]) +data_name = sys.argv[2] + +try: + print("Input File:\t" + repr(input_file)) + print("Data Name:\t" + repr(data_name)) + # read_2d_text_input() reads n by m text data and returns a 2D numpy array + met_data = dataplane.read_2d_text_input(input_file) + print("Data Shape:\t" + repr(met_data.shape)) + print("Data Type:\t" + repr(met_data.dtype)) +except NameError: + met_data = None + print("Can't find the input file") + +# attrs is a dictionary which contains attributes describing the dataplane. +# attrs should have 9 items, each of data type string except 'grid', which +# may also be a dictionary: +# 'name': data name +# 'long_name': descriptive name +# 'valid': valid time (format = 'yyyymmdd_hhmmss') +# 'init': init time (format = 'yyyymmdd_hhmmss') +# 'lead': lead time (format = 'hhmmss') +# 'accum': accumulation time (format = 'hhmmss') +# 'level': vertical level +# 'units': units of the data +# 'grid': contains the grid information +# - a grid name (G212) +# - a gridded data file name +# - MET specific grid string, "lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N" +# - a dictionary for the grid information + +valid_time = '20050807_120000' +init_time = '20050807_000000' +lead_time = '120000' +accum_time = '120000' +v_level = 'Surface' +units = 'None' + +grid_lambert_conformal = { + 'type': 'Lambert Conformal', + 'hemisphere': 'N', + + 'name': 'FooGrid', + + 'scale_lat_1': 25.0, + 'scale_lat_2': 25.0, + + 'lat_pin': 12.19, + 'lon_pin': -135.459, + + 'x_pin': 0.0, + 'y_pin': 0.0, + + 'lon_orient': -95.0, + + 'd_km': 40.635, + 'r_km': 6371.2, + + 'nx': 185, + 'ny': 129, +} + +long_name = data_name + "_word" +attrs = dataplane.set_dataplane_attrs(data_name, valid_time, init_time, + lead_time, accum_time, v_level, units, + grid_lambert_conformal, long_name) + +print("Attributes:\t" + repr(attrs)) diff --git a/scripts/python/examples/read_ascii_numpy_grid.py b/scripts/python/examples/read_ascii_numpy_grid.py new file mode 100644 index 0000000000..79e6829052 --- /dev/null +++ b/scripts/python/examples/read_ascii_numpy_grid.py @@ -0,0 +1,64 @@ +import os +import sys +from met.dataplane import dataplane + +########################################### + +print("Python Script:\t" + repr(sys.argv[0])) + + ## + ## input file specified on the command line + ## load the data into the numpy array + ## + +if len(sys.argv) != 3: + print("ERROR: read_ascii_numpy_grid.py -> Must specify exactly one input file and a name for the data.") + sys.exit(1) + +# Read the input file as the first argument +input_file = os.path.expandvars(sys.argv[1])
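+# The second command-line argument is the name to assign to the returned +# dataplane; the regression tests in this change set pass names such as +# FCST, OBS, and LETTER.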
+data_name = sys.argv[2] + +try: + # Print some output to verify that this script ran + print("Input File:\t" + repr(input_file)) + print("Data Name:\t" + repr(data_name)) + # read_2d_text_input() reads n by m text data and returns a 2D numpy array + met_data = dataplane.read_2d_text_input(input_file) + print("Data Shape:\t" + repr(met_data.shape)) + print("Data Type:\t" + repr(met_data.dtype)) +except NameError: + print("Can't find the input file") + +# attrs is a dictionary which contains attributes describing the dataplane. +# attrs should have 9 items, each of data type string except 'grid', which +# may also be a dictionary: +# 'name': data name +# 'long_name': descriptive name +# 'valid': valid time (format = 'yyyymmdd_hhmmss') +# 'init': init time (format = 'yyyymmdd_hhmmss') +# 'lead': lead time (format = 'hhmmss') +# 'accum': accumulation time (format = 'hhmmss') +# 'level': vertical level +# 'units': units of the data +# 'grid': contains the grid information +# - a grid name (G212) +# - a gridded data file name +# - MET specific grid string, "lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N" +# - a dictionary for the grid information + +valid_time = '20050807_120000' +init_time = '20050807_000000' +lead_time = '120000' +accum_time = '120000' +v_level = 'Surface' +units = 'None' + +## create the metadata dictionary from the PYTHON_GRID environment variable +grid_info = os.path.expandvars(os.getenv('PYTHON_GRID')) + +long_name = data_name + "_word" +attrs = dataplane.set_dataplane_attrs(data_name, valid_time, init_time, + lead_time, accum_time, v_level, units, + grid_info, long_name) + +print("Attributes:\t" + repr(attrs)) diff --git a/scripts/python/examples/read_ascii_point.py b/scripts/python/examples/read_ascii_point.py new file mode 100644 index 0000000000..88e9e31ce6 --- /dev/null +++ b/scripts/python/examples/read_ascii_point.py @@ -0,0 +1,51 @@ +import os +import sys + +from met.point import met_point_tools + +######################################################################## + +print("Python Script:\t" + repr(sys.argv[0])) + +## +## input file specified on the command line +## load the data into the numpy array +## + +arg_cnt = len(sys.argv) +if arg_cnt < 2: + print("ERROR: read_ascii_point.py -> Missing an input file.") + sys.exit(1) + +last_index = 2 +if last_index < arg_cnt: + print(" INFO: read_ascii_point.py -> Too many arguments, ignored {o}.".format( + o=' '.join(sys.argv[last_index:]))) + +# Read the input file as the first argument +input_file = os.path.expandvars(sys.argv[1]) +try: + print("Input File:\t" + repr(input_file)) + + # Read and format the input 11-column observations: + # (1) string: Message_Type + # (2) string: Station_ID + # (3) string: Valid_Time(YYYYMMDD_HHMMSS) + # (4) numeric: Lat(Deg North) + # (5) numeric: Lon(Deg East) + # (6) numeric: Elevation(msl) + # (7) string: Var_Name(or GRIB_Code) + # (8) numeric: Level + # (9) numeric: Height(msl or agl) + # (10) string: QC_String + # (11) numeric: Observation_Value + + # Read the 11-column text input data using the pandas package + point_data = met_point_tools.read_text_point_obs(input_file) + print(" point_data: Data Length:\t" + repr(len(point_data))) + print(" point_data: Data Type:\t" + repr(type(point_data))) +except FileNotFoundError: + print(f"The input file {input_file} does not exist") + sys.exit(1) + +######################################################################## diff --git a/scripts/python/examples/read_ascii_xarray.py b/scripts/python/examples/read_ascii_xarray.py new file mode 100644 index 0000000000..8998235ea1 --- /dev/null
+++ b/scripts/python/examples/read_ascii_xarray.py @@ -0,0 +1,109 @@ +import os +import sys +import xarray as xr +from met.dataplane import dataplane + +########################################### + +print("Python Script:\t" + repr(sys.argv[0])) + + ## + ## input file specified on the command line + ## load the data into the numpy array + ## + +if len(sys.argv) != 3: + print("ERROR: read_ascii_xarray.py -> Must specify exactly one input file and a name for the data.") + sys.exit(1) + +# Read the input file as the first argument +input_file = os.path.expandvars(sys.argv[1]) +data_name = sys.argv[2] + +try: + print("Input File:\t" + repr(input_file)) + print("Data Name:\t" + repr(data_name)) + # read_2d_text_input() reads n by m text data and returns a 2D numpy array + met_data = dataplane.read_2d_text_input(input_file) + print("Data Shape:\t" + repr(met_data.shape)) + print("Data Type:\t" + repr(met_data.dtype)) +except NameError: + met_data = None + print("Can't read the input file") + +########################################### + + ## + ## create the metadata dictionary + ## + +# attrs is a dictionary which contains attributes describing the dataplane. +# attrs should have 9 items, each of data type string except 'grid', which +# may also be a dictionary: +# 'name': data name +# 'long_name': descriptive name +# 'valid': valid time (format = 'yyyymmdd_hhmmss') +# 'init': init time (format = 'yyyymmdd_hhmmss') +# 'lead': lead time (format = 'hhmmss') +# 'accum': accumulation time (format = 'hhmmss') +# 'level': vertical level +# 'units': units of the data +# 'grid': contains the grid information +# - a grid name (G212) +# - a gridded data file name +# - MET specific grid string, "lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N" +# - a dictionary for the grid information + +valid_time = '20050807_120000' +init_time = '20050807_000000' +lead_time = '120000' +accum_time = '120000' +v_level = 'Surface' +units = 'None' + +grid_lambert_conformal = { + 'type': 'Lambert Conformal', + 'hemisphere': 'N', + + 'name': 'FooGrid', + + 'scale_lat_1': 25.0, + 'scale_lat_2': 25.0, + + 'lat_pin': 12.19, + 'lon_pin': -135.459, + + 'x_pin': 0.0, + 'y_pin': 0.0, + + 'lon_orient': -95.0, + + 'd_km': 40.635, + 'r_km': 6371.2, + + 'nx': 185, + 'ny': 129, +} + +long_name = data_name + "_word" +attrs = dataplane.set_dataplane_attrs(data_name, valid_time, init_time, + lead_time, accum_time, v_level, units, + grid_lambert_conformal, long_name) + +print("Attributes:\t" + repr(attrs)) + +# Create an xarray DataArray object +da = xr.DataArray(met_data) +ds = xr.Dataset({"fcst":da}) + +# Add the attributes to the dataset object +ds.attrs = attrs + +# Delete the local variable attrs to mimic the real world, +# where a user will rely on da.attrs rather than construct it themselves +del attrs + +# Delete the met_data variable, and reset it to be the Xarray object +del met_data + +# Create met_data and specify attrs because XR doesn't persist them. +met_data = xr.DataArray(ds.fcst, attrs=ds.attrs) diff --git a/scripts/python/examples/read_met_point_obs.py b/scripts/python/examples/read_met_point_obs.py new file mode 100644 index 0000000000..e16ccf2d86 --- /dev/null +++ b/scripts/python/examples/read_met_point_obs.py @@ -0,0 +1,61 @@ +''' +Created on Nov 10, 2021 + +@author: hsoh + +This script reads the MET point observation NetCDF file like MET tools do.
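+ +This script is typically supplied to a MET tool using the PYTHON_NUMPY keyword; +for example, the regression tests in this change set run: + point2grid 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_met_point_obs.py &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc' \ + G212 &OUTPUT_DIR;/python/pb2nc_TMP.nc -field 'name="TMP"; level="*"; ...'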
+ +Usage: + + python3 read_met_point_obs.py <met_netcdf_point_obs_file> + python3 read_met_point_obs.py <space_separated_text_file> + <space_separated_text_file>: 11 columns + 'typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs' + string columns: 'typ', 'sid', 'vld', 'var', 'qc' + numeric columns: 'lat', 'lon', 'elv', 'lvl', 'hgt', 'obs' + +''' + +import os +import sys +from datetime import datetime + +from met.point import met_point_tools +from pyembed.python_embedding import pyembed_tools + +ARG_PRINT_DATA = 'show_data' +DO_PRINT_DATA = ARG_PRINT_DATA == sys.argv[-1] + +start_time = datetime.now() + +point_obs_data = None +input_name = sys.argv[1] if len(sys.argv) > 1 else None +prompt = met_point_tools.get_prompt() +if len(sys.argv) == 1 or ARG_PRINT_DATA == input_name: + # This is an example of creating sample data + point_obs_data = met_point_tools.get_sample_point_obs() + point_obs_data.read_data([]) +elif met_point_tools.is_python_prefix(input_name): + # This is an example of calling a python script for ascii2nc + point_obs_data = pyembed_tools.call_python(sys.argv) +else: + # This is an example of reading MET's point observation NetCDF file + # from ascii2nc, madis2nc, and pb2nc + netcdf_filename = os.path.expandvars(input_name) + args = [ netcdf_filename ] + #args = { 'nc_name': netcdf_filename } + point_obs_data = met_point_tools.get_nc_point_obs() + point_obs_data.read_data(point_obs_data.get_nc_filename(args)) + +if point_obs_data is not None: + met_point_data = point_obs_data.get_point_data() + met_point_data['met_point_data'] = point_obs_data + print("met_point_data: ", met_point_data) + print(met_point_data) + + if DO_PRINT_DATA: + point_obs_data.dump() + +run_time = datetime.now() - start_time + +print('{p} Done. Python script {s} took {t}'.format(p=prompt, s=sys.argv[0], t=run_time)) diff --git a/scripts/python/met/Makefile.am b/scripts/python/met/Makefile.am new file mode 100644 index 0000000000..9e430722af --- /dev/null +++ b/scripts/python/met/Makefile.am @@ -0,0 +1,34 @@ +## Makefile.am -- Process this file with automake to produce Makefile.in +## Copyright (C) 2000, 2006 Gary V. Vaughan +## +## This program is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; either version 2, or (at your option) +## any later version. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, write to the Free Software +## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +## MA 02110-1301, USA. + +# SUBDIRS = include + +## Example of how to Install outside of $(pkgdatadir) +## scriptsrootdir = $(prefix)/share/scripts +## pythonscriptsdir = ${scriptsrootdir}/python + +pythonmetscriptsdir = $(pkgdatadir)/python/met + +pythonmetscripts_DATA = \ + dataplane.py \ + mprbase.py \ + point.py + +EXTRA_DIST = ${pythonmetscripts_DATA} + +MAINTAINERCLEANFILES = Makefile.in diff --git a/scripts/python/met/Makefile.in b/scripts/python/met/Makefile.in new file mode 100644 index 0000000000..488e85355e --- /dev/null +++ b/scripts/python/met/Makefile.in @@ -0,0 +1,523 @@ +# Makefile.in generated by automake 1.16.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2018 Free Software Foundation, Inc.
+ +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SUBDIRS = include + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = scripts/python/met +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) 
f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pythonmetscriptsdir)" +DATA = $(pythonmetscripts_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BUFRLIB_NAME = @BUFRLIB_NAME@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FC_LIBS = @FC_LIBS@ +FFLAGS = @FFLAGS@ +FLIBS = @FLIBS@ +GREP = @GREP@ +GRIB2CLIB_NAME = @GRIB2CLIB_NAME@ +GRIB2_LIBS = @GRIB2_LIBS@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LEX = @LEX@ +LEXLIB = @LEXLIB@ +LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MET_BUFR = @MET_BUFR@ +MET_BUFRLIB = @MET_BUFRLIB@ +MET_CAIRO = @MET_CAIRO@ +MET_CAIROINC = @MET_CAIROINC@ +MET_CAIROLIB = @MET_CAIROLIB@ +MET_FREETYPE = @MET_FREETYPE@ +MET_FREETYPEINC = @MET_FREETYPEINC@ +MET_FREETYPELIB = @MET_FREETYPELIB@ +MET_GRIB2C = @MET_GRIB2C@ +MET_GRIB2CINC = @MET_GRIB2CINC@ +MET_GRIB2CLIB = @MET_GRIB2CLIB@ +MET_GSL = @MET_GSL@ +MET_GSLINC = @MET_GSLINC@ +MET_GSLLIB = @MET_GSLLIB@ +MET_HDF = @MET_HDF@ +MET_HDF5 = @MET_HDF5@ +MET_HDF5INC = @MET_HDF5INC@ +MET_HDF5LIB = @MET_HDF5LIB@ +MET_HDFEOS = @MET_HDFEOS@ +MET_HDFEOSINC = @MET_HDFEOSINC@ +MET_HDFEOSLIB = @MET_HDFEOSLIB@ +MET_HDFINC = @MET_HDFINC@ +MET_HDFLIB = @MET_HDFLIB@ +MET_NETCDF = @MET_NETCDF@ +MET_NETCDFINC = @MET_NETCDFINC@ +MET_NETCDFLIB = @MET_NETCDFLIB@ +MET_PYTHON_BIN_EXE = @MET_PYTHON_BIN_EXE@ +MET_PYTHON_CC = @MET_PYTHON_CC@ +MET_PYTHON_LD = @MET_PYTHON_LD@ +MKDIR_P = @MKDIR_P@ +OBJEXT = @OBJEXT@ +OPENMP_CFLAGS = @OPENMP_CFLAGS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PYTHON_LIBS = 
@PYTHON_LIBS@ +RANLIB = @RANLIB@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +YACC = @YACC@ +YFLAGS = @YFLAGS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +pythonmetscriptsdir = $(pkgdatadir)/python/met +pythonmetscripts_DATA = \ + dataplane.py \ + mprbase.py \ + point.py + +EXTRA_DIST = ${pythonmetscripts_DATA} +MAINTAINERCLEANFILES = Makefile.in +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign scripts/python/met/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign scripts/python/met/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-pythonmetscriptsDATA: $(pythonmetscripts_DATA) + @$(NORMAL_INSTALL) + @list='$(pythonmetscripts_DATA)'; test -n "$(pythonmetscriptsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pythonmetscriptsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pythonmetscriptsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmetscriptsdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmetscriptsdir)" || exit $$?; \ + done + +uninstall-pythonmetscriptsDATA: + @$(NORMAL_UNINSTALL) + @list='$(pythonmetscripts_DATA)'; test -n "$(pythonmetscriptsdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pythonmetscriptsdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: + for dir in "$(DESTDIR)$(pythonmetscriptsdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-pythonmetscriptsDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pythonmetscriptsDATA + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-pythonmetscriptsDATA \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am uninstall-pythonmetscriptsDATA + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT:
diff --git a/scripts/python/met/dataplane.py b/scripts/python/met/dataplane.py
new file mode 100644
index 0000000000..3da0e8b9e4
--- /dev/null
+++ b/scripts/python/met/dataplane.py
@@ -0,0 +1,118 @@
+import os
+import numpy as np
+import netCDF4 as nc
+
+###########################################
+
+class dataplane():
+
+    ##
+    ## create the metadata dictionary
+    ##
+
+    #@staticmethod
+    # Python dictionary items:
+    #      'name': data name
+    # 'long_name': descriptive name
+    #     'valid': valid time (format = 'yyyymmdd_hhmmss')
+    #      'init': init time (format = 'yyyymmdd_hhmmss')
+    #      'lead': lead time (format = 'hhmmss')
+    #     'accum': accumulation time (format = 'hhmmss')
+    #     'level': vertical level
+    #     'units': units of the data
+    #      'grid': contains the grid information
+    #              - a grid name (G212)
+    #              - a gridded data file name
+    #              - MET specific grid string, "lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N"
+    #              - a dictionary for the grid information
+    def set_dataplane_attrs(data_name, valid_time, init_time, lead_time,
+                            accum_time, v_level, units, grid_info, long_name=None):
+        hdr_attrs = {
+
+            'valid': valid_time,
+            'init':  init_time,
+            'lead':  lead_time,
+            'accum': accum_time,
+
+            'name':      data_name,
+            'long_name': long_name if long_name is not None and long_name != "" else data_name + '_long',
+            'level':     v_level,
+            'units':     units,
+
+            'grid': grid_info
+
+        }
+        return hdr_attrs
+
+    #@staticmethod
+    def read_2d_text_input(input_file):
+        if os.path.exists(input_file):
+            met_data = np.loadtxt(input_file)
+        else:
+            met_data = None
+        return met_data
+
+    #@staticmethod
+    def read_dataplane(netcdf_filename):
+        # read NetCDF file
+        ds = nc.Dataset(netcdf_filename, 'r')
+        met_data = ds['met_data'][:]
+        met_attrs = {}
+
+        # grid is defined as a dictionary or string
+        grid = {}
+        for attr, attr_val in ds.__dict__.items():
+            if 'grid.' in attr:
+                grid_attr = attr.split('.')[1]
+                grid[grid_attr] = attr_val
+            else:
+                met_attrs[attr] = attr_val
+
+        if grid:
+            met_attrs['grid'] = grid
+
+        met_attrs['name'] = met_attrs['name_str']
+        del met_attrs['name_str']
+
+        met_info = {}
+        met_info['met_data'] = met_data
+        met_info['attrs'] = met_attrs
+        return met_info
+
+    #@staticmethod
+    def write_dataplane(met_in, netcdf_filename):
+        met_info = {'met_data': met_in.met_data}
+        if hasattr(met_in.met_data, 'attrs') and met_in.met_data.attrs:
+            attrs = met_in.met_data.attrs
+        else:
+            attrs = met_in.attrs
+        met_info['attrs'] = attrs
+
+        # determine fill value
+        #try:
+        #    fill = met_in.met_data.get_fill_value()
+        #except:
+        fill = -9999.
+
+        # write NetCDF file
+        ds = nc.Dataset(netcdf_filename, 'w')
+
+        # create dimensions and variable
+        nx, ny = met_in.met_data.shape
+        ds.createDimension('x', nx)
+        ds.createDimension('y', ny)
+        dp = ds.createVariable('met_data', met_in.met_data.dtype, ('x', 'y'), fill_value=fill)
+        dp[:] = met_in.met_data
+
+        # append attributes
+        for attr, attr_val in met_info['attrs'].items():
+            if attr == 'name':
+                setattr(ds, 'name_str', attr_val)
+            elif type(attr_val) == dict:
+                for key in attr_val:
+                    setattr(ds, attr + '.' + key, attr_val[key])
+            else:
+                setattr(ds, attr, attr_val)
+
+        ds.close()
+
diff --git a/scripts/python/met/mprbase.py b/scripts/python/met/mprbase.py
new file mode 100644
index 0000000000..0615171313
--- /dev/null
+++ b/scripts/python/met/mprbase.py
@@ -0,0 +1,21 @@
+import pandas as pd
+
+########################################################################
+
+class mpr_data():
+
+    # Reads a text file with N columns and returns the list of N-column data.
+    # Skips the first "col_start" columns if col_start is not 0.
+    def read_mpr(input_file, col_last, col_start = 0, header=None,
+                 delim_whitespace=True, keep_default_na=False,
+                 skiprows=1, dtype='string'):
+        mpr_data = pd.read_csv(input_file, header=header,
+                               delim_whitespace=delim_whitespace,
+                               keep_default_na=keep_default_na,
+                               skiprows=skiprows,
+                               usecols=range(col_start,col_last+1),
+                               dtype=dtype).values.tolist()
+        return mpr_data
+
+
+########################################################################
diff --git a/scripts/python/met/point.py b/scripts/python/met/point.py
new file mode 100644
index 0000000000..3c64549e85
--- /dev/null
+++ b/scripts/python/met/point.py
@@ -0,0 +1,983 @@
+'''
+Created on Nov 10, 2021
+
+@author: hsoh
+
+- This is the base class and the customized script should extend met_point_obs.
+- The customized script (for example "custom_reader") must implement
+  "def read_data(self, args)", which fills the array variables at __init__().
+- The args can be 1) a single string argument, 2) a list of arguments,
+  or 3) a dictionary of arguments.
+- Either the "point_data" or the "met_point_data" python object (variable) must be set:
+  + "point_data" is from 11 column text input
+  + "met_point_data" is an array of headers and observation data.
+  + "point_obs_data" is optional, for use with a custom python EXE.
+    It's a python instance which processes the point observation data
+- The customized script is expected to include the following code:
+
+  + Note: csv_point_obs is an example of met_point_data, not point_data
+
+  + Example of "point_data": see met_point_tools.read_text_point_obs()
+
+def read_custom_data(data_filename):
+   # Implement here
+   return the array of 11 column data
+
+# prepare arguments for the customized script
+data_filename = sys.argv[1]
+point_data = read_custom_data(data_filename)
+
+  + Example of "met_point_data": see csv_point_obs
+
+from met.point import met_point_obs
+
+class custom_reader(met_point_obs):
+
+    def read_data(data_filename):
+        # Implement here
+
+# prepare arguments for the customized script
+data_filename = sys.argv[1]
+point_obs_data = custom_reader()
+point_obs_data.read_data(data_filename)
+met_point_data = point_obs_data.get_point_data()
+
+'''
+
+import os
+from abc import ABC, abstractmethod
+
+import numpy as np
+import netCDF4 as nc
+import pandas as pd
+
+COUNT_SHOW = 30
+
+class base_met_point_obs(object):
+    '''
+    classdocs
+    '''
+    ERROR_P = " ==PYTHON_ERROR=="
+    INFO_P  = " ==PYTHON_INFO=="
+
+    python_prefix = 'PYTHON_POINT_USER'
+
+    FILL_VALUE = -9999.
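For reference, a minimal sketch of the reader contract described in the docstring above. The class name, the input path handling, and the chaining of the met_point_tools helpers are illustrative assumptions, not part of this patch:

import sys

from met.point import met_point_obs, met_point_tools

class custom_reader(met_point_obs):

    # args may be a single string, a list, or a dictionary per the
    # documented contract; for brevity this sketch handles only the
    # string and list forms
    def read_data(self, args):
        in_filename = args if isinstance(args, str) else args[0]
        self.input_name = in_filename
        # read the 11-column text observations and fill the arrays
        # initialized in __init__()
        point_data = met_point_tools.read_text_point_obs(in_filename)
        self.put_data(met_point_tools.convert_point_data(point_data))

if __name__ == '__main__':
    point_obs_data = custom_reader()
    point_obs_data.read_data(sys.argv[1])
    met_point_data = point_obs_data.get_point_data()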
+
+    def __init__(self, use_var_id=True):
+        '''
+        Constructor
+        '''
+        self.count_info = ""
+        self.input_name = None
+        self.ignore_input_file = False
+        self.use_var_id = use_var_id   # True if variable index, False if GRIB code
+        self.error_msg = ""
+        self.has_error = False
+
+        # Header
+        self.nhdr = 0
+        self.npbhdr = 0
+        self.nhdr_typ = 0   # type table
+        self.nhdr_sid = 0   # station_id table
+        self.nhdr_vld = 0   # valid time strings
+        self.hdr_typ = []   # (nhdr) integer
+        self.hdr_sid = []   # (nhdr) integer
+        self.hdr_vld = []   # (nhdr) integer
+        self.hdr_lat = []   # (nhdr) float
+        self.hdr_lon = []   # (nhdr) float
+        self.hdr_elv = []   # (nhdr) float
+        self.hdr_typ_table = []   # (nhdr_typ, mxstr2) string
+        self.hdr_sid_table = []   # (nhdr_sid, mxstr2) string
+        self.hdr_vld_table = []   # (nhdr_vld, mxstr) string
+
+        # Observation data
+        self.nobs = 0
+        self.nobs_qty = 0
+        self.nobs_var = 0
+        self.obs_qty = []   # (nobs_qty) integer, index of self.obs_qty_table
+        self.obs_hid = []   # (nobs) integer
+        self.obs_vid = []   # (nobs) integer, variable index from self.obs_var_table or GRIB code
+        self.obs_lvl = []   # (nobs) float
+        self.obs_hgt = []   # (nobs) float
+        self.obs_val = []   # (nobs) float
+        self.obs_qty_table = []   # (nobs_qty, mxstr) string
+        self.obs_var_table = []   # (nobs_var, mxstr2) string, required if self.use_var_id is True
+        self.obs_var_unit = []    # (nobs_var, mxstr2) string, optional if self.use_var_id is True
+        self.obs_var_desc = []    # (nobs_var, mxstr3) string, optional if self.use_var_id is True
+
+        # Optional variables for PREPBUFR, not supported yet
+        self.hdr_prpt_typ = []   # optional
+        self.hdr_irpt_typ = []   # optional
+        self.hdr_inst_typ = []   # optional
+
+    def add_error_msg(self, error_msg):
+        self.has_error = True
+        self.log_error_msg(error_msg)
+        if 0 == len(self.error_msg):
+            self.error_msg = error_msg
+        else:
+            self.error_msg = "{m1}\n{m2}".format(m1=self.error_msg, m2=error_msg)
+
+    def add_error_msgs(self, error_msgs):
+        self.has_error = True
+        for error_msg in error_msgs:
+            self.add_error_msg(error_msg)
+
+    def check_data_member_float(self, local_var, var_name):
+        if 0 == len(local_var):
+            self.add_error_msg("{v} is empty (float)".format(v=var_name))
+        elif isinstance(local_var, list):
+            if isinstance(local_var[0], str) and not self.is_number(local_var[0]):
+                self.add_error_msg("Not supported data type: {n}[0]={v}, string type, not a number (int or float only)".format(
+                    n=var_name, v=local_var[0]))
+            elif 0 > str(type(local_var[0])).find('numpy') and not isinstance(local_var[0], (int, float)):
+                self.add_error_msg("Not supported data type ({t}) for {v}[0] (int or float only)".format(
+                    v=var_name, t=type(local_var[0])))
+        elif not self.is_numpy_array(local_var):
+            self.add_error_msg("Not supported data type ({t}) for {v} (list and numpy.ndarray)".format(
+                v=var_name, t=type(local_var)))
+
+    def check_data_member_int(self, local_var, var_name):
+        if 0 == len(local_var):
+            self.add_error_msg("{v} is empty (int)".format(v=var_name))
+        elif isinstance(local_var, list):
+            if isinstance(local_var[0], str) and not self.is_number(local_var[0]):
+                self.add_error_msg("Not supported data type: {n}[0]={v}, string type, not a number (int only)".format(
+                    n=var_name, v=local_var[0]))
+            elif 0 > str(type(local_var[0])).find('numpy') and not isinstance(local_var[0], int):
+                self.add_error_msg("Not supported data type ({t}) for {v}[0] (int only)".format(
+                    v=var_name, t=type(local_var[0])))
+        elif not self.is_numpy_array(local_var):
+            self.add_error_msg("Not supported data type ({t}) for {v} (list and
numpy.ndarray)".format( + v=var_name, t=type(local_var))) + + def check_data_member_string(self, local_var, var_name): + if 0 == len(local_var): + self.add_error_msg("{v} is empty (string)".format(v=var_name)) + elif not isinstance(local_var, (list)): + self.add_error_msg("Not supported data type ({t}) for {v} (list)".format( + v=var_name, t=type(local_var))) + + def check_point_data(self): + if not self.ignore_input_file and self.input_name is not None and not os.path.exists(self.input_name): + self.add_error_msg('The netcdf input {f} does not exist'.format(f=self.input_name)) + else: + self.check_data_member_int(self.hdr_typ,'hdr_typ') + self.check_data_member_int(self.hdr_sid,'hdr_sid') + self.check_data_member_int(self.hdr_vld,'hdr_vld') + self.check_data_member_float(self.hdr_lat,'hdr_lat') + self.check_data_member_float(self.hdr_lon,'hdr_lon') + self.check_data_member_float(self.hdr_elv,'hdr_elv') + self.check_data_member_string(self.hdr_typ_table,'hdr_typ_table') + self.check_data_member_string(self.hdr_sid_table,'hdr_sid_table') + self.check_data_member_string(self.hdr_vld_table,'hdr_vld_table') + + self.check_data_member_int(self.obs_qty,'obs_qty') + self.check_data_member_int(self.obs_hid,'obs_hid') + self.check_data_member_int(self.obs_vid,'obs_vid') + self.check_data_member_float(self.obs_lvl,'obs_lvl') + self.check_data_member_float(self.obs_hgt,'obs_hgt') + self.check_data_member_float(self.obs_val,'obs_val') + self.check_data_member_string(self.obs_qty_table,'obs_qty_table') + if self.use_var_id: + self.check_data_member_string(self.obs_var_table,'obs_var_table') + + def convert_to_numpy(self, value_list): + return np.array(value_list) + + def dump(self): + base_met_point_obs.print_point_data(self.get_point_data()) + + def get_count_string(self): + return f' nobs={self.nobs} nhdr={self.nhdr} ntyp={self.nhdr_typ} nsid={self.nhdr_sid} nvld={self.nhdr_vld} nqty={self.nobs_qty} nvar={self.nobs_var}' + + def get_point_data(self): + if self.nhdr <= 0: + self.nhdr = len(self.hdr_lat) + if self.nobs <= 0: + self.nobs = len(self.obs_val) + if self.nhdr_typ <= 0: + self.nhdr_typ = len(self.hdr_typ_table) + if self.nhdr_sid <= 0: + self.nhdr_sid = len(self.hdr_sid_table) + if self.nhdr_vld <= 0: + self.nhdr_vld = len(self.hdr_vld_table) + if self.npbhdr <= 0: + self.npbhdr = len(self.hdr_prpt_typ) + if self.nobs_qty <= 0: + self.nobs_qty = len(self.obs_qty_table) + if self.nobs_var <= 0: + self.nobs_var = len(self.obs_var_table) + self.check_point_data() + + if not self.is_numpy_array(self.hdr_typ): + self.hdr_typ = self.convert_to_numpy(self.hdr_typ) + if not self.is_numpy_array(self.hdr_sid): + self.hdr_sid = self.convert_to_numpy(self.hdr_sid) + if not self.is_numpy_array(self.hdr_vld): + self.hdr_vld = self.convert_to_numpy(self.hdr_vld) + if not self.is_numpy_array(self.hdr_lat): + self.hdr_lat = self.convert_to_numpy(self.hdr_lat) + if not self.is_numpy_array(self.hdr_lon): + self.hdr_lon = self.convert_to_numpy(self.hdr_lon) + if not self.is_numpy_array(self.hdr_elv): + self.hdr_elv = self.convert_to_numpy(self.hdr_elv) + + if not self.is_numpy_array(self.obs_qty): + self.obs_qty = self.convert_to_numpy(self.obs_qty) + if not self.is_numpy_array(self.obs_hid): + self.obs_hid = self.convert_to_numpy(self.obs_hid) + if not self.is_numpy_array(self.obs_vid): + self.obs_vid = self.convert_to_numpy(self.obs_vid) + if not self.is_numpy_array(self.obs_lvl): + self.obs_lvl = self.convert_to_numpy(self.obs_lvl) + if not self.is_numpy_array(self.obs_hgt): + self.obs_hgt = 
self.convert_to_numpy(self.obs_hgt) + if not self.is_numpy_array(self.obs_val): + self.obs_val = self.convert_to_numpy(self.obs_val) + + self.count_info = self.get_count_string() + self.met_point_data = self + return self.__dict__ + + def is_number(self, num_str): + return num_str.replace('-','1').replace('+','2').replace('.','3').isdigit() + + def is_numpy_array(self, var): + return isinstance(var, np.ndarray) + + def log_error_msg(self, err_msg): + base_met_point_obs.error_msg(err_msg) + + def log_error(self, err_msgs): + print(self.ERROR_P) + for err_line in err_msgs.split('\n'): + self.log_error_msg(err_line) + print(self.ERROR_P) + + def log_info(self, info_msg): + base_met_point_obs.info_msg(info_msg) + + def put_data(self, point_obs_dict): + self.use_var_id = point_obs_dict['use_var_id'] + self.hdr_typ = point_obs_dict['hdr_typ'] + self.hdr_sid = point_obs_dict['hdr_sid'] + self.hdr_vld = point_obs_dict['hdr_vld'] + self.hdr_lat = point_obs_dict['hdr_lat'] + self.hdr_lon = point_obs_dict['hdr_lon'] + self.hdr_elv = point_obs_dict['hdr_elv'] + self.hdr_typ_table = point_obs_dict['hdr_typ_table'] + self.hdr_sid_table = point_obs_dict['hdr_sid_table'] + self.hdr_vld_table = point_obs_dict['hdr_vld_table'] + + #Observation data + self.obs_qty = point_obs_dict['obs_qty'] + self.obs_hid = point_obs_dict['obs_hid'] + self.obs_lvl = point_obs_dict['obs_lvl'] + self.obs_hgt = point_obs_dict['obs_hgt'] + self.obs_val = point_obs_dict['obs_val'] + self.obs_vid = point_obs_dict['obs_vid'] + self.obs_var_table = point_obs_dict['obs_var_table'] + self.obs_qty_table = point_obs_dict['obs_qty_table'] + po_array = point_obs_dict.get('obs_unit', None) + if po_array is not None: + self.obs_var_unit = po_array + po_array = point_obs_dict.get('obs_desc', None) + if po_array is not None: + self.obs_var_desc = po_array + + po_array = point_obs_dict.get('hdr_prpt_typ', None) + if po_array is not None: + self.hdr_prpt_typ = po_array + po_array = point_obs_dict.get('hdr_irpt_typ', None) + if po_array is not None: + self.hdr_irpt_typ = po_array + po_array = point_obs_dict.get('hdr_inst_typ', None) + if po_array is not None: + self.hdr_inst_typ = po_array + + @staticmethod + def get_prompt(): + return " python:" + + @staticmethod + def error_msg(msg): + print(f'{base_met_point_obs.get_prompt()} {base_met_point_obs.ERROR_P} {msg}') + + @staticmethod + def info_msg(msg): + print(f'{base_met_point_obs.get_prompt()} {base_met_point_obs.INFO_P} {msg}') + + @staticmethod + def get_python_script(arg_value): + return arg_value[len(met_point_obs.python_prefix)+1:] + + @staticmethod + def is_python_script(arg_value): + return arg_value.startswith(met_point_obs.python_prefix) + + @staticmethod + def print_data(key, data_array, show_count=COUNT_SHOW): + if isinstance(data_array, list): + data_len = len(data_array) + if show_count >= data_len: + print(" {k:10s}: {v}".format(k=key, v= data_array)) + else: + end_offset = int(show_count/2) + print(" {k:10s}: count={v}".format(k=key, v=data_len)) + print(" {k:10s}[0:{o}] {v}".format(k=key, v=data_array[:end_offset], o=end_offset)) + print(" {k:10s}[{s}:{e}]: {v}".format(k=key, v='...', s=end_offset+1, e=data_len-end_offset-1)) + print(" {k:10s}[{s}:{e}]: {v}".format(k=key, v= data_array[-end_offset:], s=(data_len-end_offset), e=(data_len-1))) + else: + print(" {k:10s}: {v}".format(k=key, v= data_array)) + + @staticmethod + def print_point_data(met_point_data, print_subset=True): + print(' === MET point data by python embedding ===') + if print_subset: + 
+            met_point_obs.print_data('nhdr',met_point_data['nhdr'])
+            met_point_obs.print_data('nobs',met_point_data['nobs'])
+            met_point_obs.print_data('use_var_id',met_point_data['use_var_id'])
+            met_point_obs.print_data('hdr_typ',met_point_data['hdr_typ'])
+            met_point_obs.print_data('hdr_typ_table',met_point_data['hdr_typ_table'])
+            met_point_obs.print_data('hdr_sid',met_point_data['hdr_sid'])
+            met_point_obs.print_data('hdr_sid_table',met_point_data['hdr_sid_table'])
+            met_point_obs.print_data('hdr_vld',met_point_data['hdr_vld'])
+            met_point_obs.print_data('hdr_vld_table',met_point_data['hdr_vld_table'])
+            met_point_obs.print_data('hdr_lat',met_point_data['hdr_lat'])
+            met_point_obs.print_data('hdr_lon',met_point_data['hdr_lon'])
+            met_point_obs.print_data('hdr_elv',met_point_data['hdr_elv'])
+            met_point_obs.print_data('obs_hid',met_point_data['obs_hid'])
+            met_point_obs.print_data('obs_vid',met_point_data['obs_vid'])
+            met_point_obs.print_data('obs_var_table',met_point_data['obs_var_table'])
+            met_point_obs.print_data('obs_qty',met_point_data['obs_qty'])
+            met_point_obs.print_data('obs_qty_table',met_point_data['obs_qty_table'])
+            met_point_obs.print_data('obs_lvl',met_point_data['obs_lvl'])
+            met_point_obs.print_data('obs_hgt',met_point_data['obs_hgt'])
+            met_point_obs.print_data('obs_val',met_point_data['obs_val'])
+        else:
+            print('All',met_point_data)
+            print("         nhdr: ",met_point_data['nhdr'])
+            print("         nobs: ",met_point_data['nobs'])
+            print('   use_var_id: ',met_point_data['use_var_id'])
+            print('      hdr_typ: ',met_point_data['hdr_typ'])
+            print('hdr_typ_table: ',met_point_data['hdr_typ_table'])
+            print('      hdr_sid: ',met_point_data['hdr_sid'])
+            print('hdr_sid_table: ',met_point_data['hdr_sid_table'])
+            print('      hdr_vld: ',met_point_data['hdr_vld'])
+            print('hdr_vld_table: ',met_point_data['hdr_vld_table'])
+            print('      hdr_lat: ',met_point_data['hdr_lat'])
+            print('      hdr_lon: ',met_point_data['hdr_lon'])
+            print('      hdr_elv: ',met_point_data['hdr_elv'])
+            print('      obs_hid: ',met_point_data['obs_hid'])
+            print('      obs_vid: ',met_point_data['obs_vid'])
+            print('obs_var_table: ',met_point_data['obs_var_table'])
+            print('      obs_qty: ',met_point_data['obs_qty'])
+            print('obs_qty_table: ',met_point_data['obs_qty_table'])
+            print('      obs_lvl: ',met_point_data['obs_lvl'])
+            print('      obs_hgt: ',met_point_data['obs_hgt'])
+            print('      obs_val: ',met_point_data['obs_val'])
+
+        print(' === MET point data by python embedding ===')
+
+
+class csv_point_obs(base_met_point_obs):
+
+    def __init__(self, point_data):
+        self.point_data = point_data
+        super(csv_point_obs, self).__init__()
+
+        self.obs_cnt = obs_cnt = len(point_data)
+        self.obs_qty = [ 0 for _ in range(0, obs_cnt) ]   # (nobs_qty) integer, index of self.obs_qty_table
+        self.obs_hid = [ 0 for _ in range(0, obs_cnt) ]   # (nobs) integer
+        self.obs_vid = [ 0 for _ in range(0, obs_cnt) ]   # (nobs) integer, variable index from self.obs_var_table or GRIB code
+        self.obs_lvl = [ self.FILL_VALUE for _ in range(0, obs_cnt) ]   # (nobs) float
+        self.obs_hgt = [ self.FILL_VALUE for _ in range(0, obs_cnt) ]   # (nobs) float
+        self.obs_val = [ self.FILL_VALUE for _ in range(0, obs_cnt) ]   # (nobs) float
+
+        self.convert_point_data()
+
+    def check_csv_record(self, csv_point_data, index):
+        error_msgs = []
+        # names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs']
+        # dtype={'typ':'str', 'sid':'str', 'vld':'str', 'var':'str', 'qc':'str'}
+        if 11 > len(csv_point_data):
+            error_msgs.append("{i}-th data: missing columns, expected 11 columns, not {c} columns".format(
+                i=index, c=len(csv_point_data)))
+        elif 11 < len(csv_point_data):
+            print("{i}-th data: ignoring columns after the 11th (found {c} columns)".format(
+                i=index, c=len(csv_point_data)))
+        if not isinstance(csv_point_data[0], str):
+            error_msgs.append("{i}-th data: message_type is not a string".format(i=index))
+        if not isinstance(csv_point_data[1], str):
+            error_msgs.append("{i}-th data: station_id is not a string".format(i=index))
+        if not isinstance(csv_point_data[2], str):
+            error_msgs.append("{i}-th data: valid_time is not a string".format(i=index))
+        if isinstance(csv_point_data[3], str):
+            error_msgs.append("{i}-th data: latitude cannot be a string".format(i=index))
+        elif csv_point_data[3] < -90.0 or csv_point_data[3] > 90.0:
+            error_msgs.append("{i}-th data: latitude ({l}) is out of range".format(i=index, l=csv_point_data[3]))
+        if isinstance(csv_point_data[4], str):
+            error_msgs.append("{i}-th data: longitude cannot be a string".format(i=index))
+        elif csv_point_data[4] < -180.0 or csv_point_data[4] > 360.0:
+            error_msgs.append("{i}-th data: longitude ({l}) is out of range".format(i=index, l=csv_point_data[4]))
+        if not isinstance(csv_point_data[6], str):
+            error_msgs.append("{i}-th data: grib_code/var_name is not a string".format(i=index))
+        if not isinstance(csv_point_data[9], str):
+            error_msgs.append("{i}-th data: quality_mark is not a string".format(i=index))
+        is_string, is_num = self.is_num_string(csv_point_data[5])
+        if is_string and not is_num:
+            error_msgs.append("{i}-th data: elevation: only NA is accepted as a string".format(i=index))
+        is_string, is_num = self.is_num_string(csv_point_data[7])
+        if is_string and not is_num:
+            error_msgs.append("{i}-th data: obs_level: only NA is accepted as a string".format(i=index))
+        is_string, is_num = self.is_num_string(csv_point_data[8])
+        if is_string and not is_num:
+            error_msgs.append("{i}-th data: obs_height: only NA is accepted as a string".format(i=index))
+        is_string, is_num = self.is_num_string(csv_point_data[10])
+        if is_string and not is_num:
+            error_msgs.append("{i}-th data: obs_value: only NA is accepted as a string".format(i=index))
+        return error_msgs
+
+    def check_csv_point_data(self, all_records=False):
+        if 0 == len(self.point_data):
+            self.add_error_msg("No data!")
+        elif all_records:
+            data_idx = 0
+            for csv_point_data in self.point_data:
+                data_idx += 1
+                error_messages = self.check_csv_record(csv_point_data, data_idx)
+                if len(error_messages) > 0:
+                    self.add_error_msgs(error_messages)
+        else:
+            error_messages = self.check_csv_record(self.point_data[0], index=1)
+            if len(error_messages) > 0:
+                self.add_error_msgs(error_messages)
+            if 1 < len(self.point_data):
+                error_messages = self.check_csv_record(self.point_data[-1], index=len(self.point_data))
+                if len(error_messages) > 0:
+                    self.add_error_msgs(error_messages)
+
+    def convert_point_data(self):
+        hdr_cnt = hdr_typ_cnt = hdr_sid_cnt = hdr_vld_cnt = 0
+        var_name_cnt = qc_cnt = 0
+
+        hdr_map = {}
+        hdr_typ_map = {}
+        hdr_sid_map = {}
+        hdr_vld_map = {}
+        obs_var_map = {}
+        obs_qty_map = {}
+        self.use_var_id = not self.is_grib_code()
+
+        index = 0
+        # names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs']
+        for csv_point_record in self.point_data:
+            # Build header map.
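+            # Each distinct message type, station id, and valid time string
+            # gets an index on first appearance; full headers are
+            # deduplicated as (typ, sid, vld, lat, lon, elv) tuples, and
+            # each observation references its header by index (obs_hid).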
+ hdr_typ_str = csv_point_record[0] + hdr_typ_idx = hdr_typ_map.get(hdr_typ_str,-1) + if hdr_typ_idx < 0: + hdr_typ_idx = hdr_typ_cnt + hdr_typ_map[hdr_typ_str] = hdr_typ_idx + hdr_typ_cnt += 1 + + hdr_sid_str = csv_point_record[1] + hdr_sid_idx = hdr_sid_map.get(hdr_sid_str,-1) + if hdr_sid_idx < 0: + hdr_sid_idx = hdr_sid_cnt + hdr_sid_map[hdr_sid_str] = hdr_sid_idx + hdr_sid_cnt += 1 + + hdr_vld_str = csv_point_record[2] + hdr_vld_idx = hdr_vld_map.get(hdr_vld_str,-1) + if hdr_vld_idx < 0: + hdr_vld_idx = hdr_vld_cnt + hdr_vld_map[hdr_vld_str] = hdr_vld_idx + hdr_vld_cnt += 1 + + lat = csv_point_record[3] + lon = csv_point_record[4] + elv = self.get_num_value(csv_point_record[5] ) + hdr_key = (hdr_typ_idx,hdr_sid_idx,hdr_vld_idx,lat,lon,elv) + hdr_idx = hdr_map.get(hdr_key,-1) + if hdr_idx < 0: + hdr_idx = hdr_cnt + hdr_map[hdr_key] = hdr_idx + hdr_cnt += 1 + + var_id_str = csv_point_record[6] + if self.use_var_id: + var_id = obs_var_map.get(var_id_str,-1) + if var_id < 0: + var_id = var_name_cnt + obs_var_map[var_id_str] = var_id + var_name_cnt += 1 + else: + var_id = int(var_id_str) + + qc_str = csv_point_record[9] + qc_id = obs_qty_map.get(qc_str,-1) + if qc_id < 0: + qc_id = qc_cnt + obs_qty_map[qc_str] = qc_id + qc_cnt += 1 + + # names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] + self.obs_vid[index] = var_id + self.obs_hid[index] = hdr_idx + self.obs_lvl[index] = self.get_num_value(csv_point_record[7]) + self.obs_hgt[index] = self.get_num_value(csv_point_record[8]) + self.obs_val[index] = self.get_num_value(csv_point_record[10]) + self.obs_qty[index] = qc_id + + index += 1 + + self.nhdr = hdr_cnt + self.nhdr_typ = hdr_typ_cnt + self.nhdr_sid = hdr_sid_cnt + self.nhdr_vld = hdr_vld_cnt + self.nobs_var = var_name_cnt + self.nobs_qty = qc_cnt + + # Fill header array and table array based on the map + self.hdr_typ = [ 0 for _ in range(0, hdr_cnt) ] + self.hdr_sid = [ 0 for _ in range(0, hdr_cnt) ] + self.hdr_vld = [ 0 for _ in range(0, hdr_cnt) ] + self.hdr_lat = [ self.FILL_VALUE for _ in range(0, hdr_cnt) ] + self.hdr_lon = [ self.FILL_VALUE for _ in range(0, hdr_cnt) ] + self.hdr_elv = [ self.FILL_VALUE for _ in range(0, hdr_cnt) ] + for key, idx in hdr_map.items(): + self.hdr_typ[idx] = key[0] + self.hdr_sid[idx] = key[1] + self.hdr_vld[idx] = key[2] + self.hdr_lat[idx] = key[3] + self.hdr_lon[idx] = key[4] + self.hdr_elv[idx] = key[5] + + self.hdr_typ_table = [ "" for _ in range(0, hdr_typ_cnt) ] + self.hdr_sid_table = [ "" for _ in range(0, hdr_sid_cnt) ] + self.hdr_vld_table = [ "" for _ in range(0, hdr_vld_cnt) ] + self.obs_qty_table = [ "" for _ in range(0, qc_cnt) ] + self.obs_var_table = [ "" for _ in range(0, var_name_cnt) ] + for key, idx in hdr_typ_map.items(): + self.hdr_typ_table[idx] = key + for key, idx in hdr_sid_map.items(): + self.hdr_sid_table[idx] = key + for key, idx in hdr_vld_map.items(): + self.hdr_vld_table[idx] = key + for key, idx in obs_qty_map.items(): + self.obs_qty_table[idx] = key + for key, idx in obs_var_map.items(): + self.obs_var_table[idx] = key + + def get_num_value(self, column_value): + num_value = column_value + if isinstance(column_value, str): + if self.is_number(column_value): + num_value = float(column_value) + else: + num_value = self.FILL_VALUE + if column_value.lower() != 'na' and column_value.lower() != 'n/a': + self.log_info(f'{column_value} is not a number, converted to the missing value') + return num_value + + def is_grib_code(self): + grib_code = True + for _point_data in self.point_data: + if 
isinstance(_point_data[6], int):
+                continue
+            elif isinstance(_point_data[6], str) and not _point_data[6].isdecimal():
+                grib_code = False
+                break
+        return grib_code
+
+    def is_num_string(self, column_value):
+        is_string = isinstance(column_value, str)
+        if is_string:
+            is_num = self.is_number(column_value) or column_value.lower() in ('na', 'n/a')
+        else:
+            is_num = True
+        return is_string, is_num
+
+
+class met_point_obs(ABC, base_met_point_obs):
+
+    MET_ENV_RUN = 'MET_FORCE_TO_RUN'
+
+    @abstractmethod
+    def read_data(self, args):
+        # args can be input_file_name, list, or dictionary
+        # - The variables at __init__ should be filled as python list or numpy array
+        # - set self.input_name
+        #
+        # Here is a template
+        '''
+        if isinstance(args, dict):
+            in_filename = args.get('in_name',None)
+        elif isinstance(args, list):
+            in_filename = args[0]
+        else:
+            in_filename = args
+        self.input_name = in_filename
+        '''
+        pass
+
+
+class met_point_tools():
+
+    @staticmethod
+    def convert_point_data(point_data, check_all_records=False, input_type='csv'):
+        tmp_point_data = {}
+        if 'csv' == input_type:
+            csv_point_data = csv_point_obs(point_data)
+            csv_point_data.check_csv_point_data(check_all_records)
+            tmp_point_data = csv_point_data.get_point_data()
+        else:
+            base_met_point_obs.error_msg(f'Not supported input type: {input_type}')
+        return tmp_point_data
+
+    @staticmethod
+    def get_prompt():
+        return " python:"
+
+    @staticmethod
+    def get_nc_point_obs():
+        return nc_point_obs()
+
+    @staticmethod
+    def get_sample_point_obs():
+        return sample_met_point_obs()
+
+    @staticmethod
+    def is_python_prefix(user_cmd):
+        return user_cmd.startswith(base_met_point_obs.python_prefix)
+
+    @staticmethod
+    # Read the input file, an 11-column text file, given as the first argument
+    def read_text_point_obs(input_file, header=None,
+                            delim_whitespace=True, keep_default_na=False):
+        # Read and format the input 11-column observations:
+        #   (1)  string:  Message_Type
+        #   (2)  string:  Station_ID
+        #   (3)  string:  Valid_Time(YYYYMMDD_HHMMSS)
+        #   (4)  numeric: Lat(Deg North)
+        #   (5)  numeric: Lon(Deg East)
+        #   (6)  numeric: Elevation(msl)
+        #   (7)  string:  Var_Name(or GRIB_Code)
+        #   (8)  numeric: Level
+        #   (9)  numeric: Height(msl or agl)
+        #   (10) string:  QC_String
+        #   (11) numeric: Observation_Value
+        ascii_point_data = pd.read_csv(input_file, header=header,
+                                       delim_whitespace=delim_whitespace,
+                                       keep_default_na=keep_default_na,
+                                       names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'],
+                                       dtype={'typ':'string', 'sid':'string', 'vld':'string', 'var':'string', 'qc':'string'}).values.tolist()
+        return ascii_point_data
+
+# Note: caller should import netCDF4
+# The arguments nc_group(dataset) and nc_var should not be None
+class nc_tools():
+
+    met_missing = -99999999.
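For example, the two met_point_tools helpers above can be chained to turn an 11-column ASCII file into the dictionary of header and observation arrays that MET consumes; the file name here is an illustrative assumption:

from met.point import met_point_tools

# read the 11-column text observations (file name is hypothetical)
point_data = met_point_tools.read_text_point_obs('point_obs.txt')
# deduplicate headers and index the strings into the MET dictionary
met_point_data = met_point_tools.convert_point_data(point_data)
print(met_point_data['nhdr'], 'headers,', met_point_data['nobs'], 'observations')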
+
+    @staticmethod
+    def get_num_array(nc_group, var_name):
+        nc_var = nc_group.variables.get(var_name, None)
+        return [] if nc_var is None else nc_var[:]
+
+    @staticmethod
+    def get_ncbyte_array_to_str(nc_var):
+        nc_str_data = nc_var[:]
+        if nc_var.datatype.name == 'bytes8':
+            nc_str_data = [ str(s.compressed(),"utf-8") for s in nc_var[:] ]
+        return nc_str_data
+
+    @staticmethod
+    def get_string_array(nc_group, var_name):
+        nc_var = nc_group.variables.get(var_name, None)
+        return [] if nc_var is None else nc_tools.get_ncbyte_array_to_str(nc_var)
+
+
+class nc_point_obs(met_point_obs):
+
+    # args should be string, list, or dictionary
+    def get_nc_filename(self, args):
+        nc_filename = None
+        if isinstance(args, dict):
+            nc_filename = args.get('nc_name',None)
+        elif isinstance(args, list):
+            nc_filename = args[0]
+        elif args != ARG_PRINT_DATA:
+            nc_filename = args
+
+        return nc_filename
+
+    def read_data(self, nc_filename):
+        if nc_filename is None:
+            self.log_error_msg("The input NetCDF filename is missing")
+        elif not os.path.exists(nc_filename):
+            self.log_error_msg(f"input NetCDF file ({nc_filename}) does not exist")
+        else:
+            dataset = nc.Dataset(nc_filename, 'r')
+
+            attr_name = 'use_var_id'
+            use_var_id_str = dataset.getncattr(attr_name) if attr_name in dataset.ncattrs() else "false"
+            self.use_var_id = use_var_id_str.lower() == 'true'
+
+            # Header
+            self.hdr_typ = dataset['hdr_typ'][:]
+            self.hdr_sid = dataset['hdr_sid'][:]
+            self.hdr_vld = dataset['hdr_vld'][:]
+            self.hdr_lat = dataset['hdr_lat'][:]
+            self.hdr_lon = dataset['hdr_lon'][:]
+            self.hdr_elv = dataset['hdr_elv'][:]
+            self.hdr_typ_table = nc_tools.get_string_array(dataset, 'hdr_typ_table')
+            self.hdr_sid_table = nc_tools.get_string_array(dataset, 'hdr_sid_table')
+            self.hdr_vld_table = nc_tools.get_string_array(dataset, 'hdr_vld_table')
+
+            nc_var = dataset.variables.get('obs_unit', None)
+            if nc_var:
+                self.obs_var_unit = nc_var[:]
+            nc_var = dataset.variables.get('obs_desc', None)
+            if nc_var:
+                self.obs_var_desc = nc_var[:]
+
+            nc_var = dataset.variables.get('hdr_prpt_typ', None)
+            if nc_var:
+                self.hdr_prpt_typ = nc_var[:]
+            nc_var = dataset.variables.get('hdr_irpt_typ', None)
+            if nc_var:
+                self.hdr_irpt_typ = nc_var[:]
+            nc_var = dataset.variables.get('hdr_inst_typ', None)
+            if nc_var:
+                self.hdr_inst_typ = nc_var[:]
+
+            # Observation data
+            self.obs_qty = np.array(dataset['obs_qty'][:])
+            self.obs_hid = np.array(dataset['obs_hid'][:])
+            self.obs_lvl = np.array(dataset['obs_lvl'][:])
+            self.obs_hgt = np.array(dataset['obs_hgt'][:])
+            self.obs_val = np.array(dataset['obs_val'][:])
+            nc_var = dataset.variables.get('obs_vid', None)
+            if nc_var is None:
+                self.use_var_id = False
+                nc_var = dataset.variables.get('obs_gc', None)
+            else:
+                self.obs_var_table = nc_tools.get_string_array(dataset, 'obs_var')
+            if nc_var:
+                self.obs_vid = np.array(nc_var[:])
+
+            self.obs_qty_table = nc_tools.get_string_array(dataset, 'obs_qty_table')
+
+    def save_ncfile(self, nc_filename):
+        met_data = self.get_point_data()
+        with nc.Dataset(nc_filename, 'w') as nc_dataset:
+            self.set_nc_data(nc_dataset)
+        return met_data
+
+    def set_nc_data(self, nc_dataset):
+        return nc_point_obs.write_nc_data(nc_dataset, self)
+
+    @staticmethod
+    def write_nc_file(nc_filename, point_obs):
+        with nc.Dataset(nc_filename, 'w') as nc_dataset:
+            nc_point_obs.write_nc_data(nc_dataset, point_obs)
+
+    @staticmethod
+    def write_nc_data(nc_dataset, point_obs):
+        do_nothing = False
+        if 0 == point_obs.nhdr:
+            do_nothing = True
base_met_point_obs.info_msg("the header is empty") + if 0 == point_obs.nobs: + do_nothing = True + base_met_point_obs.info_msg("the observation data is empty") + if do_nothing: + print() + return + + # Set global attributes + nc_dataset.MET_Obs_version = "1.02" ; + nc_dataset.use_var_id = "true" if point_obs.use_var_id else "false" + + # Create dimensions + nc_dataset.createDimension('mxstr', 16) + nc_dataset.createDimension('mxstr2', 40) + nc_dataset.createDimension('mxstr3', 80) + nc_dataset.createDimension('nhdr', point_obs.nhdr) + nc_dataset.createDimension('nobs', point_obs.nobs) + #npbhdr = len(point_obs.hdr_prpt_typ) + if 0 < point_obs.npbhdr: + nc_dataset.createDimension('npbhdr', point_obs.npbhdr) + nc_dataset.createDimension('nhdr_typ', point_obs.nhdr_typ) + nc_dataset.createDimension('nhdr_sid', point_obs.nhdr_sid) + nc_dataset.createDimension('nhdr_vld', point_obs.nhdr_vld) + nc_dataset.createDimension('nobs_qty', point_obs.nobs_qty) + nc_dataset.createDimension('obs_var_num', point_obs.nobs_var) + + type_for_string = 'S1' # np.byte + dims_hdr = ('nhdr',) + dims_obs = ('nobs',) + + # Create header and observation variables + var_hdr_typ = nc_dataset.createVariable('hdr_typ', np.int32, dims_hdr, fill_value=-9999) + var_hdr_sid = nc_dataset.createVariable('hdr_sid', np.int32, dims_hdr, fill_value=-9999) + var_hdr_vld = nc_dataset.createVariable('hdr_vld', np.int32, dims_hdr, fill_value=-9999) + var_hdr_lat = nc_dataset.createVariable('hdr_lat', np.float32, dims_hdr, fill_value=-9999.) + var_hdr_lon = nc_dataset.createVariable('hdr_lon', np.float32, dims_hdr, fill_value=-9999.) + var_hdr_elv = nc_dataset.createVariable('hdr_elv', np.float32, dims_hdr, fill_value=-9999.) + + var_obs_qty = nc_dataset.createVariable('obs_qty', np.int32, dims_obs, fill_value=-9999) + var_obs_hid = nc_dataset.createVariable('obs_hid', np.int32, dims_obs, fill_value=-9999) + var_obs_vid = nc_dataset.createVariable('obs_vid', np.int32, dims_obs, fill_value=-9999) + var_obs_lvl = nc_dataset.createVariable('obs_lvl', np.float32, dims_obs, fill_value=-9999.) + var_obs_hgt = nc_dataset.createVariable('obs_hgt', np.float32, dims_obs, fill_value=-9999.) + var_obs_val = nc_dataset.createVariable('obs_val', np.float32, dims_obs, fill_value=-9999.) + + if 0 == point_obs.npbhdr: + var_hdr_prpt_typ = None + var_hdr_irpt_typ = None + var_hdr_inst_typ = None + else: + dims_npbhdr = ('npbhdr',) + var_hdr_prpt_typ = nc_dataset.createVariable('hdr_prpt_typ', np.int32, dims_npbhdr, fill_value=-9999.) + var_hdr_irpt_typ = nc_dataset.createVariable('hdr_irpt_typ', np.int32, dims_npbhdr, fill_value=-9999.) + var_hdr_inst_typ = nc_dataset.createVariable('hdr_inst_typ', np.int32, dims_npbhdr, fill_value=-9999.) 
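+
+        # String tables are stored as fixed-width character arrays (the
+        # mxstr/mxstr2/mxstr3 dimensions), which is why they are written
+        # out one character at a time below.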
+ + var_hdr_typ_table = nc_dataset.createVariable('hdr_typ_table', type_for_string, ('nhdr_typ','mxstr2')) + var_hdr_sid_table = nc_dataset.createVariable('hdr_sid_table', type_for_string, ('nhdr_sid','mxstr2')) + var_hdr_vld_table = nc_dataset.createVariable('hdr_vld_table', type_for_string, ('nhdr_vld','mxstr')) + var_obs_qty_table = nc_dataset.createVariable('obs_qty_table', type_for_string, ('nobs_qty','mxstr')) + var_obs_var_table = nc_dataset.createVariable('obs_var', type_for_string, ('obs_var_num','mxstr2')) + var_obs_var_unit = nc_dataset.createVariable('obs_unit', type_for_string, ('obs_var_num','mxstr2')) + var_obs_var_desc = nc_dataset.createVariable('obs_desc', type_for_string, ('obs_var_num','mxstr3')) + + # Set variables + var_hdr_typ[:] = point_obs.hdr_typ[:] + var_hdr_sid[:] = point_obs.hdr_sid[:] + var_hdr_vld[:] = point_obs.hdr_vld[:] + var_hdr_lat[:] = point_obs.hdr_lat[:] + var_hdr_lon[:] = point_obs.hdr_lon[:] + var_hdr_elv[:] = point_obs.hdr_elv[:] + for i in range(0, point_obs.nhdr_typ): + for j in range(0, len(point_obs.hdr_typ_table[i])): + var_hdr_typ_table[i,j] = point_obs.hdr_typ_table[i][j] + for i in range(0, point_obs.nhdr_sid): + for j in range(0, len(point_obs.hdr_sid_table[i])): + var_hdr_sid_table[i,j] = point_obs.hdr_sid_table[i][j] + for i in range(0, point_obs.nhdr_vld): + for j in range(0, len(point_obs.hdr_vld_table[i])): + var_hdr_vld_table[i,j] = point_obs.hdr_vld_table[i][j] + if 0 < point_obs.npbhdr: + var_hdr_prpt_typ[:] = point_obs.hdr_prpt_typ[:] + var_hdr_irpt_typ[:] = point_obs.hdr_irpt_typ[:] + var_hdr_inst_typ[:] = point_obs.hdr_inst_typ[:] + + var_obs_qty[:] = point_obs.obs_qty[:] + var_obs_hid[:] = point_obs.obs_hid[:] + var_obs_vid[:] = point_obs.obs_vid[:] + var_obs_lvl[:] = point_obs.obs_lvl[:] + var_obs_hgt[:] = point_obs.obs_hgt[:] + var_obs_val[:] = point_obs.obs_val[:] + for i in range(0, point_obs.nobs_var): + for j in range(0, len(point_obs.obs_var_table[i])): + var_obs_var_table[i,j] = point_obs.obs_var_table[i][j] + var_obs_var_unit[i] = "" if i >= len(point_obs.obs_var_unit) else point_obs.obs_var_unit[i] + var_obs_var_desc[i] = "" if i >= len(point_obs.obs_var_desc) else point_obs.obs_var_desc[i] + for i in range(0, point_obs.nobs_qty): + for j in range(0, len(point_obs.obs_qty_table[i])): + var_obs_qty_table[i,j] = point_obs.obs_qty_table[i][j] + + # Set variable attributes + var_hdr_typ.long_name = "index of message type" + var_hdr_sid.long_name = "index of station identification" + var_hdr_vld.long_name = "index of valid time" + var_hdr_lat.long_name = "latitude" + var_hdr_lat.units = "degrees_north" + var_hdr_lon.long_name = "longitude" + var_hdr_lon.units = "degrees_east" + var_hdr_elv.long_name = "elevation" + var_hdr_elv.units = "meters above sea level (msl)" + + var_obs_qty.long_name = "index of quality flag" + var_obs_hid.long_name = "index of matching header data" + var_obs_vid.long_name = "index of BUFR variable corresponding to the observation type" + var_obs_lvl.long_name = "pressure level (hPa) or accumulation interval (sec)" + var_obs_hgt.long_name = "height in meters above sea level (msl)" + var_obs_val.long_name = "observation value" + var_hdr_typ_table.long_name = "message type" + var_hdr_sid_table.long_name = "station identification" + var_hdr_vld_table.long_name = "valid time" + var_hdr_vld_table.units = "YYYYMMDD_HHMMSS UTC" + var_obs_qty_table.long_name = "quality flag" + var_obs_var_table.long_name = "variable names" + var_obs_var_unit.long_name = "variable units" + var_obs_var_desc.long_name = 
"variable descriptions" + + +# This is a sample drived class +class sample_met_point_obs(met_point_obs): + + #@abstractmethod + def read_data(self, arg_map={}): + self.hdr_typ = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]) + self.hdr_sid = np.array([ 0, 0, 0, 0, 0, 1, 2, 3, 3, 1, 2, 2, 3, 0, 0, 0, 0, 0, 1, 2, 3, 3, 1, 2, 2, 3 ]) + self.hdr_vld = np.array([ 0, 1, 2, 3, 4, 4, 3, 4, 3, 4, 5, 4, 3, 0, 1, 2, 3, 4, 4, 3, 4, 3, 4, 5, 4, 3 ]) + self.hdr_lat = np.array([ 43., 43., 43., 43., 43., 43., 43., 43., 43., 46., 46., 46., 46., 43., 43., 43., 43., 43., 43., 43., 43., 43., 46., 46., 46., 46. ]) + self.hdr_lon = np.array([ -89., -89., -89., -89., -89., -89., -89., -89., -89., -92., -92., -92., -92., -89., -89., -89., -89., -89., -89., -89., -89., -89., -92., -92., -92., -92. ]) + self.hdr_elv = np.array([ 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220. ]) + + self.obs_hid = np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25 ]) + self.obs_vid = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]) + self.obs_qty = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]) + self.obs_lvl = np.array([ 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000. ]) + self.obs_hgt = np.array([ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2. ]) + self.obs_val = np.array([ 292., 292.5, 293., 293.5, 294., 294.5, 295., 295.5, 296., 292., 293.4, 293., 296., 294., 92., 92.5, 93., 93.5, 94., 94.5, 95., 95.5, 96., 92., 93.4, 93., 96., 94. 
]) + + self.hdr_typ_table = [ "ADPSFC" ] + self.hdr_sid_table = [ "001", "002", "003", "004" ] + self.hdr_vld_table = [ + "20120409_115000", "20120409_115500", "20120409_120100", "20120409_120500", "20120409_121000", + "20120409_120000" ] + self.obs_var_table = [ "TMP", "RH" ] + self.obs_qty_table = [ "NA" ] + +def convert_point_data(point_data, check_all_records=False, input_type='csv'): + tmp_point_data = {} + if 'csv' == input_type: + csv_point_data = csv_point_obs(point_data) + csv_point_data.check_csv_point_data(check_all_records) + tmp_point_data = csv_point_data.get_point_data() + else: + base_met_point_obs.error_msg('Not supported input type: {input_type}') + return tmp_point_data + +def main(): + args = {} # or args = [] + point_obs_data = sample_met_point_obs() + point_obs_data.read_data(args) + met_point_data = point_obs_data.get_point_data() + + point_obs_data.print_point_data(met_point_data, print_subset=False) + +def main_nc(argv): + if len(argv) != 1 and argv[1] != ARG_PRINT_DATA: + netcdf_filename = argv[1] + tmp_nc_name = 'tmp_met_point.nc' + point_obs_data = nc_point_obs() + point_obs_data.read_data(point_obs_data.get_nc_filename(netcdf_filename)) + met_point_data = point_obs_data.save_ncfile(tmp_nc_name) + print(f'{base_met_point_obs.get_prompt()} saved met_point_data to {tmp_nc_name}') + met_point_data['met_point_data'] = point_obs_data + + if DO_PRINT_DATA or ARG_PRINT_DATA == argv[-1]: + met_point_obs.print_point_data(met_point_data) + + +if __name__ == '__main__': + main() + print('Done python scripot') diff --git a/scripts/python/met_point_obs.py b/scripts/python/met_point_obs.py deleted file mode 100755 index fb108705fd..0000000000 --- a/scripts/python/met_point_obs.py +++ /dev/null @@ -1,660 +0,0 @@ -#!/usr/bin/env python3 -''' -Created on Nov 10, 2021 - -@author: hsoh - -- This is the base class and the customized script should extend the met_point_obs. -- The customized script (for example "custom_reader") must implement - "def read_data(self, args)" which fills the array variables at __init__(). -- The args can be 1) single string argument, 2) the list of arguments, - or 3) the dictionary of arguments. -- A python objects, met_point_data, must set: - + "point_obs_data" is an optional to use custom python EXE. - It's a python instance which processes the point observation data -- The customized script is expected to include following codes: - - # prepare arguments for the customized script - args = {'input', sys.argv[1]} # or args = [] - point_obs_data = custom_reader() - point_obs_data.read_data(args) - met_point_data = point_obs_data.get_point_data() - -''' - -import os -from abc import ABC, abstractmethod -import numpy as np - -COUNT_SHOW = 30 - -def get_prompt(): - return " python:" - -def met_is_python_prefix(user_cmd): - return user_cmd.startswith(base_met_point_obs.python_prefix) - - -class base_met_point_obs(object): - ''' - classdocs - ''' - ERROR_P = " ==PYTHON_ERROR==" - INFO_P = " ==PYTHON_INFO==" - - python_prefix = 'PYTHON_POINT_USER' - - FILL_VALUE = -9999. 
- - def __init__(self, use_var_id=True): - ''' - Constructor - ''' - self.count_info = "" - self.input_name = None - self.ignore_input_file = False - self.use_var_id = use_var_id # True if variable index, False if GRIB code - self.error_msg = "" - self.has_error = False - - # Header - self.nhdr = 0 - self.npbhdr = 0 - self.nhdr_typ = 0 # type table - self.nhdr_sid = 0 # station_id table - self.nhdr_vld = 0 # valid time strings - self.hdr_typ = [] # (nhdr) integer - self.hdr_sid = [] # (nhdr) integer - self.hdr_vld = [] # (nhdr) integer - self.hdr_lat = [] # (nhdr) float - self.hdr_lon = [] # (nhdr) float - self.hdr_elv = [] # (nhdr) float - self.hdr_typ_table = [] # (nhdr_typ, mxstr2) string - self.hdr_sid_table = [] # (nhdr_sid, mxstr2) string - self.hdr_vld_table = [] # (nhdr_vld, mxstr) string - - #Observation data - self.nobs = 0 - self.nobs_qty = 0 - self.nobs_var = 0 - self.obs_qty = [] # (nobs_qty) integer, index of self.obs_qty_table - self.obs_hid = [] # (nobs) integer - self.obs_vid = [] # (nobs) integer, veriable index from self.obs_var_table or GRIB code - self.obs_lvl = [] # (nobs) float - self.obs_hgt = [] # (nobs) float - self.obs_val = [] # (nobs) float - self.obs_qty_table = [] # (nobs_qty, mxstr) string - self.obs_var_table = [] # (nobs_var, mxstr2) string, required if self.use_var_id is True - self.obs_var_unit = [] # (nobs_var, mxstr2) string, optional if self.use_var_id is True - self.obs_var_desc = [] # (nobs_var, mxstr3) string, optional if self.use_var_id is True - - # Optional variables for PREPBUFR, not supported yet - self.hdr_prpt_typ = [] # optional - self.hdr_irpt_typ = [] # optional - self.hdr_inst_typ = [] # optional - - def add_error_msg(self, error_msg): - self.has_error = True - self.log_error_msg(error_msg) - if 0 == len(self.error_msg): - self.error_msg = error_msg - else: - self.error_msg = "{m1}\n{m2}".format(m1=self.error_msg, m2=error_msg) - - def add_error_msgs(self, error_msgs): - self.has_error = True - for error_msg in error_msgs: - self.add_error_msg(error_msg) - - def check_data_member_float(self, local_var, var_name): - if 0 == len(local_var): - self.add_error_msg("{v} is empty (float)".format(v=var_name)) - elif isinstance(local_var, list): - if isinstance(local_var[0], str) and not self.is_number(local_var[0]): - self.add_error_msg("Not supported data type: {n}[0]={v}, string type, not a number (int or float only)".format( - n=var_name, v=local_var[0])) - elif 0 > str(type(local_var[0])).find('numpy') and not isinstance(local_var[0], (int, float)): - self.add_error_msg("Not supported data type ({t}) for {v}[0] (int or float only)".format( - v=var_name, t=type(local_var[0]))) - elif not self.is_numpy_array(local_var): - self.add_error_msg("Not supported data type ({t}) for {v} (list and numpy.ndarray)".format( - v=var_name, t=type(local_var))) - - def check_data_member_int(self, local_var, var_name): - if 0 == len(local_var): - self.add_error_msg("{v} is empty (int)".format(v=var_name)) - elif isinstance(local_var, list): - if isinstance(local_var[0], str) and not self.is_number(local_var[0]): - self.add_error_msg("Not supported data type: {n}[0]={v}, string type, not a number (int only)".format( - n=var_name, v=local_var[0])) - elif 0 > str(type(local_var[0])).find('numpy') and not isinstance(local_var[0], int): - self.add_error_msg("Not supported data type ({t}) for {v}[0] (int only)".format( - v=var_name, t=type(local_var[0]))) - elif not self.is_numpy_array(local_var): - self.add_error_msg("Not supported data type ({t}) for {v} (list and 
numpy.ndarray)".format( - v=var_name, t=type(local_var))) - - def check_data_member_string(self, local_var, var_name): - if 0 == len(local_var): - self.add_error_msg("{v} is empty (string)".format(v=var_name)) - elif not isinstance(local_var, (list)): - self.add_error_msg("Not supported data type ({t}) for {v} (list)".format( - v=var_name, t=type(local_var))) - - def check_point_data(self): - if not self.ignore_input_file and self.input_name is not None and not os.path.exists(self.input_name): - self.add_error_msg('The netcdf input {f} does not exist'.format(f=self.input_name)) - else: - self.check_data_member_int(self.hdr_typ,'hdr_typ') - self.check_data_member_int(self.hdr_sid,'hdr_sid') - self.check_data_member_int(self.hdr_vld,'hdr_vld') - self.check_data_member_float(self.hdr_lat,'hdr_lat') - self.check_data_member_float(self.hdr_lon,'hdr_lon') - self.check_data_member_float(self.hdr_elv,'hdr_elv') - self.check_data_member_string(self.hdr_typ_table,'hdr_typ_table') - self.check_data_member_string(self.hdr_sid_table,'hdr_sid_table') - self.check_data_member_string(self.hdr_vld_table,'hdr_vld_table') - - self.check_data_member_int(self.obs_qty,'obs_qty') - self.check_data_member_int(self.obs_hid,'obs_hid') - self.check_data_member_int(self.obs_vid,'obs_vid') - self.check_data_member_float(self.obs_lvl,'obs_lvl') - self.check_data_member_float(self.obs_hgt,'obs_hgt') - self.check_data_member_float(self.obs_val,'obs_val') - self.check_data_member_string(self.obs_qty_table,'obs_qty_table') - if self.use_var_id: - self.check_data_member_string(self.obs_var_table,'obs_var_table') - - def convert_to_numpy(self, value_list): - return np.array(value_list) - - def dump(self): - base_met_point_obs.print_point_data(self.get_point_data()) - - def get_count_string(self): - return f' nobs={self.nobs} nhdr={self.nhdr} ntyp={self.nhdr_typ} nsid={self.nhdr_sid} nvld={self.nhdr_vld} nqty={self.nobs_qty} nvar={self.nobs_var}' - - def get_point_data(self): - if self.nhdr <= 0: - self.nhdr = len(self.hdr_lat) - if self.nobs <= 0: - self.nobs = len(self.obs_val) - if self.nhdr_typ <= 0: - self.nhdr_typ = len(self.hdr_typ_table) - if self.nhdr_sid <= 0: - self.nhdr_sid = len(self.hdr_sid_table) - if self.nhdr_vld <= 0: - self.nhdr_vld = len(self.hdr_vld_table) - if self.npbhdr <= 0: - self.npbhdr = len(self.hdr_prpt_typ) - if self.nobs_qty <= 0: - self.nobs_qty = len(self.obs_qty_table) - if self.nobs_var <= 0: - self.nobs_var = len(self.obs_var_table) - self.check_point_data() - - if not self.is_numpy_array(self.hdr_typ): - self.hdr_typ = self.convert_to_numpy(self.hdr_typ) - if not self.is_numpy_array(self.hdr_sid): - self.hdr_sid = self.convert_to_numpy(self.hdr_sid) - if not self.is_numpy_array(self.hdr_vld): - self.hdr_vld = self.convert_to_numpy(self.hdr_vld) - if not self.is_numpy_array(self.hdr_lat): - self.hdr_lat = self.convert_to_numpy(self.hdr_lat) - if not self.is_numpy_array(self.hdr_lon): - self.hdr_lon = self.convert_to_numpy(self.hdr_lon) - if not self.is_numpy_array(self.hdr_elv): - self.hdr_elv = self.convert_to_numpy(self.hdr_elv) - - if not self.is_numpy_array(self.obs_qty): - self.obs_qty = self.convert_to_numpy(self.obs_qty) - if not self.is_numpy_array(self.obs_hid): - self.obs_hid = self.convert_to_numpy(self.obs_hid) - if not self.is_numpy_array(self.obs_vid): - self.obs_vid = self.convert_to_numpy(self.obs_vid) - if not self.is_numpy_array(self.obs_lvl): - self.obs_lvl = self.convert_to_numpy(self.obs_lvl) - if not self.is_numpy_array(self.obs_hgt): - self.obs_hgt = 
self.convert_to_numpy(self.obs_hgt) - if not self.is_numpy_array(self.obs_val): - self.obs_val = self.convert_to_numpy(self.obs_val) - - self.count_info = self.get_count_string() - self.met_point_data = self - return self.__dict__ - - def is_number(self, num_str): - return num_str.replace('-','1').replace('+','2').replace('.','3').isdigit() - - def is_numpy_array(self, var): - return isinstance(var, np.ndarray) - - def log_error_msg(self, err_msg): - base_met_point_obs.error_msg(err_msg) - - def log_error(self, err_msgs): - print(self.ERROR_P) - for err_line in err_msgs.split('\n'): - self.log_error_msg(err_line) - print(self.ERROR_P) - - def log_info(self, info_msg): - base_met_point_obs.info_msg(info_msg) - - def put_data(self, point_obs_dict): - self.use_var_id = point_obs_dict['use_var_id'] - self.hdr_typ = point_obs_dict['hdr_typ'] - self.hdr_sid = point_obs_dict['hdr_sid'] - self.hdr_vld = point_obs_dict['hdr_vld'] - self.hdr_lat = point_obs_dict['hdr_lat'] - self.hdr_lon = point_obs_dict['hdr_lon'] - self.hdr_elv = point_obs_dict['hdr_elv'] - self.hdr_typ_table = point_obs_dict['hdr_typ_table'] - self.hdr_sid_table = point_obs_dict['hdr_sid_table'] - self.hdr_vld_table = point_obs_dict['hdr_vld_table'] - - #Observation data - self.obs_qty = point_obs_dict['obs_qty'] - self.obs_hid = point_obs_dict['obs_hid'] - self.obs_lvl = point_obs_dict['obs_lvl'] - self.obs_hgt = point_obs_dict['obs_hgt'] - self.obs_val = point_obs_dict['obs_val'] - self.obs_vid = point_obs_dict['obs_vid'] - self.obs_var_table = point_obs_dict['obs_var_table'] - self.obs_qty_table = point_obs_dict['obs_qty_table'] - po_array = point_obs_dict.get('obs_unit', None) - if po_array is not None: - self.obs_var_unit = po_array - po_array = point_obs_dict.get('obs_desc', None) - if po_array is not None: - self.obs_var_desc = po_array - - po_array = point_obs_dict.get('hdr_prpt_typ', None) - if po_array is not None: - self.hdr_prpt_typ = po_array - po_array = point_obs_dict.get('hdr_irpt_typ', None) - if po_array is not None: - self.hdr_irpt_typ = po_array - po_array = point_obs_dict.get('hdr_inst_typ', None) - if po_array is not None: - self.hdr_inst_typ = po_array - - @staticmethod - def error_msg(msg): - print(f'{get_prompt()} {base_met_point_obs.ERROR_P} {msg}') - - @staticmethod - def info_msg(msg): - print(f'{get_prompt()} {base_met_point_obs.INFO_P} {msg}') - - @staticmethod - def get_python_script(arg_value): - return arg_value[len(met_point_obs.python_prefix)+1:] - - @staticmethod - def is_python_script(arg_value): - return arg_value.startswith(met_point_obs.python_prefix) - - @staticmethod - def print_data(key, data_array, show_count=COUNT_SHOW): - if isinstance(data_array, list): - data_len = len(data_array) - if show_count >= data_len: - print(" {k:10s}: {v}".format(k=key, v= data_array)) - else: - end_offset = int(show_count/2) - print(" {k:10s}: count={v}".format(k=key, v=data_len)) - print(" {k:10s}[0:{o}] {v}".format(k=key, v=data_array[:end_offset], o=end_offset)) - print(" {k:10s}[{s}:{e}]: {v}".format(k=key, v='...', s=end_offset+1, e=data_len-end_offset-1)) - print(" {k:10s}[{s}:{e}]: {v}".format(k=key, v= data_array[-end_offset:], s=(data_len-end_offset), e=(data_len-1))) - else: - print(" {k:10s}: {v}".format(k=key, v= data_array)) - - @staticmethod - def print_point_data(met_point_data, print_subset=True): - print(' === MET point data by python embedding ===') - if print_subset: - met_point_obs.print_data('nhdr',met_point_data['nhdr']) - met_point_obs.print_data('nobs',met_point_data['nobs']) - 
met_point_obs.print_data('use_var_id',met_point_data['use_var_id']) - met_point_obs.print_data('hdr_typ',met_point_data['hdr_typ']) - met_point_obs.print_data('hdr_typ_table',met_point_data['hdr_typ_table']) - met_point_obs.print_data('hdr_sid',met_point_data['hdr_sid']) - met_point_obs.print_data('hdr_sid_table',met_point_data['hdr_sid_table']) - met_point_obs.print_data('hdr_vld',met_point_data['hdr_vld']) - met_point_obs.print_data('hdr_vld_table',met_point_data['hdr_vld_table']) - met_point_obs.print_data('hdr_lat',met_point_data['hdr_lat']) - met_point_obs.print_data('hdr_lon',met_point_data['hdr_lon']) - met_point_obs.print_data('hdr_elv',met_point_data['hdr_elv']) - met_point_obs.print_data('obs_hid',met_point_data['obs_hid']) - met_point_obs.print_data('obs_vid',met_point_data['obs_vid']) - met_point_obs.print_data('obs_var_table',met_point_data['obs_var_table']) - met_point_obs.print_data('obs_qty',met_point_data['obs_qty']) - met_point_obs.print_data('obs_qty_table',met_point_data['obs_qty_table']) - met_point_obs.print_data('obs_lvl',met_point_data['obs_lvl']) - met_point_obs.print_data('obs_hgt',met_point_data['obs_hgt']) - met_point_obs.print_data('obs_val',met_point_data['obs_val']) - else: - print('All',met_point_data) - print(" nhdr: ",met_point_data['nhdr']) - print(" nobs: ",met_point_data['nobs']) - print(' use_var_id: ',met_point_data['use_var_id']) - print(' hdr_typ: ',met_point_data['hdr_typ']) - print('hdr_typ_table: ',met_point_data['hdr_typ_table']) - print(' hdr_sid: ',met_point_data['hdr_sid']) - print('hdr_sid_table: ',met_point_data['hdr_sid_table']) - print(' hdr_vld: ',met_point_data['hdr_vld']) - print('hdr_vld_table: ',met_point_data['hdr_vld_table']) - print(' hdr_lat: ',met_point_data['hdr_lat']) - print(' hdr_lon: ',met_point_data['hdr_lon']) - print(' hdr_elv: ',met_point_data['hdr_elv']) - print(' obs_hid: ',met_point_data['obs_hid']) - print(' obs_vid: ',met_point_data['obs_vid']) - print('obs_var_table: ',met_point_data['obs_var_table']) - print(' obs_qty: ',met_point_data['obs_qty']) - print('obs_qty_table: ',met_point_data['obs_qty_table']) - print(' obs_lvl: ',met_point_data['obs_lvl']) - print(' obs_hgt: ',met_point_data['obs_hgt']) - print(' obs_val: ',met_point_data['obs_val']) - - print(' === MET point data by python embedding ===') - - -class csv_point_obs(ABC, base_met_point_obs): - - def __init__(self, point_data): - self.point_data = point_data - super(csv_point_obs, self).__init__() - - self.obs_cnt = obs_cnt = len(point_data) - self.obs_qty = [ 0 for _ in range(0, obs_cnt) ] # (nobs_qty) integer, index of self.obs_qty_table - self.obs_hid = [ 0 for _ in range(0, obs_cnt) ] # (nobs) integer - self.obs_vid = [ 0 for _ in range(0, obs_cnt) ] # (nobs) integer, veriable index from self.obs_var_table or GRIB code - self.obs_lvl = [ self.FILL_VALUE for _ in range(0, obs_cnt) ] # (nobs) float - self.obs_hgt = [ self.FILL_VALUE for _ in range(0, obs_cnt) ] # (nobs) float - self.obs_val = [ self.FILL_VALUE for _ in range(0, obs_cnt) ] # (nobs) float - - self.convert_point_data() - - def check_csv_record(self, csv_point_data, index): - error_msgs = [] - # names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] - # dtype={'typ':'str', 'sid':'str', 'vld':'str', 'var':'str', 'qc':'str'} - if 11 > len(csv_point_data): - error_msgs.append("{i}-th data: missing columns. 
should be 11 columns, not {c} columns".format( - i=index, c=len(csv_point_data))) - elif 11 < len(csv_point_data): - print("{i}-th data: ignore after 11-th columns out of {c} columns".format( - i=index, c=len(csv_point_data))) - if not isinstance(csv_point_data[0], str): - error_msgs.append("{i}-th data: message_type is not string".format(i=index)) - if not isinstance(csv_point_data[1], str): - error_msgs.append("{i}-th data: station_id is not string".format(i=index)) - if not isinstance(csv_point_data[2], str): - error_msgs.append("{i}-th data: valid_time is not string".format(i=index)) - if isinstance(csv_point_data[3], str): - error_msgs.append("{i}-th data: latitude can not be a string".format(i=index)) - elif csv_point_data[3] < -90.0 or csv_point_data[3] > 90.0: - error_msgs.append("{i}-th data: latitude ({l}) is out of range".format(i=index, l=csv_point_data[3])) - if isinstance(csv_point_data[4], str): - error_msgs.append("{i}-th data: longitude can not be a string".format(i=index)) - elif csv_point_data[4] < -180.0 or csv_point_data[4] > 360.0: - error_msgs.append("{i}-th data: longitude ({l}) is out of range".format(i=index, l=csv_point_data[4])) - if not isinstance(csv_point_data[6], str): - error_msgs.append("{i}-th data: grib_code/var_name is not string".format(i=index)) - if not isinstance(csv_point_data[9], str): - error_msgs.append("{i}-th data: quality_mark is not string".format(i=index)) - is_string, is_num = self.is_num_string(csv_point_data[5]) - if is_string and not is_num: - error_msgs.append("{i}-th data: elevation: only NA is accepted as string".format(i=index)) - is_string, is_num = self.is_num_string(csv_point_data[7]) - if is_string and not is_num: - error_msgs.append("{i}-th data: obs_level: only NA is accepted as string".format(i=index)) - is_string, is_num = self.is_num_string(csv_point_data[8]) - if is_string and not is_num: - error_msgs.append("{i}-th data: obs_height: only NA is accepted as string".format(i=index)) - is_string, is_num = self.is_num_string(csv_point_data[10]) - if is_string and not is_num: - error_msgs.append("{i}-th data: obs_value: only NA is accepted as string".format(i=index)) - return error_msgs - - def check_csv_point_data(self, all_records=False): - if 0 == len(self.point_data): - self.add_error_msg("No data!") - elif all_records: - data_idx = 0 - for csv_point_data in self.point_data: - data_idx += 1 - error_messages = self.check_csv_record(csv_point_data, data_idx) - if len(error_messages) > 0: - self.add_error_msgs(error_messages) - else: - error_messages = self.check_csv_record(self.point_data[0], index=1) - if len(error_messages) > 0: - self.add_error_msgs(error_messages) - if 1 < len(self.point_data): - error_messages = self.check_csv_record(self.point_data[-1], index=len(self.point_data)) - if len(error_messages) > 0: - self.add_error_msgs(error_messages) - - def convert_point_data(self): - hdr_cnt = hdr_typ_cnt = hdr_sid_cnt = hdr_vld_cnt = 0 - var_name_cnt = qc_cnt = 0 - - hdr_map = {} - hdr_typ_map = {} - hdr_sid_map = {} - hdr_vld_map = {} - obs_var_map = {} - obs_qty_map = {} - self.use_var_id = not self.is_grib_code() - - index = 0 - #names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] - for csv_point_record in self.point_data: - # Build header map. 
- hdr_typ_str = csv_point_record[0] - hdr_typ_idx = hdr_typ_map.get(hdr_typ_str,-1) - if hdr_typ_idx < 0: - hdr_typ_idx = hdr_typ_cnt - hdr_typ_map[hdr_typ_str] = hdr_typ_idx - hdr_typ_cnt += 1 - - hdr_sid_str = csv_point_record[1] - hdr_sid_idx = hdr_sid_map.get(hdr_sid_str,-1) - if hdr_sid_idx < 0: - hdr_sid_idx = hdr_sid_cnt - hdr_sid_map[hdr_sid_str] = hdr_sid_idx - hdr_sid_cnt += 1 - - hdr_vld_str = csv_point_record[2] - hdr_vld_idx = hdr_vld_map.get(hdr_vld_str,-1) - if hdr_vld_idx < 0: - hdr_vld_idx = hdr_vld_cnt - hdr_vld_map[hdr_vld_str] = hdr_vld_idx - hdr_vld_cnt += 1 - - lat = csv_point_record[3] - lon = csv_point_record[4] - elv = self.get_num_value(csv_point_record[5] ) - hdr_key = (hdr_typ_idx,hdr_sid_idx,hdr_vld_idx,lat,lon,elv) - hdr_idx = hdr_map.get(hdr_key,-1) - if hdr_idx < 0: - hdr_idx = hdr_cnt - hdr_map[hdr_key] = hdr_idx - hdr_cnt += 1 - - var_id_str = csv_point_record[6] - if self.use_var_id: - var_id = obs_var_map.get(var_id_str,-1) - if var_id < 0: - var_id = var_name_cnt - obs_var_map[var_id_str] = var_id - var_name_cnt += 1 - else: - var_id = int(var_id_str) - - qc_str = csv_point_record[9] - qc_id = obs_qty_map.get(qc_str,-1) - if qc_id < 0: - qc_id = qc_cnt - obs_qty_map[qc_str] = qc_id - qc_cnt += 1 - - # names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] - self.obs_vid[index] = var_id - self.obs_hid[index] = hdr_idx - self.obs_lvl[index] = self.get_num_value(csv_point_record[7]) - self.obs_hgt[index] = self.get_num_value(csv_point_record[8]) - self.obs_val[index] = self.get_num_value(csv_point_record[10]) - self.obs_qty[index] = qc_id - - index += 1 - - self.nhdr = hdr_cnt - self.nhdr_typ = hdr_typ_cnt - self.nhdr_sid = hdr_sid_cnt - self.nhdr_vld = hdr_vld_cnt - self.nobs_var = var_name_cnt - self.nobs_qty = qc_cnt - - # Fill header array and table array based on the map - self.hdr_typ = [ 0 for _ in range(0, hdr_cnt) ] - self.hdr_sid = [ 0 for _ in range(0, hdr_cnt) ] - self.hdr_vld = [ 0 for _ in range(0, hdr_cnt) ] - self.hdr_lat = [ self.FILL_VALUE for _ in range(0, hdr_cnt) ] - self.hdr_lon = [ self.FILL_VALUE for _ in range(0, hdr_cnt) ] - self.hdr_elv = [ self.FILL_VALUE for _ in range(0, hdr_cnt) ] - for key, idx in hdr_map.items(): - self.hdr_typ[idx] = key[0] - self.hdr_sid[idx] = key[1] - self.hdr_vld[idx] = key[2] - self.hdr_lat[idx] = key[3] - self.hdr_lon[idx] = key[4] - self.hdr_elv[idx] = key[5] - - self.hdr_typ_table = [ "" for _ in range(0, hdr_typ_cnt) ] - self.hdr_sid_table = [ "" for _ in range(0, hdr_sid_cnt) ] - self.hdr_vld_table = [ "" for _ in range(0, hdr_vld_cnt) ] - self.obs_qty_table = [ "" for _ in range(0, qc_cnt) ] - self.obs_var_table = [ "" for _ in range(0, var_name_cnt) ] - for key, idx in hdr_typ_map.items(): - self.hdr_typ_table[idx] = key - for key, idx in hdr_sid_map.items(): - self.hdr_sid_table[idx] = key - for key, idx in hdr_vld_map.items(): - self.hdr_vld_table[idx] = key - for key, idx in obs_qty_map.items(): - self.obs_qty_table[idx] = key - for key, idx in obs_var_map.items(): - self.obs_var_table[idx] = key - - def get_num_value(self, column_value): - num_value = column_value - if isinstance(column_value, str): - if self.is_number(column_value): - num_value = float(column_value) - else: - num_value = self.FILL_VALUE - if column_value.lower() != 'na' and column_value.lower() != 'n/a': - self.log_info(f'{column_value} is not a number, converted to the missing value') - return num_value - - def is_grib_code(self): - grib_code = True - for _point_data in self.point_data: - if 
isinstance(_point_data[6], int): - continue - elif isinstance(_point_data[6], str) and not _point_data[6].isdecimal(): - grib_code = False - break; - return grib_code - - def is_num_string(self, column_value): - is_string = isinstance(column_value, str) - if is_string: - is_num = True if self.is_number(column_value) or column_value.lower() == 'na' or column_value.lower() == 'n/a' else False - else: - is_num = True - return is_string, is_num - - -class met_point_obs(ABC, base_met_point_obs): - - MET_ENV_RUN = 'MET_FORCE_TO_RUN' - - @abstractmethod - def read_data(self, args): - # args can be input_file_name, list, or dictionary - # - The variables at __init__ should be filled as python list or numpy array - # - set self.input_name - # - # Here is a template - ''' - if isinstance(args, dict): - in_filename = args.get('in_name',None) - elif isinstance(args, list): - in_filename = args[0] - else: - in_filename = args - self.input_name = in_filename - ''' - pass - - @staticmethod - def get_prompt(): - return get_prompt() - - @staticmethod - def is_python_prefix(user_cmd): - return user_cmd.startswith(base_met_point_obs.python_prefix) - - -# This is a sample drived class -class sample_met_point_obs(met_point_obs): - - #@abstractmethod - def read_data(self, arg_map={}): - self.hdr_typ = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]) - self.hdr_sid = np.array([ 0, 0, 0, 0, 0, 1, 2, 3, 3, 1, 2, 2, 3, 0, 0, 0, 0, 0, 1, 2, 3, 3, 1, 2, 2, 3 ]) - self.hdr_vld = np.array([ 0, 1, 2, 3, 4, 4, 3, 4, 3, 4, 5, 4, 3, 0, 1, 2, 3, 4, 4, 3, 4, 3, 4, 5, 4, 3 ]) - self.hdr_lat = np.array([ 43., 43., 43., 43., 43., 43., 43., 43., 43., 46., 46., 46., 46., 43., 43., 43., 43., 43., 43., 43., 43., 43., 46., 46., 46., 46. ]) - self.hdr_lon = np.array([ -89., -89., -89., -89., -89., -89., -89., -89., -89., -92., -92., -92., -92., -89., -89., -89., -89., -89., -89., -89., -89., -89., -92., -92., -92., -92. ]) - self.hdr_elv = np.array([ 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220., 220. ]) - - self.obs_hid = np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25 ]) - self.obs_vid = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]) - self.obs_qty = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]) - self.obs_lvl = np.array([ 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000. ]) - self.obs_hgt = np.array([ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2. ]) - self.obs_val = np.array([ 292., 292.5, 293., 293.5, 294., 294.5, 295., 295.5, 296., 292., 293.4, 293., 296., 294., 92., 92.5, 93., 93.5, 94., 94.5, 95., 95.5, 96., 92., 93.4, 93., 96., 94. 
]) - - self.hdr_typ_table = [ "ADPSFC" ] - self.hdr_sid_table = [ "001", "002", "003", "004" ] - self.hdr_vld_table = [ - "20120409_115000", "20120409_115500", "20120409_120100", "20120409_120500", "20120409_121000", - "20120409_120000" ] - self.obs_var_table = [ "TMP", "RH" ] - self.obs_qty_table = [ "NA" ] - -def convert_point_data(point_data, check_all_records=False, input_type='csv'): - tmp_point_data = {} - if 'csv' == input_type: - csv_point_data = csv_point_obs(point_data) - csv_point_data.check_csv_point_data(check_all_records) - tmp_point_data = csv_point_data.get_point_data() - else: - base_met_point_obs.error_msg('Not supported input type: {input_type}') - return tmp_point_data - -def main(): - args = {} # or args = [] - point_obs_data = sample_met_point_obs() - point_obs_data.read_data(args) - met_point_data = point_obs_data.get_point_data() - - point_obs_data.print_point_data(met_point_data, print_subset=False) - -if __name__ == '__main__': - main() - print('Done python scripot') diff --git a/scripts/python/met_point_obs_nc.py b/scripts/python/met_point_obs_nc.py deleted file mode 100644 index e6680c0689..0000000000 --- a/scripts/python/met_point_obs_nc.py +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/env python3 - -''' -Separated from read_met_point_obs on Feb 09, 2023 - -@author: hsoh - -This script reads the MET point observation NetCDF file like MET tools do. -''' - -import os -import sys -from datetime import datetime -import numpy as np -import netCDF4 as nc - -from met_point_obs import met_point_obs, base_met_point_obs, get_prompt - -DO_PRINT_DATA = False -ARG_PRINT_DATA = 'show_data' - -# Note: caller should import netCDF4 -# the argements nc_group(dataset) and nc_var should not be None -class nc_tools(): - - met_missing = -99999999. 
- - @staticmethod - def get_num_array(nc_group, var_name): - nc_var = nc_group.variables.get(var_name, None) - return [] if nc_var is None else nc_var[:] - - @staticmethod - def get_ncbyte_array_to_str(nc_var): - nc_str_data = nc_var[:] - if nc_var.datatype.name == 'bytes8': - nc_str_data = [ str(s.compressed(),"utf-8") for s in nc_var[:] ] - return nc_str_data - - @staticmethod - def get_string_array(nc_group, var_name): - nc_var = nc_group.variables.get(var_name, None) - return [] if nc_var is None else nc_tools.get_ncbyte_array_to_str(nc_var) - - -class nc_point_obs(met_point_obs): - - # args should be string, list, or dictionary - def get_nc_filename(self, args): - nc_filename = None - if isinstance(args, dict): - nc_filename = args.get('nc_name',None) - elif isinstance(args, list): - nc_filename = args[0] - elif args != ARG_PRINT_DATA: - nc_filename = args - - return nc_filename - - def read_data(self, nc_filename): - if nc_filename is None: - self.log_error_msg("The input NetCDF filename is missing") - elif not os.path.exists(nc_filename): - self.log_error_msg(f"input NetCDF file ({nc_filename}) does not exist") - else: - dataset = nc.Dataset(nc_filename, 'r') - - attr_name = 'use_var_id' - use_var_id_str = dataset.getncattr(attr_name) if attr_name in dataset.ncattrs() else "false" - self.use_var_id = use_var_id_str.lower() == 'true' - - # Header - self.hdr_typ = dataset['hdr_typ'][:] - self.hdr_sid = dataset['hdr_sid'][:] - self.hdr_vld = dataset['hdr_vld'][:] - self.hdr_lat = dataset['hdr_lat'][:] - self.hdr_lon = dataset['hdr_lon'][:] - self.hdr_elv = dataset['hdr_elv'][:] - self.hdr_typ_table = nc_tools.get_string_array(dataset, 'hdr_typ_table') - self.hdr_sid_table = nc_tools.get_string_array(dataset, 'hdr_sid_table') - self.hdr_vld_table = nc_tools.get_string_array(dataset, 'hdr_vld_table') - - nc_var = dataset.variables.get('obs_unit', None) - if nc_var: - self.obs_var_unit = nc_var[:] - nc_var = dataset.variables.get('obs_desc', None) - if nc_var: - self.obs_var_desc = nc_var[:] - - nc_var = dataset.variables.get('hdr_prpt_typ', None) - if nc_var: - self.hdr_prpt_typ = nc_var[:] - nc_var = dataset.variables.get('hdr_irpt_typ', None) - if nc_var: - self.hdr_irpt_typ = nc_var[:] - nc_var = dataset.variables.get('hdr_inst_typ', None) - if nc_var: - self.hdr_inst_typ =nc_var[:] - - #Observation data - self.hdr_sid = dataset['hdr_sid'][:] - self.obs_qty = np.array(dataset['obs_qty'][:]) - self.obs_hid = np.array(dataset['obs_hid'][:]) - self.obs_lvl = np.array(dataset['obs_lvl'][:]) - self.obs_hgt = np.array(dataset['obs_hgt'][:]) - self.obs_val = np.array(dataset['obs_val'][:]) - nc_var = dataset.variables.get('obs_vid', None) - if nc_var is None: - self.use_var_id = False - nc_var = dataset.variables.get('obs_gc', None) - else: - self.obs_var_table = nc_tools.get_string_array(dataset, 'obs_var') - if nc_var: - self.obs_vid = np.array(nc_var[:]) - - self.obs_qty_table = nc_tools.get_string_array(dataset, 'obs_qty_table') - - def save_ncfile(self, nc_filename): - met_data = self.get_point_data() - with nc.Dataset(nc_filename, 'w') as nc_dataset: - self.set_nc_data(nc_dataset) - return met_data - - def set_nc_data(self, nc_dataset): - return nc_point_obs.write_nc_data(nc_dataset, self) - - @staticmethod - def write_nc_file(nc_filename, point_obs): - with nc.Dataset(nc_filename, 'w') as nc_dataset: - nc_point_obs.set_nc_data(nc_dataset, point_obs) - - @staticmethod - def write_nc_data(nc_dataset, point_obs): - do_nothing = False - if 0 == point_obs.nhdr: - do_nothing = True - 
base_met_point_obs.info_msg("the header is empty") - if 0 == point_obs.nobs: - do_nothing = True - base_met_point_obs.info_msg("the observation data is empty") - if do_nothing: - print() - return - - # Set global attributes - nc_dataset.MET_Obs_version = "1.02" ; - nc_dataset.use_var_id = "true" if point_obs.use_var_id else "false" - - # Create dimensions - nc_dataset.createDimension('mxstr', 16) - nc_dataset.createDimension('mxstr2', 40) - nc_dataset.createDimension('mxstr3', 80) - nc_dataset.createDimension('nhdr', point_obs.nhdr) - nc_dataset.createDimension('nobs', point_obs.nobs) - #npbhdr = len(point_obs.hdr_prpt_typ) - if 0 < point_obs.npbhdr: - nc_dataset.createDimension('npbhdr', point_obs.npbhdr) - nc_dataset.createDimension('nhdr_typ', point_obs.nhdr_typ) - nc_dataset.createDimension('nhdr_sid', point_obs.nhdr_sid) - nc_dataset.createDimension('nhdr_vld', point_obs.nhdr_vld) - nc_dataset.createDimension('nobs_qty', point_obs.nobs_qty) - nc_dataset.createDimension('obs_var_num', point_obs.nobs_var) - - type_for_string = 'S1' # np.byte - dims_hdr = ('nhdr',) - dims_obs = ('nobs',) - - # Create header and observation variables - var_hdr_typ = nc_dataset.createVariable('hdr_typ', np.int32, dims_hdr, fill_value=-9999) - var_hdr_sid = nc_dataset.createVariable('hdr_sid', np.int32, dims_hdr, fill_value=-9999) - var_hdr_vld = nc_dataset.createVariable('hdr_vld', np.int32, dims_hdr, fill_value=-9999) - var_hdr_lat = nc_dataset.createVariable('hdr_lat', np.float32, dims_hdr, fill_value=-9999.) - var_hdr_lon = nc_dataset.createVariable('hdr_lon', np.float32, dims_hdr, fill_value=-9999.) - var_hdr_elv = nc_dataset.createVariable('hdr_elv', np.float32, dims_hdr, fill_value=-9999.) - - var_obs_qty = nc_dataset.createVariable('obs_qty', np.int32, dims_obs, fill_value=-9999) - var_obs_hid = nc_dataset.createVariable('obs_hid', np.int32, dims_obs, fill_value=-9999) - var_obs_vid = nc_dataset.createVariable('obs_vid', np.int32, dims_obs, fill_value=-9999) - var_obs_lvl = nc_dataset.createVariable('obs_lvl', np.float32, dims_obs, fill_value=-9999.) - var_obs_hgt = nc_dataset.createVariable('obs_hgt', np.float32, dims_obs, fill_value=-9999.) - var_obs_val = nc_dataset.createVariable('obs_val', np.float32, dims_obs, fill_value=-9999.) - - if 0 == point_obs.npbhdr: - var_hdr_prpt_typ = None - var_hdr_irpt_typ = None - var_hdr_inst_typ = None - else: - dims_npbhdr = ('npbhdr',) - var_hdr_prpt_typ = nc_dataset.createVariable('hdr_prpt_typ', np.int32, dims_npbhdr, fill_value=-9999.) - var_hdr_irpt_typ = nc_dataset.createVariable('hdr_irpt_typ', np.int32, dims_npbhdr, fill_value=-9999.) - var_hdr_inst_typ = nc_dataset.createVariable('hdr_inst_typ', np.int32, dims_npbhdr, fill_value=-9999.) 
- - var_hdr_typ_table = nc_dataset.createVariable('hdr_typ_table', type_for_string, ('nhdr_typ','mxstr2')) - var_hdr_sid_table = nc_dataset.createVariable('hdr_sid_table', type_for_string, ('nhdr_sid','mxstr2')) - var_hdr_vld_table = nc_dataset.createVariable('hdr_vld_table', type_for_string, ('nhdr_vld','mxstr')) - var_obs_qty_table = nc_dataset.createVariable('obs_qty_table', type_for_string, ('nobs_qty','mxstr')) - var_obs_var_table = nc_dataset.createVariable('obs_var', type_for_string, ('obs_var_num','mxstr2')) - var_obs_var_unit = nc_dataset.createVariable('obs_unit', type_for_string, ('obs_var_num','mxstr2')) - var_obs_var_desc = nc_dataset.createVariable('obs_desc', type_for_string, ('obs_var_num','mxstr3')) - - # Set variables - var_hdr_typ[:] = point_obs.hdr_typ[:] - var_hdr_sid[:] = point_obs.hdr_sid[:] - var_hdr_vld[:] = point_obs.hdr_vld[:] - var_hdr_lat[:] = point_obs.hdr_lat[:] - var_hdr_lon[:] = point_obs.hdr_lon[:] - var_hdr_elv[:] = point_obs.hdr_elv[:] - for i in range(0, point_obs.nhdr_typ): - for j in range(0, len(point_obs.hdr_typ_table[i])): - var_hdr_typ_table[i,j] = point_obs.hdr_typ_table[i][j] - for i in range(0, point_obs.nhdr_sid): - for j in range(0, len(point_obs.hdr_sid_table[i])): - var_hdr_sid_table[i,j] = point_obs.hdr_sid_table[i][j] - for i in range(0, point_obs.nhdr_vld): - for j in range(0, len(point_obs.hdr_vld_table[i])): - var_hdr_vld_table[i,j] = point_obs.hdr_vld_table[i][j] - if 0 < point_obs.npbhdr: - var_hdr_prpt_typ[:] = point_obs.hdr_prpt_typ[:] - var_hdr_irpt_typ[:] = point_obs.hdr_irpt_typ[:] - var_hdr_inst_typ[:] = point_obs.hdr_inst_typ[:] - - var_obs_qty[:] = point_obs.obs_qty[:] - var_obs_hid[:] = point_obs.obs_hid[:] - var_obs_vid[:] = point_obs.obs_vid[:] - var_obs_lvl[:] = point_obs.obs_lvl[:] - var_obs_hgt[:] = point_obs.obs_hgt[:] - var_obs_val[:] = point_obs.obs_val[:] - for i in range(0, point_obs.nobs_var): - for j in range(0, len(point_obs.obs_var_table[i])): - var_obs_var_table[i,j] = point_obs.obs_var_table[i][j] - var_obs_var_unit[i] = "" if i >= len(point_obs.obs_var_unit) else point_obs.obs_var_unit[i] - var_obs_var_desc[i] = "" if i >= len(point_obs.obs_var_desc) else point_obs.obs_var_desc[i] - for i in range(0, point_obs.nobs_qty): - for j in range(0, len(point_obs.obs_qty_table[i])): - var_obs_qty_table[i,j] = point_obs.obs_qty_table[i][j] - - # Set variable attributes - var_hdr_typ.long_name = "index of message type" - var_hdr_sid.long_name = "index of station identification" - var_hdr_vld.long_name = "index of valid time" - var_hdr_lat.long_name = "latitude" - var_hdr_lat.units = "degrees_north" - var_hdr_lon.long_name = "longitude" - var_hdr_lon.units = "degrees_east" - var_hdr_elv.long_name = "elevation" - var_hdr_elv.units = "meters above sea level (msl)" - - var_obs_qty.long_name = "index of quality flag" - var_obs_hid.long_name = "index of matching header data" - var_obs_vid.long_name = "index of BUFR variable corresponding to the observation type" - var_obs_lvl.long_name = "pressure level (hPa) or accumulation interval (sec)" - var_obs_hgt.long_name = "height in meters above sea level (msl)" - var_obs_val.long_name = "observation value" - var_hdr_typ_table.long_name = "message type" - var_hdr_sid_table.long_name = "station identification" - var_hdr_vld_table.long_name = "valid time" - var_hdr_vld_table.units = "YYYYMMDD_HHMMSS UTC" - var_obs_qty_table.long_name = "quality flag" - var_obs_var_table.long_name = "variable names" - var_obs_var_unit.long_name = "variable units" - var_obs_var_desc.long_name = 
"variable descriptions" - - -def main(argv): - if len(argv) != 1 and argv[1] != ARG_PRINT_DATA: - netcdf_filename = argv[1] - tmp_nc_name = 'tmp_met_point.nc' - point_obs_data = nc_point_obs() - point_obs_data.read_data(point_obs_data.get_nc_filename(netcdf_filename)) - met_point_data = point_obs_data.save_ncfile(tmp_nc_name) - print(f'{get_prompt()} saved met_point_data to {tmp_nc_name}') - met_point_data['met_point_data'] = point_obs_data - - if DO_PRINT_DATA or ARG_PRINT_DATA == argv[-1]: - met_point_obs.print_point_data(met_point_data) - -if __name__ == '__main__': - start_time = datetime.now() - main(sys.argv) - run_time = datetime.now() - start_time - print(f'{get_prompt()} Done python script {sys.argv[0]} took {run_time}') diff --git a/data/wrappers/Makefile.am b/scripts/python/pyembed/Makefile.am similarity index 90% rename from data/wrappers/Makefile.am rename to scripts/python/pyembed/Makefile.am index deb919438e..ca8a3cb66e 100644 --- a/data/wrappers/Makefile.am +++ b/scripts/python/pyembed/Makefile.am @@ -18,18 +18,19 @@ SUBDIRS = -wrappersdir = $(pkgdatadir)/wrappers +pyembeddir = $(pkgdatadir)/python/pyembed -wrappers_DATA = \ - set_python_env.py \ +pyembed_DATA = \ + python_embedding.py \ read_tmp_dataplane.py \ read_tmp_ascii.py \ read_tmp_point_nc.py \ + set_python_env.py \ write_tmp_dataplane.py \ write_tmp_point.py \ write_tmp_point_nc.py \ write_tmp_mpr.py -EXTRA_DIST = ${wrappers_DATA} +EXTRA_DIST = ${pyembed_DATA} MAINTAINERCLEANFILES = Makefile.in diff --git a/data/wrappers/Makefile.in b/scripts/python/pyembed/Makefile.in similarity index 95% rename from data/wrappers/Makefile.in rename to scripts/python/pyembed/Makefile.in index da04b2b2a0..bd0848e94e 100644 --- a/data/wrappers/Makefile.in +++ b/scripts/python/pyembed/Makefile.in @@ -88,7 +88,7 @@ PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ -subdir = data/wrappers +subdir = scripts/python/pyembed ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ @@ -152,8 +152,8 @@ am__uninstall_files_from_dir = { \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } -am__installdirs = "$(DESTDIR)$(wrappersdir)" -DATA = $(wrappers_DATA) +am__installdirs = "$(DESTDIR)$(pyembeddir)" +DATA = $(pyembed_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ @@ -355,18 +355,19 @@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = -wrappersdir = $(pkgdatadir)/wrappers -wrappers_DATA = \ - set_python_env.py \ +pyembeddir = $(pkgdatadir)/python/pyembed +pyembed_DATA = \ + python_embedding.py \ read_tmp_dataplane.py \ read_tmp_ascii.py \ read_tmp_point_nc.py \ + set_python_env.py \ write_tmp_dataplane.py \ write_tmp_point.py \ write_tmp_point_nc.py \ write_tmp_mpr.py -EXTRA_DIST = ${wrappers_DATA} +EXTRA_DIST = ${pyembed_DATA} MAINTAINERCLEANFILES = Makefile.in all: all-recursive @@ -380,9 +381,9 @@ $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) exit 1;; \ esac; \ done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign data/wrappers/Makefile'; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign scripts/python/pyembed/Makefile'; \ $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --foreign data/wrappers/Makefile + $(AUTOMAKE) --foreign scripts/python/pyembed/Makefile Makefile: $(srcdir)/Makefile.in 
$(top_builddir)/config.status @case '$?' in \ *config.status*) \
@@ -400,27 +401,27 @@ $(top_srcdir)/configure: $(am__configure_deps) $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps):
-install-wrappersDATA: $(wrappers_DATA)
+install-pyembedDATA: $(pyembed_DATA)
 @$(NORMAL_INSTALL)
- @list='$(wrappers_DATA)'; test -n "$(wrappersdir)" || list=; \
+ @list='$(pyembed_DATA)'; test -n "$(pyembeddir)" || list=; \
 if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(wrappersdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(wrappersdir)" || exit 1; \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pyembeddir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pyembeddir)" || exit 1; \
 fi; \
 for p in $$list; do \
 if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
 echo "$$d$$p"; \
 done | $(am__base_list) | \
 while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(wrappersdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(wrappersdir)" || exit $$?; \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pyembeddir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pyembeddir)" || exit $$?; \
 done
-uninstall-wrappersDATA:
+uninstall-pyembedDATA:
 @$(NORMAL_UNINSTALL)
- @list='$(wrappers_DATA)'; test -n "$(wrappersdir)" || list=; \
+ @list='$(pyembed_DATA)'; test -n "$(pyembeddir)" || list=; \
 files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(wrappersdir)'; $(am__uninstall_files_from_dir)
+ dir='$(DESTDIR)$(pyembeddir)'; $(am__uninstall_files_from_dir)
 # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile.
@@ -584,7 +585,7 @@ check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am:
- for dir in "$(DESTDIR)$(wrappersdir)"; do \
+ for dir in "$(DESTDIR)$(pyembeddir)"; do \
 test -z "$$dir" || $(MKDIR_P) "$$dir"; \
 done install: install-recursive
@@ -638,7 +639,7 @@ info: info-recursive info-am:
-install-data-am: install-wrappersDATA
+install-data-am: install-pyembedDATA
 install-dvi: install-dvi-recursive
@@ -682,7 +683,7 @@ ps: ps-recursive ps-am:
-uninstall-am: uninstall-wrappersDATA
+uninstall-am: uninstall-pyembedDATA
 .MAKE: $(am__recursive_targets) install-am install-strip
@@ -693,11 +694,11 @@ uninstall-am: uninstall-wrappersDATA install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip install-wrappersDATA \
+ install-ps install-ps-am install-pyembedDATA install-strip \
 installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \
- uninstall-am uninstall-wrappersDATA
+ uninstall-am uninstall-pyembedDATA
 .PRECIOUS: Makefile
diff --git a/scripts/python/pyembed/python_embedding.py b/scripts/python/pyembed/python_embedding.py
new file mode 100644
index 0000000000..5240eb1120
--- /dev/null
+++ b/scripts/python/pyembed/python_embedding.py
@@ -0,0 +1,112 @@
+
+########################################################################
+#
+# Common APIs for python wrappers by Howard Soh (from scripts by
+# George McCabe and Randy Bullock).
+#
+# This is called when a user specifies a python executable (MET_PYTHON_EXE).
+# The target python object is saved as a temporary file by the user-defined
+# python script, and the python binaries (compiled with MET) read the
+# temporary file and build the python object for MET.
+# The temporary file can be any form with matching write/read scripts.
+# - NetCDF for gridded data and point observation data.
+# - text file (ASCII data) (MPR, point observation).
+#
+# NOTE: sys.argv is changed by calling call_python()
+#
+########################################################################
+
+import os
+import sys
+from importlib import util as import_util
+
+class pyembed_tools():
+
+   debug = False
+   class_name = "pyembed_tools"
+
+   @staticmethod
+   def add_python_path(called_file):   # called_file = __file__
+      method_name = f"{pyembed_tools.class_name}.add_python_path()"
+      script_dir = os.path.abspath(os.path.dirname(called_file))
+      if os.path.exists(script_dir) and script_dir != os.curdir:
+         if pyembed_tools.debug:
+            print(f"{method_name} added python path {script_dir}")
+         sys.path.append(os.path.abspath(script_dir))
+
+      # testing purpose (to switch the python path by using MET_BASE)
+      met_base_dir = os.environ.get('MET_BASE', None)
+      if met_base_dir is not None:
+         met_python_path = os.path.join(met_base_dir, 'python')
+         if os.path.exists(met_python_path):
+            if pyembed_tools.debug:
+               print(f"{method_name} added python path {os.path.abspath(met_python_path)} from MET_BASE")
+            sys.path.append(os.path.abspath(met_python_path))
+
+      # add share/met/python directory to system path
+      met_python_path = os.path.join(script_dir, os.pardir, 'python')
+      if not os.path.exists(met_python_path):
+         met_python_path = os.path.join(script_dir, os.pardir, os.pardir, 'python')
+      if os.path.exists(met_python_path) and met_python_path != met_base_dir:
+         if pyembed_tools.debug:
+            print(f"{method_name} added python path {os.path.abspath(met_python_path)}")
+         sys.path.append(os.path.abspath(met_python_path))
+
+   @staticmethod
+   def call_python(argv):
+      print("Python Script:\t" + repr(argv[0]))
+      print("User Command:\t" + repr(' '.join(argv[2:])))
+      print("Temporary File:\t" + repr(argv[1]))
+
+      # argv[0] is the python wrapper script (caller)
+      # argv[1] contains the temporary filename
+      # argv[2] contains the user defined python script
+      pyembed_module_name = argv[2]
+      sys.argv = argv[2:]
+
+      # add share/met/python directory to system path to find met_point_obs
+      pyembed_tools.add_python_path(pyembed_module_name)
+
+      # append user script dir to system path
+      pyembed_dir, _ = os.path.split(pyembed_module_name)
+      if pyembed_dir:
+         sys.path.insert(0, pyembed_dir)
+
+      if not pyembed_module_name.endswith('.py'):
+         pyembed_module_name += '.py'
+
+      user_base = os.path.basename(pyembed_module_name).replace('.py','')
+
+      spec = import_util.spec_from_file_location(user_base, pyembed_module_name)
+      met_in = import_util.module_from_spec(spec)
+      spec.loader.exec_module(met_in)
+      return met_in
+
+   @staticmethod
+   def read_tmp_ascii(filename):
+      """
+      Arguments:
+         filename (string): temporary file created by write_tmp_point.py or write_tmp_mpr.py
+
+      Returns:
+         (list of lists): point or mpr data
+      """
+      f = open(filename, 'r')
+      lines = f.readlines()
+      f.close()
+
+      ascii_data = [eval(line.strip('\n')) for line in lines]
+
+      return ascii_data
+
+   @staticmethod
+   def write_tmp_ascii(filename, met_data):
+      with open(filename, 'w') as f:
+         for line in met_data:
+            f.write(str(line) + '\n')
+
+
+if __name__ == '__main__':
+   argv_org = sys.argv[:]   # save original sys.argv
+   met_in = pyembed_tools.call_python(sys.argv)
+   sys.argv[:] = argv_org[:]   # restore
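For context, here is a minimal sketch of the kind of user script these wrappers load through pyembed_tools.call_python(). The filename my_point_script.py and its sample rows are hypothetical; the 11-column layout ('typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs') follows the point observation convention used elsewhere in this changeset. A gridded-data script would instead define met_data along with its grid attributes.

# my_point_script.py (hypothetical user script for python embedding)
# A MET tool would run, e.g.:
#    /path/to/python write_tmp_point.py /path/to/tmp_file my_point_script.py
# call_python() imports this module; the wrapper then writes
# met_in.point_data to the temporary file with write_tmp_ascii().

# Each row holds the 11 columns:
#    typ, sid, vld, lat, lon, elv, var, lvl, hgt, qc, obs
point_data = [
    ['ADPSFC', '001', '20120409_120000', 43.0, -89.0, 220.0, 'TMP', 1000.0, 2.0, 'NA', 292.0],
    ['ADPSFC', '002', '20120409_120000', 46.0, -92.0, 220.0, 'TMP', 1000.0, 2.0, 'NA', 293.4],
]

diff --git a/data/wrappers/read_tmp_ascii.py b/scripts/python/pyembed/read_tmp_ascii.py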
similarity index 52%
rename from data/wrappers/read_tmp_ascii.py
rename to scripts/python/pyembed/read_tmp_ascii.py
index fb7eb7b4e7..1e9573171c 100644
--- a/data/wrappers/read_tmp_ascii.py
+++ b/scripts/python/pyembed/read_tmp_ascii.py
@@ -20,29 +20,22 @@
 import argparse
 
+try:
+   from python_embedding import pyembed_tools
+except:
+   from pyembed.python_embedding import pyembed_tools
+
 def read_tmp_ascii(filename):
-    """
-    Arguments:
-        filename (string): temporary file created by write_tmp_point.py or write_tmp_mpr.py
-
-    Returns:
-        (list of lists): point or mpr data
-    """
-    f = open(filename, 'r')
-    lines = f.readlines()
-    f.close()
-
-    global ascii_data
-    ascii_data = [eval(line.strip('\n')) for line in lines]
-
-    return ascii_data
+   global ascii_data   # defined in python_handler.cc (tmp_list_name)
+   ascii_data = pyembed_tools.read_tmp_ascii(filename)
+   return ascii_data
 
 if __name__ == '__main__':
-    """
-    Parse command line arguments
-    """
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--filename', type=str)
-    args = parser.parse_args()
-
-    data = read_tmp_ascii(args.filename)
+   """
+   Parse command line arguments
+   """
+   parser = argparse.ArgumentParser()
+   parser.add_argument('--filename', type=str)
+   args = parser.parse_args()
+
+   data = read_tmp_ascii(args.filename)
diff --git a/scripts/python/pyembed/read_tmp_dataplane.py b/scripts/python/pyembed/read_tmp_dataplane.py
new file mode 100644
index 0000000000..aa2bc6046a
--- /dev/null
+++ b/scripts/python/pyembed/read_tmp_dataplane.py
@@ -0,0 +1,16 @@
+########################################################################
+#
+# Reads temporary file into memory.
+#
+# usage: /path/to/python read_tmp_dataplane.py dataplane.tmp
+#
+########################################################################
+
+import sys
+
+# PYTHON path for met.dataplane is added by write_tmp_dataplane.py
+from met.dataplane import dataplane
+
+netcdf_filename = sys.argv[1]
+# read NetCDF file
+met_info = dataplane.read_dataplane(netcdf_filename)
diff --git a/scripts/python/pyembed/read_tmp_point_nc.py b/scripts/python/pyembed/read_tmp_point_nc.py
new file mode 100644
index 0000000000..622405c520
--- /dev/null
+++ b/scripts/python/pyembed/read_tmp_point_nc.py
@@ -0,0 +1,28 @@
+########################################################################
+#
+# Reads temporary point obs. file into memory.
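+#
+# The temporary file is normally the NetCDF point obs saved by
+# write_tmp_point_nc.py; if reading it fails, the except block below
+# falls back to the ASCII form written by write_tmp_point.py.
+# On success, two variables are left for the MET reader:
+#    met_point_data  - dictionary of point observation header/data arrays
+#    point_obs_data  - the reader object, also stored under
+#                      met_point_data['met_point_data']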
+#
+# usage: /path/to/python read_tmp_point_nc.py tmp_output_filename
+#
+########################################################################
+
+import sys
+
+from met.point import met_point_tools
+try:
+   from python_embedding import pyembed_tools
+except:
+   from pyembed.python_embedding import pyembed_tools
+
+input_filename = sys.argv[1]
+
+# read NetCDF file
+print('{p} reading {f}'.format(p=met_point_tools.get_prompt(), f=input_filename))
+try:
+   point_obs_data = met_point_tools.get_nc_point_obs()
+   point_obs_data.read_data(input_filename)
+
+   met_point_data = point_obs_data.get_point_data()
+   met_point_data['met_point_data'] = point_obs_data
+except:
+   point_data = pyembed_tools.read_tmp_ascii(input_filename)
diff --git a/data/wrappers/set_python_env.py b/scripts/python/pyembed/set_python_env.py
similarity index 100%
rename from data/wrappers/set_python_env.py
rename to scripts/python/pyembed/set_python_env.py
diff --git a/scripts/python/pyembed/write_tmp_dataplane.py b/scripts/python/pyembed/write_tmp_dataplane.py
new file mode 100644
index 0000000000..991ca0c0fd
--- /dev/null
+++ b/scripts/python/pyembed/write_tmp_dataplane.py
@@ -0,0 +1,27 @@
+########################################################################
+#
+# Adapted from a script provided by George McCabe
+# Adapted by Randy Bullock
+#
+# usage: /path/to/python write_tmp_dataplane.py \
+#        tmp_output_filename <user_python_script>.py
+#
+########################################################################
+
+import sys
+
+try:
+   from python_embedding import pyembed_tools
+   pyembed_tools.add_python_path(__file__)
+except:
+   from pyembed.python_embedding import pyembed_tools
+
+from met.dataplane import dataplane
+
+#def write_dataplane(met_in, netcdf_filename):
+#   dataplane.write_dataplane(met_in, netcdf_filename)
+
+if __name__ == '__main__':
+   netcdf_filename = sys.argv[1]
+   met_in = pyembed_tools.call_python(sys.argv)
+   dataplane.write_dataplane(met_in, netcdf_filename)
diff --git a/scripts/python/pyembed/write_tmp_mpr.py b/scripts/python/pyembed/write_tmp_mpr.py
new file mode 100644
index 0000000000..0e6141b76c
--- /dev/null
+++ b/scripts/python/pyembed/write_tmp_mpr.py
@@ -0,0 +1,22 @@
+########################################################################
+#
+# Adapted from a script provided by George McCabe
+# Adapted by Randy Bullock
+#
+# usage: /path/to/python write_tmp_mpr.py \
+#        tmp_output_filename <user_python_script>.py
+#
+########################################################################
+
+import sys
+try:
+   from python_embedding import pyembed_tools
+except:
+   from pyembed.python_embedding import pyembed_tools
+
+if __name__ == '__main__':
+   argv_org = sys.argv[:]
+   tmp_filename = sys.argv[1]
+   met_in = pyembed_tools.call_python(sys.argv)
+
+   pyembed_tools.write_tmp_ascii(tmp_filename, met_in.mpr_data)
diff --git a/scripts/python/pyembed/write_tmp_point.py b/scripts/python/pyembed/write_tmp_point.py
new file mode 100644
index 0000000000..95f2992094
--- /dev/null
+++ b/scripts/python/pyembed/write_tmp_point.py
@@ -0,0 +1,21 @@
+########################################################################
+#
+# Adapted from a script provided by George McCabe
+# Adapted by Randy Bullock
+#
+# usage: /path/to/python write_tmp_point.py \
+#        tmp_output_filename <user_python_script>.py
+#
+########################################################################
+
+import sys
+
+try:
+   from python_embedding import pyembed_tools
+except:
+   from pyembed.python_embedding import pyembed_tools
+
+if __name__ == '__main__':
+   tmp_filename = sys.argv[1]
+   met_in = pyembed_tools.call_python(sys.argv)
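+   # met_in is the user script imported as a module by call_python();
+   # its point_data (11-column observation rows) is written to the
+   # temporary ASCII file for the MET tool to read back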
+   pyembed_tools.write_tmp_ascii(tmp_filename, met_in.point_data)
diff --git a/scripts/python/pyembed/write_tmp_point_nc.py b/scripts/python/pyembed/write_tmp_point_nc.py
new file mode 100644
index 0000000000..6d6c69f693
--- /dev/null
+++ b/scripts/python/pyembed/write_tmp_point_nc.py
@@ -0,0 +1,38 @@
+########################################################################
+#
+# Adapted from a script provided by George McCabe
+# Adapted by Howard Soh
+#
+# usage: /path/to/python write_tmp_point_nc.py \
+#        tmp_output_filename <user_python_script>.py
+#
+########################################################################
+
+import os
+import sys
+
+try:
+   from python_embedding import pyembed_tools
+   pyembed_tools.add_python_path(__file__)
+except:
+   from pyembed.python_embedding import pyembed_tools
+
+
+from met.point import met_point_tools
+
+if __name__ == '__main__':
+   argv_org = sys.argv[:]
+   tmp_filename = sys.argv[1]
+   met_in = pyembed_tools.call_python(sys.argv)
+
+   # the user script may expose the point data in one of three forms
+   if hasattr(met_in, 'point_data'):
+      # 1) point_data: 11-column ASCII observation rows
+      pyembed_tools.write_tmp_ascii(tmp_filename, met_in.point_data)
+   elif hasattr(met_in, 'point_obs_data'):
+      # 2) point_obs_data: a point obs object that saves itself as NetCDF
+      met_in.point_obs_data.save_ncfile(tmp_filename)
+   else:
+      # 3) met_point_data: a dictionary of point obs arrays
+      if 'point_obs_data' in met_in.met_point_data:
+         met_in.met_point_data['point_obs_data'].save_ncfile(tmp_filename)
+      else:
+         tmp_point_obs = met_point_tools.get_nc_point_obs()
+         tmp_point_obs.put_data(met_in.met_point_data)
+         tmp_point_obs.save_ncfile(tmp_filename)
diff --git a/scripts/python/read_ascii_mpr.py b/scripts/python/read_ascii_mpr.py
deleted file mode 100755
index fa71b8e6d2..0000000000
--- a/scripts/python/read_ascii_mpr.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import pandas as pd
-import os
-import sys
-
-########################################################################
-
-print("Python Script:\t" + repr(sys.argv[0]))
-
-   ##
-   ## input file specified on the command line
-   ## load the data into the numpy array
-   ##
-
-if len(sys.argv) != 2:
-    print("ERROR: read_ascii_point.py -> Must specify exactly one input file.")
-    sys.exit(1)
-
-# Read the input file as the first argument
-input_file = os.path.expandvars(sys.argv[1])
-try:
-    print("Input File:\t" + repr(input_file))
-
-    # Read MPR lines, skipping the header row and first column.
- mpr_data = pd.read_csv(input_file, header=None, - delim_whitespace=True, keep_default_na=False, - skiprows=1, usecols=range(1,37), - dtype=str).values.tolist() - print("Data Length:\t" + repr(len(mpr_data))) - print("Data Type:\t" + repr(type(mpr_data))) -except NameError: - print("Can't find the input file") - -######################################################################## diff --git a/scripts/python/read_ascii_numpy.py b/scripts/python/read_ascii_numpy.py deleted file mode 100755 index 6d129afc1c..0000000000 --- a/scripts/python/read_ascii_numpy.py +++ /dev/null @@ -1,75 +0,0 @@ -import numpy as np -import os -import sys - -########################################### - -print("Python Script:\t" + repr(sys.argv[0])) - - ## - ## input file specified on the command line - ## load the data into the numpy array - ## - -if len(sys.argv) != 3: - print("ERROR: read_ascii_numpy.py -> Must specify exactly one input file and a name for the data.") - sys.exit(1) - -# Read the input file as the first argument -input_file = os.path.expandvars(sys.argv[1]) -data_name = sys.argv[2] -try: - # Print some output to verify that this script ran - print("Input File:\t" + repr(input_file)) - print("Data Name:\t" + repr(data_name)) - met_data = np.loadtxt(input_file) - print("Data Shape:\t" + repr(met_data.shape)) - print("Data Type:\t" + repr(met_data.dtype)) -except NameError: - print("Can't find the input file") - -########################################### - - ## - ## create the metadata dictionary - ## - -attrs = { - - 'valid': '20050807_120000', - 'init': '20050807_000000', - 'lead': '120000', - 'accum': '120000', - - 'name': data_name, - 'long_name': data_name + '_word', - 'level': 'Surface', - 'units': 'None', - - 'grid': { - 'type': 'Lambert Conformal', - 'hemisphere': 'N', - - 'name': 'FooGrid', - - 'scale_lat_1': 25.0, - 'scale_lat_2': 25.0, - - 'lat_pin': 12.19, - 'lon_pin': -135.459, - - 'x_pin': 0.0, - 'y_pin': 0.0, - - 'lon_orient': -95.0, - - 'd_km': 40.635, - 'r_km': 6371.2, - - 'nx': 185, - 'ny': 129, - } - -} - -print("Attributes:\t" + repr(attrs)) diff --git a/scripts/python/read_ascii_numpy_grid.py b/scripts/python/read_ascii_numpy_grid.py deleted file mode 100755 index 3e4cc25f69..0000000000 --- a/scripts/python/read_ascii_numpy_grid.py +++ /dev/null @@ -1,51 +0,0 @@ -import numpy as np -import os -import sys - -########################################### - -print("Python Script:\t" + repr(sys.argv[0])) - - ## - ## input file specified on the command line - ## load the data into the numpy array - ## - -if len(sys.argv) != 3: - print("ERROR: read_ascii_numpy.py -> Must specify exactly one input file and a name for the data.") - sys.exit(1) - -# Read the input file as the first argument -input_file = os.path.expandvars(sys.argv[1]) -data_name = sys.argv[2] -try: - # Print some output to verify that this script ran - print("Input File:\t" + repr(input_file)) - print("Data Name:\t" + repr(data_name)) - met_data = np.loadtxt(input_file) - print("Data Shape:\t" + repr(met_data.shape)) - print("Data Type:\t" + repr(met_data.dtype)) -except NameError: - print("Can't find the input file") - -########################################### - - ## - ## create the metadata dictionary - ## - -attrs = { - - 'valid': '20050807_120000', - 'init': '20050807_000000', - 'lead': '120000', - 'accum': '120000', - - 'name': data_name, - 'long_name': data_name + '_word', - 'level': 'Surface', - 'units': 'None', - 'grid': os.path.expandvars(os.getenv('PYTHON_GRID')) -} - -print("Attributes:\t" + 
repr(attrs)) diff --git a/scripts/python/read_ascii_point.py b/scripts/python/read_ascii_point.py deleted file mode 100755 index 7fb8eb076a..0000000000 --- a/scripts/python/read_ascii_point.py +++ /dev/null @@ -1,48 +0,0 @@ -import pandas as pd -import os -import sys -from met_point_obs import convert_point_data - -######################################################################## - -print("Python Script:\t" + repr(sys.argv[0])) - -## -## input file specified on the command line -## load the data into the numpy array -## - -if len(sys.argv) != 2: - print("ERROR: read_ascii_point.py -> Must specify exactly one input file.") - sys.exit(1) - -# Read the input file as the first argument -input_file = os.path.expandvars(sys.argv[1]) -try: - print("Input File:\t" + repr(input_file)) - - # Read and format the input 11-column observations: - # (1) string: Message_Type - # (2) string: Station_ID - # (3) string: Valid_Time(YYYYMMDD_HHMMSS) - # (4) numeric: Lat(Deg North) - # (5) numeric: Lon(Deg East) - # (6) numeric: Elevation(msl) - # (7) string: Var_Name(or GRIB_Code) - # (8) numeric: Level - # (9) numeric: Height(msl or agl) - # (10) string: QC_String - # (11) numeric: Observation_Value - - point_data = pd.read_csv(input_file, header=None, delim_whitespace=True, keep_default_na=False, - names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'], - dtype={'typ':'str', 'sid':'str', 'vld':'str', 'var':'str', 'qc':'str'}).values.tolist() - print(" point_data: Data Length:\t" + repr(len(point_data))) - print(" point_data: Data Type:\t" + repr(type(point_data))) - met_point_data = convert_point_data(point_data) - print(" met_point_data: Data Type:\t" + repr(type(met_point_data))) -except NameError: - print("Can't find the input file") - sys.exit(1) - -######################################################################## diff --git a/scripts/python/read_ascii_xarray.py b/scripts/python/read_ascii_xarray.py deleted file mode 100755 index 6e906863a7..0000000000 --- a/scripts/python/read_ascii_xarray.py +++ /dev/null @@ -1,93 +0,0 @@ -import numpy as np -import os -import sys -import xarray as xr - -########################################### - -print("Python Script:\t" + repr(sys.argv[0])) - - ## - ## input file specified on the command line - ## load the data into the numpy array - ## - -if len(sys.argv) != 3: - print("ERROR: read_ascii_xarray.py -> Must specify exactly one input file and a name for the data.") - sys.exit(1) - -# Read the input file as the first argument -input_file = os.path.expandvars(sys.argv[1]) -data_name = sys.argv[2] -try: - # Print some output to verify that this script ran - print("Input File:\t" + repr(input_file)) - print("Data Name:\t" + repr(data_name)) - met_data = np.loadtxt(input_file) - print("Data Shape:\t" + repr(met_data.shape)) - print("Data Type:\t" + repr(met_data.dtype)) -except NameError: - print("Can't find the input file") - -########################################### - - ## - ## create the metadata dictionary - ## - -attrs = { - - 'valid': '20050807_120000', - 'init': '20050807_000000', - 'lead': '120000', - 'accum': '120000', - - 'name': data_name, - 'long_name': data_name + '_word', - 'level': 'Surface', - 'units': 'None', - - 'grid': { - 'type': 'Lambert Conformal', - 'hemisphere': 'N', - - 'name': 'FooGrid', - - 'scale_lat_1': 25.0, - 'scale_lat_2': 25.0, - - 'lat_pin': 12.19, - 'lon_pin': -135.459, - - 'x_pin': 0.0, - 'y_pin': 0.0, - - 'lon_orient': -95.0, - - 'd_km': 40.635, - 'r_km': 6371.2, - - 'nx': 185, - 'ny': 129, 
- } - -} - -print("Attributes:\t" + repr(attrs)) - -# Create an xarray DataArray object -da = xr.DataArray(met_data) -ds = xr.Dataset({"fcst":da}) - -# Add the attributes to the dataarray object -ds.attrs = attrs - -# Delete the local variable attrs to mimic the real world, -# where a user will rely on da.attrs rather than construct it themselves -del attrs - -# Delete the met_data variable, and reset it to be the Xarray object -del met_data - -# Create met_data and specify attrs because XR doesn't persist them. -met_data = xr.DataArray(ds.fcst, attrs=ds.attrs) diff --git a/scripts/python/read_met_point_obs.py b/scripts/python/read_met_point_obs.py deleted file mode 100755 index 57ccd22e7a..0000000000 --- a/scripts/python/read_met_point_obs.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python3 -''' -Created on Nov 10, 2021 - -@author: hsoh - -This script reads the MET point observation NetCDF file like MET tools do. - -Usage: - - python3 read_met_point_obs.py - python3 read_met_point_obs.py - : 11 columns - 'typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs' - string columns: 'typ', 'sid', 'vld', 'var', , 'qc' - numeric columns 'lat', 'lon', 'elv', 'lvl', 'hgt', 'qc', 'obs' - python3 read_met_point_obs.py - -''' - -import os -import sys -from datetime import datetime - -met_base_dir = os.getenv('MET_BASE',None) -if met_base_dir is not None: - sys.path.append(os.path.join(met_base_dir, 'python')) - -from met_point_obs import met_point_obs, sample_met_point_obs -from met_point_obs_nc import nc_point_obs - -DO_PRINT_DATA = False -ARG_PRINT_DATA = 'show_data' - -start_time = datetime.now() - -prompt = met_point_obs.get_prompt() -point_obs_data = None -if len(sys.argv) == 1 or ARG_PRINT_DATA == sys.argv[1]: - point_obs_data = sample_met_point_obs() - point_obs_data.read_data([]) -elif met_point_obs.is_python_prefix(sys.argv[1]): - import importlib.util - - print("{p} Python Script:\t".format(p=prompt) + repr(sys.argv[0])) - print("{p} User Command:\t".format(p=prompt) + repr(' '.join(sys.argv[2:]))) - - pyembed_module_name = sys.argv[2] - sys.argv = sys.argv[1:] - - # append user script dir to system path - pyembed_dir, pyembed_file = os.path.split(pyembed_module_name) - if pyembed_dir: - sys.path.insert(0, pyembed_dir) - - if not pyembed_module_name.endswith('.py'): - pyembed_module_name += '.py' - os.environ[met_point_obs.MET_ENV_RUN] = "TRUE" - - user_base = os.path.basename(pyembed_module_name).replace('.py','') - - spec = importlib.util.spec_from_file_location(user_base, pyembed_module_name) - met_in = importlib.util.module_from_spec(spec) - spec.loader.exec_module(met_in) - - met_point_obs = met_in.met_point_obs - print("met_point_obs: ", met_point_obs) - met_point_data = met_in.met_point_data - print("met_point_data: ", met_point_data) - #print(hasattr("met_in: ", dir(met_in))) - #met_point_data = met_point_obs.get_point_data() - #met_point_data = None if met_in.get('met_point_data', None) else met_in.met_point_data - #met_data = None if met_in.get('met_data', None) else met_in.met_data - print(met_point_data) -else: - netcdf_filename = sys.argv[1] - args = [ netcdf_filename ] - #args = { 'nc_name': netcdf_filename } - point_obs_data = nc_point_obs() - point_obs_data.read_data(point_obs_data.get_nc_filename(args)) - -if point_obs_data is not None: - met_point_data = point_obs_data.get_point_data() - met_point_data['met_point_data'] = point_obs_data - - if DO_PRINT_DATA or ARG_PRINT_DATA == sys.argv[-1]: - point_obs_data.dump() - -run_time = datetime.now() - 
start_time - -print('{p} Done python script {s} took {t}'.format(p=prompt, s=sys.argv[0], t=run_time)) diff --git a/scripts/utility/Makefile.am b/scripts/python/utility/Makefile.am similarity index 96% rename from scripts/utility/Makefile.am rename to scripts/python/utility/Makefile.am index d807a69977..5efd02b01e 100644 --- a/scripts/utility/Makefile.am +++ b/scripts/python/utility/Makefile.am @@ -23,7 +23,7 @@ ## scriptsrootdir = $(prefix)/share/scripts ## pythonutilitydir = ${scriptsrootdir}/utility -pythonutilitydir = $(pkgdatadir)/utility +pythonutilitydir = $(pkgdatadir)/python/utility pythonutility_DATA = \ print_pointnc2ascii.py \ diff --git a/scripts/utility/Makefile.in b/scripts/python/utility/Makefile.in similarity index 98% rename from scripts/utility/Makefile.in rename to scripts/python/utility/Makefile.in index bdaec7b3f9..7a994964d1 100644 --- a/scripts/utility/Makefile.in +++ b/scripts/python/utility/Makefile.in @@ -90,7 +90,7 @@ PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ -subdir = scripts/utility +subdir = scripts/python/utility ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ @@ -296,7 +296,7 @@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -pythonutilitydir = $(pkgdatadir)/utility +pythonutilitydir = $(pkgdatadir)/python/utility pythonutility_DATA = \ print_pointnc2ascii.py \ build_ndbc_stations_from_web.py @@ -315,9 +315,9 @@ $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) exit 1;; \ esac; \ done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign scripts/utility/Makefile'; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign scripts/python/utility/Makefile'; \ $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --foreign scripts/utility/Makefile + $(AUTOMAKE) --foreign scripts/python/utility/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ diff --git a/scripts/utility/build_ndbc_stations_from_web.py b/scripts/python/utility/build_ndbc_stations_from_web.py similarity index 100% rename from scripts/utility/build_ndbc_stations_from_web.py rename to scripts/python/utility/build_ndbc_stations_from_web.py diff --git a/scripts/utility/print_pointnc2ascii.py b/scripts/python/utility/print_pointnc2ascii.py similarity index 100% rename from scripts/utility/print_pointnc2ascii.py rename to scripts/python/utility/print_pointnc2ascii.py diff --git a/src/basic/enum_to_string/code.cc b/src/basic/enum_to_string/code.cc index f9af7db14d..eb9066929b 100644 --- a/src/basic/enum_to_string/code.cc +++ b/src/basic/enum_to_string/code.cc @@ -80,7 +80,7 @@ char upper[256]; char pound_define[256]; char junk[256]; int len, scope_len, max_len; -char * len_name = (char *) NULL; +char * len_name = (char *) nullptr; max_len = 0; @@ -240,7 +240,7 @@ f << "\n\n" f.close(); -if ( len_name ) { delete [] len_name; len_name = (char *) NULL; } +if ( len_name ) { delete [] len_name; len_name = (char *) nullptr; } return; @@ -263,7 +263,7 @@ char upper[256]; char pound_define[256]; char junk[256]; int len, scope_len, max_len; -char * len_name = (char *) NULL; +char * len_name = (char *) nullptr; max_len = 0; @@ -407,7 +407,7 @@ f << "\n\n" f.close(); -if ( len_name ) { delete [] len_name; len_name = (char *) NULL; } +if ( len_name ) { delete [] len_name; len_name = (char *) nullptr; } return; @@ -962,7 +962,7 @@ void warning(ofstream & f) int j; -const char * short_name = (const char *) NULL; +const char * short_name = (const char *) nullptr; // // strip the leading path from header_filename @@ -1001,7 +1001,7 @@ void patch_name(char * len_name) int j, n; int pos; -char * new_name = (char *) NULL; +char * new_name = (char *) nullptr; char c; const char *method_name = "patch_name() -> "; @@ -1042,7 +1042,7 @@ m_strcpy(len_name, new_name, method_name); // done // -if ( new_name ) { delete [] new_name; new_name = (char *) NULL; } +if ( new_name ) { delete [] new_name; new_name = (char *) nullptr; } return; diff --git a/src/basic/enum_to_string/enum_to_string.cc b/src/basic/enum_to_string/enum_to_string.cc index 24c1644b60..bd0e03dc19 100644 --- a/src/basic/enum_to_string/enum_to_string.cc +++ b/src/basic/enum_to_string/enum_to_string.cc @@ -137,7 +137,7 @@ header_filename = argv[1]; yydebug = debug; -if ( (yyin = met_fopen(header_filename, "r")) == NULL ) { +if ( (yyin = met_fopen(header_filename, "r")) == nullptr ) { cerr << "\n\n unable to open input file \"" << header_filename << "\"\n\n"; @@ -386,8 +386,6 @@ if ( debug ) { exit ( 1 ); -return; - } diff --git a/src/basic/enum_to_string/info.cc b/src/basic/enum_to_string/info.cc index 769618d8db..224ea9df07 100644 --- a/src/basic/enum_to_string/info.cc +++ b/src/basic/enum_to_string/info.cc @@ -94,17 +94,17 @@ void EnumInfo::init_from_scratch() { -s = (char **) NULL; +s = (char **) nullptr; -Name = (char *) NULL; +Name = (char *) nullptr; -LowerCaseName = (char *) NULL; +LowerCaseName = (char *) nullptr; -Scope = (char *) NULL; +Scope = (char *) nullptr; -U_Scope = (char *) NULL; +U_Scope = (char *) nullptr; -Header = (char *) NULL; +Header = (char *) nullptr; Nalloc = Nids = 0; @@ -127,21 +127,21 @@ int j; for (j=0; j unrecognized op ... \"" << op << "\"\n\n"; exit ( 1 ); - break; - } // switch @@ -2371,7 +2367,6 @@ switch ( op ) { mlog << Error << "\ndo_integer_op() -> " << "bad operator ... 
\"" << op << "\"\n\n"; exit ( 1 ); - break; } diff --git a/src/basic/vx_config/config.tab.yy b/src/basic/vx_config/config.tab.yy index a2ce5f6184..3544627fef 100644 --- a/src/basic/vx_config/config.tab.yy +++ b/src/basic/vx_config/config.tab.yy @@ -476,8 +476,6 @@ mlog << Error exit ( 1 ); -return; - } @@ -528,8 +526,6 @@ switch ( op ) { default: cerr << "\n\n do_op() -> unrecognized op ... \"" << op << "\"\n\n"; exit ( 1 ); - break; - } // switch @@ -579,7 +575,6 @@ switch ( op ) { mlog << Error << "\ndo_integer_op() -> " << "bad operator ... \"" << op << "\"\n\n"; exit ( 1 ); - break; } diff --git a/src/basic/vx_config/config_file.cc b/src/basic/vx_config/config_file.cc index c79a87a1b6..97a72cdcd8 100644 --- a/src/basic/vx_config/config_file.cc +++ b/src/basic/vx_config/config_file.cc @@ -332,7 +332,7 @@ Filename.add(bison_input_filename); configdebug = (Debug ? 1 : 0); -if ( (configin = met_fopen(bison_input_filename, "r")) == NULL ) { +if ( (configin = met_fopen(bison_input_filename, "r")) == nullptr ) { mlog << Error << "\nMetConfig::read(const char *) -> " << "unable to open input file \"" << bison_input_filename << "\"\n\n"; diff --git a/src/basic/vx_config/config_util.cc b/src/basic/vx_config/config_util.cc index ef7a916c76..66475afdee 100644 --- a/src/basic/vx_config/config_util.cc +++ b/src/basic/vx_config/config_util.cc @@ -2031,7 +2031,7 @@ int parse_conf_percentile(Dictionary *dict) { /////////////////////////////////////////////////////////////////////////////// ConcatString parse_conf_tmp_dir(Dictionary *dict) { - DIR* odir = NULL; + DIR* odir = nullptr; ConcatString tmp_dir_path; if(!get_env("MET_TMP_DIR", tmp_dir_path)) { @@ -2045,7 +2045,7 @@ ConcatString parse_conf_tmp_dir(Dictionary *dict) { } // Make sure that it exists - if((odir = met_opendir(tmp_dir_path.c_str())) == NULL) { + if((odir = met_opendir(tmp_dir_path.c_str())) == nullptr) { mlog << Error << "\nparse_conf_tmp_dir() -> " << "Cannot access the \"" << conf_key_tmp_dir << "\" directory: " << tmp_dir_path << "\n\n"; diff --git a/src/basic/vx_config/dictionary.cc b/src/basic/vx_config/dictionary.cc index c9028090d5..ae93e52b60 100644 --- a/src/basic/vx_config/dictionary.cc +++ b/src/basic/vx_config/dictionary.cc @@ -2417,18 +2417,12 @@ DictionaryStack & DictionaryStack::operator=(const DictionaryStack & s) { -// if ( this == &s ) return ( * this ); -// -// assign(s); - mlog << Error << "\nDictionaryStack::operator=(const DictionaryStack &) -> " << "should never be called!\n\n"; exit ( 1 ); -return ( * this ); - } diff --git a/src/basic/vx_log/concat_string.cc b/src/basic/vx_log/concat_string.cc index bde00de1c5..20ac3cf95e 100644 --- a/src/basic/vx_log/concat_string.cc +++ b/src/basic/vx_log/concat_string.cc @@ -530,7 +530,7 @@ int ConcatString::format(const char *fmt, ...) 
{ va_list vl; int status = -1; - char *tmp = NULL; + char *tmp = nullptr; va_start(vl, fmt); status = vasprintf(&tmp, fmt, vl); @@ -1180,7 +1180,7 @@ bool is_empty(const char * text) { -return ( (text == NULL) || (*text == 0) || (m_strlen(text) == 0)); +return ( (text == nullptr) || (*text == 0) || (m_strlen(text) == 0)); } @@ -1201,7 +1201,7 @@ env_value.clear(); // SonarQube: two ifs to avoid the side effect by the logical || operator if (str.find('/') != string::npos) return(false); -if ((ptr = getenv(env_name)) == NULL) return(false); +if ((ptr = getenv(env_name)) == nullptr) return(false); env_value = ptr; str = env_value; @@ -1241,7 +1241,7 @@ while ((pos = str.find('$', pos)) != string::npos) { } } nested_name = str.substr(pos_env, (pos_env_end-pos_env)); - if((ptr = getenv(nested_name.c_str())) == NULL) { + if((ptr = getenv(nested_name.c_str())) == nullptr) { mlog << Error << "\n" << method_name << "can't get value of nested environment variable \"" << nested_name << "\" from " << env_name << "\n\n"; diff --git a/src/basic/vx_log/logger.cc b/src/basic/vx_log/logger.cc index f4ecb77ef2..5c531468d7 100644 --- a/src/basic/vx_log/logger.cc +++ b/src/basic/vx_log/logger.cc @@ -421,8 +421,6 @@ Logger & Logger::operator=(const Logger & l) cerr << "\n\n operator=(const Logger & l) -> This function should never be called\n\n"; exit (1); - return (*this); // left in to keep the compiler quiet - } diff --git a/src/basic/vx_log/str_wrappers.cc b/src/basic/vx_log/str_wrappers.cc index f79c8ed2b0..4361388d71 100644 --- a/src/basic/vx_log/str_wrappers.cc +++ b/src/basic/vx_log/str_wrappers.cc @@ -66,7 +66,7 @@ char *m_strcpy2(const char *from_str, const char *method_name, const char *extra } else { mlog << Error << "\n" << method_name - << " Do not copy the string because a from_string is NULL. " + << " Do not copy the string because a from_string is nullptr. " << (extra_msg == 0 ? "" : extra_msg) << "\n\n"; } @@ -79,12 +79,12 @@ void m_strncpy(char *to_str, const char *from_str, const int buf_len, const char *method_name, const char *extra_msg, bool truncate) { if (!from_str){ mlog << Warning << "\n" << method_name - << " Do not copy the string because a from_string is NULL. " + << " Do not copy the string because a from_string is nullptr. " << (extra_msg == 0 ? "" : extra_msg) << "\n\n"; } else if (!to_str){ mlog << Warning << "\n" << method_name - << " Do not copy the string because a to_string is NULL. " + << " Do not copy the string because a to_string is nullptr. " << (extra_msg == 0 ? 
"" : extra_msg) << "\n\n"; } else { // (from_str && to_str) @@ -131,7 +131,7 @@ bool m_replace_char(char *str_buf, char from_ch, char to_ch, bool all_instances) //////////////////////////////////////////////////////////////////////// void m_rstrip(char *str_buf, int buf_len, bool find_white_ch) { - // Make sure it's NULL terminated + // Make sure it's nullptr terminated if (buf_len >= 0) str_buf[buf_len] = '\0'; // Change the trailing blank space to a null int str_len = m_strlen(str_buf); diff --git a/src/basic/vx_log/string_array.cc b/src/basic/vx_log/string_array.cc index 91e84f573b..5723348a93 100644 --- a/src/basic/vx_log/string_array.cc +++ b/src/basic/vx_log/string_array.cc @@ -267,6 +267,30 @@ return; //////////////////////////////////////////////////////////////////////// +void StringArray::add_uniq(const std::string text) + +{ + + // + // Only store unique strings + // + +if(!has(text)) { + + s.push_back(text); + + Sorted = false; + +} + +return; + +} + + +//////////////////////////////////////////////////////////////////////// + + void StringArray::add(const StringArray & a) { @@ -285,6 +309,35 @@ return; //////////////////////////////////////////////////////////////////////// +void StringArray::add_uniq(const StringArray & a) + +{ + +if ( a.n() == 0 ) return; + + // + // Only store unique strings + // + +for(int i=0; i error in \"" exit ( 1 ); -return ( -100 ); // just to satisfy the compiler - } diff --git a/src/basic/vx_util/GridTemplate.cc b/src/basic/vx_util/GridTemplate.cc index 387173db31..832183084d 100644 --- a/src/basic/vx_util/GridTemplate.cc +++ b/src/basic/vx_util/GridTemplate.cc @@ -99,7 +99,7 @@ GridPoint *GridTemplate::getFirstInGrid( /////////////////////////////////////////////////////////////////////////////// // // Get the next template grid point within the grid. -// Returns NULL when there are no more points in the grid. +// Returns nullptr when there are no more points in the grid. // // Returns a pointer to a static object which must NOT be deleted // by the calling routine. @@ -129,7 +129,7 @@ GridPoint *GridTemplate::getNextInGrid(void) const { } } // end while - return (GridPoint *)NULL; + return (GridPoint *)nullptr; } /////////////////////////////////////////////////////////////////////////////// @@ -155,7 +155,7 @@ GridPoint *GridTemplate::getFirst(const int &base_x, const int &base_y, /////////////////////////////////////////////////////////////////////////////// // // Get the next template grid point without checking the grid bounds. -// Returns NULL when there are no more points. +// Returns nullptr when there are no more points. // // Returns a pointer to a static object which must NOT be deleted // by the calling routine. @@ -164,7 +164,7 @@ GridPoint *GridTemplate::getFirst(const int &base_x, const int &base_y, GridPoint *GridTemplate::getNext(void) const { - GridPoint *next_point = (GridPoint *)NULL; + GridPoint *next_point = (GridPoint *)nullptr; if(_pointInGridIterator != _offsetList.end()) { GridOffset *offset = *_pointInGridIterator; @@ -204,7 +204,7 @@ GridPoint *GridTemplate::getFirstInLftEdge(void) const { /////////////////////////////////////////////////////////////////////////////// // // Get the next template grid point in the first column. -// Returns NULL when there are no more points in the first column. +// Returns nullptr when there are no more points in the first column. // // Returns a pointer to a static object which must NOT be deleted // by the calling routine. 
@@ -235,7 +235,7 @@ GridPoint *GridTemplate::getNextInLftEdge(void) const { } } // end while - return (GridPoint *)NULL; + return (GridPoint *)nullptr; } /////////////////////////////////////////////////////////////////////////////// @@ -258,7 +258,7 @@ GridPoint *GridTemplate::getFirstInTopEdge(void) const { /////////////////////////////////////////////////////////////////////////////// // // Get the next template grid point in the top row. -// Returns NULL when there are no more points in the top row. +// Returns nullptr when there are no more points in the top row. // // Returns a pointer to a static object which must NOT be deleted // by the calling routine. @@ -288,7 +288,7 @@ GridPoint *GridTemplate::getNextInTopEdge(void) const { } } // end while - return (GridPoint *)NULL; + return (GridPoint *)nullptr; } /////////////////////////////////////////////////////////////////////////////// @@ -311,7 +311,7 @@ GridPoint *GridTemplate::getFirstInRgtEdge(void) const { /////////////////////////////////////////////////////////////////////////////// // // Get the next template grid point in the right column. -// Returns NULL when there are no more points in the right column. +// Returns nullptr when there are no more points in the right column. // // Returns a pointer to a static object which must NOT be deleted // by the calling routine. @@ -341,7 +341,7 @@ GridPoint *GridTemplate::getNextInRgtEdge(void) const { } } // end while - return (GridPoint *)NULL; + return (GridPoint *)nullptr; } /////////////////////////////////////////////////////////////////////////////// @@ -364,7 +364,7 @@ GridPoint *GridTemplate::getFirstInBotEdge(void) const { /////////////////////////////////////////////////////////////////////////////// // // Get the next template grid point in the bottom row. -// Returns NULL when there are no more points in the bottom row. +// Returns nullptr when there are no more points in the bottom row. // Initialize the grid dimensions and base location. // // Returns a pointer to a static object which must NOT be deleted @@ -395,7 +395,7 @@ GridPoint *GridTemplate::getNextInBotEdge(void) const { } } // end while - return (GridPoint *)NULL; + return (GridPoint *)nullptr; } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/GridTemplate.h b/src/basic/vx_util/GridTemplate.h index bbef7675f0..130e65d476 100644 --- a/src/basic/vx_util/GridTemplate.h +++ b/src/basic/vx_util/GridTemplate.h @@ -46,7 +46,7 @@ class GridTemplate { // Methods for iterating through the template within the grid centered // on the given point. To use these methods, first call getFirstInGrid() // to get the first point. Then call getNextInGrid() to get all remaining - // points until a (GridPoint *)NULL is returned. Any time getFirstInGrid() + // points until a (GridPoint *) nullptr is returned. Any time getFirstInGrid() // is called, the iteration will be cleared and will start over again. 
// // base_x and base_y give the coordinates of the point around which the diff --git a/src/basic/vx_util/ascii_header.cc b/src/basic/vx_util/ascii_header.cc index 9789dcaa46..27c1d69a1a 100644 --- a/src/basic/vx_util/ascii_header.cc +++ b/src/basic/vx_util/ascii_header.cc @@ -525,7 +525,7 @@ void parse_mctc_fi_oj(const char *str, int &i, int &j) { // Parse Fi_Oj strings i = atoi(ptr); - if((ptr = strrchr(str, '_')) != NULL) { + if((ptr = strrchr(str, '_')) != nullptr) { ptr += 2; j = atoi(ptr); } diff --git a/src/basic/vx_util/command_line.cc b/src/basic/vx_util/command_line.cc index 5937237722..73aab12b2b 100644 --- a/src/basic/vx_util/command_line.cc +++ b/src/basic/vx_util/command_line.cc @@ -847,11 +847,8 @@ else { } - exit ( 1 ); -return; - } @@ -893,8 +890,6 @@ cout << "\n"; exit ( 1 ); -return; - } diff --git a/src/basic/vx_util/data_line.cc b/src/basic/vx_util/data_line.cc index 1424ea2f58..2246dfbba4 100644 --- a/src/basic/vx_util/data_line.cc +++ b/src/basic/vx_util/data_line.cc @@ -649,9 +649,6 @@ mlog << Error << "\nLineDataFile::operator=(const LineDataFile &) -> " exit ( 1 ); - -return ( * this ); - } diff --git a/src/basic/vx_util/data_plane_util.cc b/src/basic/vx_util/data_plane_util.cc index c22a84dcbc..e20c49712d 100644 --- a/src/basic/vx_util/data_plane_util.cc +++ b/src/basic/vx_util/data_plane_util.cc @@ -208,7 +208,7 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, int width, GridTemplateFactory::GridTemplates shape, bool wrap_lon, SingleThresh t, const DataPlane *cmn, const DataPlane *csd, double vld_t) { - GridPoint *gp = NULL; + GridPoint *gp = nullptr; int x, y; int n_vld = 0; int n_thr = 0; @@ -286,7 +286,7 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, // Sum all the points for(gp = gt->getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; + gp != nullptr; gp = gt->getNextInGrid()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld++; @@ -300,7 +300,7 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, // Subtract points from the the bottom edge for(gp = gt->getFirstInBotEdge(); - gp != NULL; + gp != nullptr; gp = gt->getNextInBotEdge()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld--; @@ -314,7 +314,7 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, // Add points from the the top edge for(gp = gt->getFirstInTopEdge(); - gp != NULL; + gp != nullptr; gp = gt->getNextInTopEdge()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld++; diff --git a/src/basic/vx_util/get_filenames.cc b/src/basic/vx_util/get_filenames.cc index 5d77bece11..f8d1bb5d3f 100644 --- a/src/basic/vx_util/get_filenames.cc +++ b/src/basic/vx_util/get_filenames.cc @@ -185,7 +185,7 @@ if ( !directory ) { } -while ( (entry = readdir(directory)) != NULL ) { +while ( (entry = readdir(directory)) != nullptr ) { if ( strcmp(entry->d_name, "." 
) == 0 ) continue; if ( strcmp(entry->d_name, "..") == 0 ) continue;
diff --git a/src/basic/vx_util/interp_util.cc index 1ebd2444b6..b28ecc40dd 100644 --- a/src/basic/vx_util/interp_util.cc +++ b/src/basic/vx_util/interp_util.cc
@@ -70,9 +70,9 @@ NumArray interp_points(const DataPlane &dp, const GridTemplate &gt, int x, int y NumArray points; // Search the neighborhood, storing any points off the grid as bad data - GridPoint *gp = NULL; + GridPoint *gp = nullptr; for(gp = gt.getFirst(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNext()) { + gp != nullptr; gp = gt.getNext()) { if(gp->x < 0 || gp->x >= dp.nx() || gp->y < 0 || gp->y >= dp.ny()) { points.add(bad_data_double);
@@ -95,9 +95,9 @@ double interp_min(const DataPlane &dp, const GridTemplate &gt, double min_v = bad_data_double; // Search the neighborhood - GridPoint *gp = NULL; + GridPoint *gp = nullptr; for(gp = gt.getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the optional mask if(mp) {
@@ -165,7 +165,7 @@ double interp_max(const DataPlane &dp, const GridTemplate &gt, // Search the neighborhood for(GridPoint *gp = gt.getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the optional mask if(mp) {
@@ -236,7 +236,7 @@ double interp_median(const DataPlane &dp, const GridTemplate &gt, // Search the neighborhood for(GridPoint *gp = gt.getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the optional mask if(mp) {
@@ -319,7 +319,7 @@ double interp_uw_mean(const DataPlane &dp, const GridTemplate &gt, // Sum the valid data in the neighborhood for(GridPoint *gp = gt.getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the optional mask if(mp) {
@@ -405,7 +405,7 @@ double interp_dw_mean(const DataPlane &dp, const GridTemplate &gt, } for(GridPoint *gp = gt.getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the optional mask if(mp) {
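The interp_ls_fit() hunk that follows checks a dynamic_cast result before using it: the cast yields nullptr when the GridTemplate is not actually a RectangularTemplate, and the function errors out unless the template is square. A standalone sketch of that downcast-and-guard pattern (Shape and Rectangle are hypothetical stand-ins for the GridTemplate/RectangularTemplate hierarchy):

```cpp
// Sketch only: Shape and Rectangle are hypothetical stand-ins for
// MET's GridTemplate and RectangularTemplate classes.
#include <cstdlib>
#include <iostream>

struct Shape { virtual ~Shape() = default; };

struct Rectangle : Shape {
    Rectangle(int w, int h) : width(w), height(h) {}
    int width, height;
};

// Mirror of the guard in interp_ls_fit(): require a square Rectangle.
void require_square(const Shape &gt) {
    // dynamic_cast on a pointer returns nullptr when the downcast fails,
    // so test against nullptr before dereferencing.
    const Rectangle *rect = dynamic_cast<const Rectangle *>(&gt);
    if (rect == nullptr || rect->width != rect->height) {
        std::cerr << "only SQUARE shapes are supported\n";
        std::exit(1);
    }
    std::cout << "square of size " << rect->width << "\n";
}

int main() {
    Rectangle square(5, 5);
    require_square(square);   // prints "square of size 5"
    Rectangle wide(5, 3);
    require_square(wide);     // prints the error and exits
    return 0;
}
```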
@@ -457,7 +457,7 @@ double interp_ls_fit(const DataPlane &dp, const GridTemplate &gt, // I am going to simply pull out the relevant values from the GT object, // and leave the rest of the function to work the same way as before. const RectangularTemplate* tmpGT = dynamic_cast<const RectangularTemplate*>(&gt); - if((tmpGT == NULL ) || (tmpGT->getHeight() != tmpGT->getWidth())) { + if((tmpGT == nullptr ) || (tmpGT->getHeight() != tmpGT->getWidth())) { mlog << Error << "\ninterp_ls_fit() -> " << "Least Squares Interpolation only supports SQUARE shapes.\n\n"; exit(1);
@@ -656,7 +656,7 @@ double interp_geog_match(const DataPlane &dp, const GridTemplate &gt, interp_d = interp_v = bad_data_double; for(GridPoint *gp = gt.getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the optional mask if(mp) {
@@ -710,7 +710,7 @@ double interp_nbrhd(const DataPlane &dp, const GridTemplate &gt, int x, int y, count = count_thr = 0; for(GridPoint *gp = gt.getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the optional mask if(mp) {
@@ -874,7 +874,7 @@ double interp_best(const DataPlane &dp, const GridTemplate &gt, count = 0; min_d = min_v = bad_data_double; for(GridPoint *gp = gt.getFirstInGrid(x, y, dp.nx(), dp.ny()); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the optional mask if(mp) {
@@ -1056,9 +1056,9 @@ MaskPlane compute_sfc_mask(const GridTemplate &gt, int x, int y, // Search the neighborhood // mp.set_size(nx, ny, false); - GridPoint *gp = NULL; + GridPoint *gp = nullptr; for(gp = gt.getFirstInGrid(x, y, nx, ny); - gp != NULL; gp = gt.getNextInGrid()) { + gp != nullptr; gp = gt.getNextInGrid()) { // Check the land mask if(sfc_info.land_ptr) {
diff --git a/src/basic/vx_util/memory.cc index 563a010977..a832b79840 100644 --- a/src/basic/vx_util/memory.cc +++ b/src/basic/vx_util/memory.cc
@@ -26,7 +26,6 @@ void oom() { mlog << Error << "\nOut of memory!
Exiting!\n\n"; exit(1); - return; } //////////////////////////////////////////////////////////////////////// @@ -39,7 +38,6 @@ void oom_grib2() { << "flag.\n\n"; exit(1); - return; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/python_line.cc b/src/basic/vx_util/python_line.cc index ebc4bfb4cc..555e6151ee 100644 --- a/src/basic/vx_util/python_line.cc +++ b/src/basic/vx_util/python_line.cc @@ -32,9 +32,13 @@ using namespace std; //////////////////////////////////////////////////////////////////////// -static const char set_python_env_wrapper [] = "set_python_env"; +static const char env_PYTHONPATH [] = "PYTHONPATH"; -static const char write_tmp_mpr_wrapper [] = "MET_BASE/wrappers/write_tmp_mpr.py"; +static const char met_python_path [] = "MET_BASE/python"; + +static const char set_python_env_wrapper [] = "pyembed.set_python_env"; + +static const char write_tmp_mpr_wrapper [] = "MET_BASE/python/pyembed/write_tmp_mpr.py"; static const char list_name [] = "mpr_data"; @@ -301,9 +305,7 @@ void PyLineDataFile::do_straight() { -ConcatString command, path, user_base; - -path = set_python_env_wrapper; +ConcatString command, user_base; mlog << Debug(3) << "PyLineDataFile::do_straight() -> " @@ -318,7 +320,7 @@ user_base.chomp(".py"); // start up the python interpreter // -script = new Python3_Script (path.text()); +script = get_python3_script(); // // set up a "new" sys.argv list @@ -344,8 +346,6 @@ if ( PyErr_Occurred() ) { exit ( 1 ); - return; - } // @@ -434,11 +434,7 @@ if ( status ) { } -ConcatString wrapper; - -wrapper = set_python_env_wrapper; - -script = new Python3_Script (wrapper.text()); +script = get_python3_script(); mlog << Debug(4) << "Reading temporary Python line data file: " << tmp_ascii_path << "\n"; @@ -574,6 +570,36 @@ return; } +//////////////////////////////////////////////////////////////////////// + +Python3_Script *get_python3_script() + +{ + +const char *method_name = "get_python3_script()"; +ConcatString path = set_python_env_wrapper; +ConcatString python_path = met_python_path; + +const char *env_pythonpath = getenv(env_PYTHONPATH); + +if (env_pythonpath) { + python_path = env_pythonpath; + python_path.add(':'); +} +python_path.add(replace_path(met_python_path)); +mlog << Debug(0) << method_name << " -> added python path " + << replace_path(met_python_path) << ") to " << env_PYTHONPATH << "\n"; + +setenv(env_PYTHONPATH, python_path.c_str(),1); + + // + // start up the python interpreter + // + +return new Python3_Script (path.text()); + +} + //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/python_line.h b/src/basic/vx_util/python_line.h index 604066e126..9b39e074da 100644 --- a/src/basic/vx_util/python_line.h +++ b/src/basic/vx_util/python_line.h @@ -91,6 +91,9 @@ class PyLineDataFile : public LineDataFile { //////////////////////////////////////////////////////////////////////// +extern Python3_Script *get_python3_script(); + +//////////////////////////////////////////////////////////////////////// #endif /* __PYTHON_LINE_H__ */ diff --git a/src/basic/vx_util/string_fxns.cc b/src/basic/vx_util/string_fxns.cc index 4622cc74f1..cfbf1e5861 100644 --- a/src/basic/vx_util/string_fxns.cc +++ b/src/basic/vx_util/string_fxns.cc @@ -112,7 +112,7 @@ const char * get_short_name(const char * path) { -const char * short_name = (const char *) NULL; +const char * short_name = (const char *) nullptr; if ( path ) { int j; @@ -138,7 +138,7 @@ return ( short_name ); void 
append_char(char *str, const char c) { - char *ptr = (char *) NULL; + char *ptr = (char *) nullptr; // // If the specified characater does not already exist at the
@@ -164,7 +164,7 @@ void append_char(char *str, const char c) void strip_char(char *str, const char c) { - char *ptr = (char *) NULL; + char *ptr = (char *) nullptr; // // If the specified character exists at the end of the string,
@@ -187,8 +187,8 @@ int num_tokens(const char *test_str, const char *separator) { int n = 0; - char *temp_str = (char *) NULL; - char *c = (char *) NULL; + char *temp_str = (char *) nullptr; + char *c = (char *) nullptr; const char *method_name = "num_tokens() -> "; //
@@ -221,12 +221,12 @@ int num_tokens(const char *test_str, const char *separator) // Parse remaining tokens // // - while((c = strtok(0, separator)) != NULL) n++; + while((c = strtok(0, separator)) != nullptr) n++; } } - if(temp_str) { delete [] temp_str; temp_str = (char *) NULL; } + if(temp_str) { delete [] temp_str; temp_str = (char *) nullptr; } return(n); }
@@ -300,15 +300,15 @@ int regex_apply(const char* pat, int num_mat, const char* str, char** &mat) m_strncpy(mat[i], str_dat.substr(pmatch[i].rm_so, mat_len).data(), mat_len, method_name, "mat[i]"); } - mat[num_act] = NULL; + mat[num_act] = nullptr; } } else { - mat = NULL; + mat = nullptr; } regfree(re); - if( re ) { delete re; re = NULL; } + if( re ) { delete re; re = nullptr; } return num_act; }
@@ -317,9 +317,9 @@ int regex_apply(const char* pat, int num_mat, const char* str, char** &mat) void regex_clean(char** &mat) { if( !mat ) return; - for(int i=0; mat[i] != NULL; i++) delete [] mat[i]; + for(int i=0; mat[i] != nullptr; i++) delete [] mat[i]; delete [] mat; - mat = NULL; + mat = nullptr; } ////////////////////////////////////////////////////////////////////////
@@ -383,9 +383,9 @@ ConcatString str_trim(const ConcatString str){ int parse_thresh_index(const char *col_name) { int i = 0; - const char *ptr = (const char *) NULL; + const char *ptr = (const char *) nullptr; - if((ptr = strrchr(col_name, '_')) != NULL) i = atoi(++ptr); + if((ptr = strrchr(col_name, '_')) != nullptr) i = atoi(++ptr); else { mlog << Error << "\nparse_thresh_index() -> " << "unexpected column name specified: \""
diff --git a/src/basic/vx_util/thresh_array.cc index 6ec06f8757..19716163cb 100644 --- a/src/basic/vx_util/thresh_array.cc +++ b/src/basic/vx_util/thresh_array.cc
@@ -277,7 +277,7 @@ void ThreshArray::parse_thresh_str(const char *thresh_str) { lp = line; - while((c = strtok(lp, delim)) != NULL ) { + while((c = strtok(lp, delim)) != nullptr ) { add(c);
diff --git a/src/basic/vx_util/two_d_array.h index 5109e2cbda..42d169d303 100644 --- a/src/basic/vx_util/two_d_array.h +++ b/src/basic/vx_util/two_d_array.h
@@ -274,7 +274,7 @@ T TwoD_Array<T>::operator()(int _x, int _y) const { -return ( E[two_to_one(_x, _y)] ); +return ( get(_x, _y) ); }
@@ -287,6 +287,11 @@ T TwoD_Array<T>::get(int _x, int _y) const { +if (E == nullptr) { + mlog << Error << "\nTwoD_Array::get(x,y) -> E is not initialized\n\n"; + exit ( 1 ); +} + return ( E[two_to_one(_x, _y)] ); }
@@ -301,7 +306,7 @@ bool TwoD_Array<T>::s_is_on(int _x, int _y) const { -return ( (bool) (E[two_to_one(_x, _y)]) ); +return ( (bool) (E == nullptr) ?
false : (E[two_to_one(_x, _y)]) ); } diff --git a/src/libcode/vx_analysis_util/stat_job.cc b/src/libcode/vx_analysis_util/stat_job.cc index 8b2e65aa8b..1addf4dfdc 100644 --- a/src/libcode/vx_analysis_util/stat_job.cc +++ b/src/libcode/vx_analysis_util/stat_job.cc @@ -1094,7 +1094,7 @@ void STATAnalysisJob::parse_job_command(const char *jobstring) { // // Parse the command line entries into a StringArray object // - while((c = strtok(lp, delim)) != NULL) { + while((c = strtok(lp, delim)) != nullptr) { // Skip newline characters if(strcmp(c, "\n") == 0) continue; diff --git a/src/libcode/vx_bool_calc/bool_calc.cc b/src/libcode/vx_bool_calc/bool_calc.cc index bdd05c2c0a..4f7f62330f 100644 --- a/src/libcode/vx_bool_calc/bool_calc.cc +++ b/src/libcode/vx_bool_calc/bool_calc.cc @@ -186,7 +186,6 @@ for (j=0; j<((int) P.size()); ++j) { << "bad token in program ... \n\n"; tok.dump(cerr, 1); exit ( 1 ); - break; } // switch diff --git a/src/libcode/vx_color/color_parser_yacc.cc b/src/libcode/vx_color/color_parser_yacc.cc index b6acc48b1f..ecd795a0e0 100644 --- a/src/libcode/vx_color/color_parser_yacc.cc +++ b/src/libcode/vx_color/color_parser_yacc.cc @@ -196,15 +196,15 @@ static Color dcolor_to_color(const Dcolor &); #line 199 "color_parser_yacc.cc" /* yacc.c:337 */ -# ifndef YY_NULLPTR +# ifndef YY_nullptrPTR # if defined __cplusplus # if 201103L <= __cplusplus -# define YY_NULLPTR nullptr +# define YY_nullptrPTR nullptr # else -# define YY_NULLPTR 0 +# define YY_nullptrPTR 0 # endif # else -# define YY_NULLPTR ((void*)0) +# define YY_nullptrPTR ((void*)0) # endif # endif @@ -591,7 +591,7 @@ static const char *const yytname[] = "$end", "error", "$undefined", "ID", "COLOR_NAME", "INTEGER", "QUOTED_STRING", "FLOAT", "BLEND", "HSV", "GRAYVALUE", "CMYK", "'='", "'{'", "','", "'}'", "'('", "')'", "$accept", "statement_list", - "statement", "ctable_entry", "color_assignment", "color", "number", YY_NULLPTR + "statement", "ctable_entry", "color_assignment", "color", "number", YY_nullptrPTR }; #endif @@ -987,11 +987,11 @@ static int yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, yytype_int16 *yyssp, int yytoken) { - YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); + YYSIZE_T yysize0 = yytnamerr (YY_nullptrPTR, yytname[yytoken]); YYSIZE_T yysize = yysize0; enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; /* Internationalized format string. */ - const char *yyformat = YY_NULLPTR; + const char *yyformat = YY_nullptrPTR; /* Arguments of yyformat. 
*/ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; /* Number of reported tokens (one for the "unexpected", one per @@ -1048,7 +1048,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, } yyarg[yycount++] = yytname[yyx]; { - YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); + YYSIZE_T yysize1 = yysize + yytnamerr (YY_nullptrPTR, yytname[yyx]); if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM) yysize = yysize1; else @@ -2226,8 +2226,6 @@ cout.flush(); exit ( 1 ); -return; - } diff --git a/src/libcode/vx_color/color_parser_yacc.yy b/src/libcode/vx_color/color_parser_yacc.yy index 13c4cbb415..25b9422778 100644 --- a/src/libcode/vx_color/color_parser_yacc.yy +++ b/src/libcode/vx_color/color_parser_yacc.yy @@ -729,8 +729,6 @@ cout.flush(); exit ( 1 ); -return; - } diff --git a/src/libcode/vx_color/color_table.cc b/src/libcode/vx_color/color_table.cc index 7170293a2e..406a3fa7e3 100644 --- a/src/libcode/vx_color/color_table.cc +++ b/src/libcode/vx_color/color_table.cc @@ -536,8 +536,6 @@ mlog << Error << "\nColorTable::interp(double) const -> confused!\n\n"; exit ( 1 ); -return ( black ); - } @@ -554,7 +552,7 @@ clear(); input_filename = filename; -if ( (colorin = fopen(input_filename, "r")) == NULL ) { +if ( (colorin = fopen(input_filename, "r")) == nullptr ) { colorin = (FILE *) 0; diff --git a/src/libcode/vx_data2d/var_info.cc b/src/libcode/vx_data2d/var_info.cc index 908dae7a9e..f074b9bc53 100644 --- a/src/libcode/vx_data2d/var_info.cc +++ b/src/libcode/vx_data2d/var_info.cc @@ -601,7 +601,7 @@ void VarInfo::set_level_info_grib(Dictionary & dict){ // parse the level string components int num_mat = 0; - char** mat = NULL; + char** mat = nullptr; const char* pat_mag = "([ALPRZ])([0-9\\.]+)(\\-[0-9\\.]+)?"; if( 3 > (num_mat = regex_apply(pat_mag, 4, field_level.text(), mat)) ){ mlog << Error << "\nVarInfo::set_level_info_grib() - failed to parse level string '" @@ -811,7 +811,7 @@ int parse_set_attr_flag(Dictionary &dict, const char *key) { /////////////////////////////////////////////////////////////////////////////// EnsVarInfo::EnsVarInfo() { - ctrl_info = NULL; + ctrl_info = nullptr; } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d/var_info.h b/src/libcode/vx_data2d/var_info.h index d8f360f204..e990c587a2 100644 --- a/src/libcode/vx_data2d/var_info.h +++ b/src/libcode/vx_data2d/var_info.h @@ -289,7 +289,7 @@ class EnsVarInfo { void set_ctrl(VarInfo *); VarInfo * get_ctrl(int); - // Get VarInfo from first InputInfo if requested VarInfo is NULL + // Get VarInfo from first InputInfo if requested VarInfo is nullptr VarInfo * get_var_info(int index=0); ConcatString get_file(int index=0); int get_file_index(int index=0); diff --git a/src/libcode/vx_data2d_factory/data2d_factory.cc b/src/libcode/vx_data2d_factory/data2d_factory.cc index 2b94b9fc7e..0c292521b5 100644 --- a/src/libcode/vx_data2d_factory/data2d_factory.cc +++ b/src/libcode/vx_data2d_factory/data2d_factory.cc @@ -123,7 +123,7 @@ MetPythonDataFile * p = 0; exit(1); case FileType_None: - // For FileType_None, silently return a NULL pointer + // For FileType_None, silently return a nullptr pointer mtddf = (Met2dDataFile *) 0; break; diff --git a/src/libcode/vx_data2d_grib/data2d_grib.cc b/src/libcode/vx_data2d_grib/data2d_grib.cc index c46cb3531a..98720ea120 100644 --- a/src/libcode/vx_data2d_grib/data2d_grib.cc +++ b/src/libcode/vx_data2d_grib/data2d_grib.cc @@ -78,10 +78,6 @@ mlog << Error << "\nMetGrib1DataFile::MetGrib1DataFile(const 
MetGrib1DataFile &) exit ( 1 ); -// grib1_init_from_scratch(); -// -// assign(f); - } @@ -97,12 +93,6 @@ mlog << Error << "\nMetGrib1DataFile::operator=(const MetGrib1DataFile &) -> " exit ( 1 ); -// if ( this == &f ) return ( * this ); -// -// assign(f); - -return ( * this ); - } @@ -160,8 +150,6 @@ if ( ! (GF->open(_filename)) ) { mlog << Error << "\nMetGrib1DataFile::open(const char *) -> " << "unable to open grib1 file \"" << _filename << "\"\n\n"; - // exit ( 1 ); - close(); return ( false ); @@ -343,8 +331,6 @@ if ( !GF ) { mlog << Error << "\nMetGrib1DataFile::read_record(const VarInfoGrib &) -> " << "no grib file open!\n\n"; - // exit ( 1 ); - return ( -1 ); } @@ -363,8 +349,6 @@ for (j=0; j<(GF->n_records()); ++j) { mlog << Error << "\nMetGrib1DataFile::read_record(const VarInfoGrib &) -> trouble reading record!\n\n"; - // exit ( 1 ); - return ( -1 ); } @@ -404,8 +388,6 @@ if ( j_match >= 0 ) { mlog << Error << "\nMetGrib1DataFile::read_record(const VarInfoGrib &) -> " << "trouble reading record!\n\n"; - // exit ( 1 ); - return ( -1 ); } diff --git a/src/libcode/vx_data2d_grib/grib_classes.cc b/src/libcode/vx_data2d_grib/grib_classes.cc index b490ea6f0e..cb5107c57e 100644 --- a/src/libcode/vx_data2d_grib/grib_classes.cc +++ b/src/libcode/vx_data2d_grib/grib_classes.cc @@ -73,7 +73,6 @@ if ( !is || !gds || !bms || !bds ) { mlog << Error << "\nGribRecord::GribRecord() -> memory allocation error\n\n"; exit ( 1 ); -// throw GribError(mem_alloc_error, __LINE__, __FILE__, "\n\n GribRecord::GribRecord() -> memory allocation error\n\n"); } @@ -164,7 +163,6 @@ if ( !is || !pds || !gds || !bms || !bds ) { mlog << Error << "\nGribRecord::GribRecord(const GribRecord &) -> memory allocation error\n\n"; exit ( 1 ); -// throw GribError(mem_alloc_error, __LINE__, __FILE__, "\n\n GribRecord::GribRecord(const GribRecord &) -> memory allocation error\n\n"); } @@ -437,11 +435,6 @@ if ( (n < 0) || (gds_flag && (nx > 0) && (ny > 0) && (n >= nx*ny)) ) { mlog << Error << "\nGribRecord::bms_bit(int) -> range check error ... n = " << n << "\n\n"; exit ( 1 ); -// char temp_str[max_temp_str_length]; - -// snprintf(temp_str, sizeof(temp_str), "\n\n GribRecord::bms_bit(int) -> range check error ... 
n = %d\n\n", n); - -// throw GribError(range_chk_error, __LINE__, __FILE__, temp_str); } @@ -588,7 +581,6 @@ if ( !r ) { mlog << Error << "\nvoid GribFileRep::record_extend(int) -> memory allocation error\n\n"; exit ( 1 ); -// throw GribError(mem_alloc_error, __LINE__, __FILE__, "\n\n void GribFileRep::record_extend(int) -> memory allocation error\n\n"); } @@ -719,7 +711,6 @@ if ( (rep->fd = met_open(filename, O_RDONLY)) < 0 ) { exit ( 1 ); - return( false ); } if ( !(rep->buf = new unsigned char [default_gribfile_buf_size]) ) { @@ -878,8 +869,6 @@ if ( strncmp(g.is->grib_name, "GRIB", 4) != 0 ) { return ( 0 ); - // exit ( 1 ); - } s = char3_to_int(g.is->length); @@ -888,30 +877,12 @@ g.record_lseek_offset = file_pos; if ( s > (rep->buf_size) ) rep->realloc_buf(s); -// if ( s > (rep->buf_size) ) { -// -// mlog << Error << "\nGribFile::read_record(GribRecord &) -> " -// << "found a grib record larger than the buffer size.\n\n" -// << " Increase the buffer to at least " << s << " bytes.\n\n\n"; -// -// exit ( 1 ); - -// char temp_str[max_temp_str_length]; - -// snprintf(temp_str, sizeof(temp_str), "\n\n GribFile::read_record(GribRecord &) -> found a grib record larger than the buffer size.\n\n Increase the buffer to at least %d bytes.\n\n\n", s); - -// throw GribError(record_size_error, __LINE__, __FILE__, temp_str); - -// } - if ( read(8, s - 8) == 0 ) return ( 0 ); if ( strncmp((char *) (rep->buf + (s - 4)), "7777", 4) != 0 ) { mlog << Error << "\nGribFile::read_record(GribRecord &) -> trailing \"7777\" not found in grib record\n\n"; -// exit ( 1 ); -// throw GribError(missing_trail_7777_error, __LINE__, __FILE__, "\n\n GribFile::read_record(GribRecord &) -> trailing \"7777\" not found in grib record\n\n"); return ( 0 ); } @@ -1076,7 +1047,6 @@ if ( (g.bds->flag) & 128 ) { mlog << Error << "\nGribFile::read_record(GribRecord &) -> Spherical Harmonic data not implemented.\n\n"; exit ( 1 ); -// throw GribError(spher_harm_not_impl_error, __LINE__, __FILE__, "\n\n GribFile::read_record(GribRecord &) -> Spherical Harmonic data not implemented.\n\n"); } @@ -1085,7 +1055,6 @@ if ( (g.bds->flag) & 64 ) { mlog << Error << "\nGribFile::read_record(GribRecord &) -> Second order packing not implemented.\n\n"; exit ( 1 ); -// throw GribError(second_ord_pkg_not_impl_error, __LINE__, __FILE__, "\n\n GribFile::read_record(GribRecord &) -> Second order packing not implemented.\n\n"); } @@ -1098,11 +1067,6 @@ if ( g.word_size > 32 ) { << " Binary data word sizes > 32 bits are not implemented.\n\n"; exit ( 1 ); -// char temp_str[max_temp_str_length]; - -// snprintf(temp_str, sizeof(temp_str), "\n\n GribFile::read_record(GribRecord &) -> Binary data word size of %d found\n\n Binary data word sizes > 32 bits are not implemented.\n\n", g.word_size); - -// throw GribError(word_size_error, __LINE__, __FILE__, temp_str); } @@ -1241,7 +1205,6 @@ if ( (n_read = ::read(rep->fd, (char *) rep->buf, rep->buf_size)) < 0 ) { mlog << Error << "\nGribFile::read() -> file read error\n\n"; exit ( 1 ); -// throw GribError(file_read_error, __LINE__, __FILE__, "\n\n GribFile::read() -> file read error\n\n"); } @@ -1265,11 +1228,6 @@ if ( bytes > rep->buf_size ) { << " bytes into a " << (rep->buf_size) << " byte buffer\n\n"; exit ( 1 ); -// char temp_str[max_temp_str_length]; - -// snprintf(temp_str, sizeof(temp_str), "\n\n GribFile::read(int) -> can't read %d bytes into a %d byte buffer\n\n", bytes, rep->buf_size); - -// throw GribError(buffer_size_error, __LINE__, __FILE__, temp_str); } @@ -1278,7 +1236,6 @@ if ( (n_read = 
::read(rep->fd, (char *) rep->buf, bytes)) < 0 ) { mlog << Error << "\nGribFile::read() -> file read error\n\n"; exit ( 1 ); -// throw GribError(file_read_error, __LINE__, __FILE__, "\n\n GribFile::read() -> file read error\n\n"); } @@ -1302,7 +1259,6 @@ if ( (buffer_offset + bytes) > (rep->buf_size) ) { mlog << Error << "\nGribFile::read(int, int) -> requested read would overflow buffer\n\n"; exit ( 1 ); -// throw GribError(read_overflow_error, __LINE__, __FILE__, "\n\n GribFile::read(int, int) -> requested read would overflow buffer\n\n"); } @@ -1315,11 +1271,6 @@ if ( n_read != B ) { mlog << Error << "\nGribFile::read() -> file read error ... requested " << B << " bytes, got " << n_read << "\n\n"; exit ( 1 ); -// char temp_str[max_temp_str_length]; - -// snprintf(temp_str, sizeof(temp_str), "\n\n GribFile::read() -> file read error ... requested %d bytes, got %d\n\n", bytes, n_read); - -// throw GribError(file_read_error, __LINE__, __FILE__, temp_str); } @@ -1342,7 +1293,6 @@ if ( (n_read = ::read(rep->fd, c, bytes)) < 0 ) { mlog << Error << "\nGribFile::read() -> file read error\n\n"; exit ( 1 ); -// throw GribError(file_read_error, __LINE__, __FILE__, "\n\n GribFile::read() -> file read error\n\n"); } @@ -1362,7 +1312,7 @@ if ( rep ) { if (--rep->referenceCount == 0) delete rep; - rep = (GribFileRep *) NULL; + rep = (GribFileRep *) nullptr; } @@ -1395,7 +1345,6 @@ if ( (n < 0) || (n >= (rep->n_records)) ) { mlog << Error << "\nGribFile::record_offset(int) -> range check error\n\n"; exit ( 1 ); -// throw GribError(range_chk_error, __LINE__, __FILE__, "\n\n GribFile::record_offset(int) -> range check error\n\n"); } @@ -1416,7 +1365,6 @@ if ( (n < 0) || (n >= (rep->n_records)) ) { mlog << Error << "\nGribFile::gribcode(int) -> range check error\n\n"; exit ( 1 ); -// throw GribError(range_chk_error, __LINE__, __FILE__, "\n\n GribFile::gribcode(int) -> range check error\n\n"); } @@ -2033,7 +1981,6 @@ if ( n_read < 0 ) { mlog << Error << "\nfind_magic_cookie(int) -> trouble reading file\n\n"; exit ( 1 ); -// throw GribError(file_read_error, __LINE__, __FILE__, "\n\n find_magic_cookie(int) -> trouble reading file\n\n"); } @@ -2068,11 +2015,6 @@ switch ( pds->fcst_unit ) { default: mlog << Error << "\ncalc_lead_time(Section1_Header *) -> bad value for fcst unit: " << (pds->fcst_unit) << "\n\n"; exit ( 1 ); -// char temp_str[max_temp_str_length]; - -// snprintf(temp_str, sizeof(temp_str), "\n\n calc_lead_time(Section1_Header *) -> bad value for fcst unit: %d\n\n", pds->fcst_unit); - -// throw GribError(bad_fcst_unit_val_error, __LINE__, __FILE__, temp_str); } diff --git a/src/libcode/vx_data2d_grib/grib_strings.cc b/src/libcode/vx_data2d_grib/grib_strings.cc index c5fee85bd6..b03b614079 100644 --- a/src/libcode/vx_data2d_grib/grib_strings.cc +++ b/src/libcode/vx_data2d_grib/grib_strings.cc @@ -278,7 +278,7 @@ int str_to_grib_code(const char *c, int &pcode, m_strncpy(tmp_str, c, buf_len, method_name); // Retrieve the first token containing the GRIB code info - if((ptr = strtok_r(tmp_str, "()", &save_ptr)) == NULL) { + if((ptr = strtok_r(tmp_str, "()", &save_ptr)) == nullptr) { mlog << Error << "\n" << method_name << "problems parsing the string \"" << c << "\".\n\n"; @@ -289,7 +289,7 @@ int str_to_grib_code(const char *c, int &pcode, gc = str_to_grib_code(ptr, ptv); // Check for probability information - if((ptr = strtok_r(NULL, "()", &save_ptr)) != NULL) { + if((ptr = strtok_r(nullptr, "()", &save_ptr)) != nullptr) { pcode = str_to_prob_info(ptr, pthresh_lo, pthresh_hi, ptv); } // No 
probability information specified @@ -335,7 +335,7 @@ int str_to_prob_info(const char *c, double &pthresh_lo, double &pthresh_hi, gc = str_to_grib_code(ptr, ptv); // Parse the threshold - ptr = strtok_r(NULL, "<>", &save_ptr); + ptr = strtok_r(nullptr, "<>", &save_ptr); if(n_lt > 0) pthresh_hi = atof(ptr); else pthresh_lo = atof(ptr); } @@ -348,11 +348,11 @@ int str_to_prob_info(const char *c, double &pthresh_lo, double &pthresh_hi, else pthresh_hi = atof(ptr); // Parse the GRIB code - ptr = strtok_r(NULL, "<>", &save_ptr); + ptr = strtok_r(nullptr, "<>", &save_ptr); gc = str_to_grib_code(ptr, ptv); // Parse the second threshold - ptr = strtok_r(NULL, "<>", &save_ptr); + ptr = strtok_r(nullptr, "<>", &save_ptr); if(n_lt > 0) pthresh_hi = atof(ptr); else pthresh_lo = atof(ptr); } diff --git a/src/libcode/vx_data2d_grib/var_info_grib.cc b/src/libcode/vx_data2d_grib/var_info_grib.cc index 5b6a0dbfdb..c414bc94f8 100644 --- a/src/libcode/vx_data2d_grib/var_info_grib.cc +++ b/src/libcode/vx_data2d_grib/var_info_grib.cc @@ -304,7 +304,7 @@ void VarInfoGrib::set_dict(Dictionary & dict) { // check for a probability dictionary setting Dictionary* dict_prob; - if(NULL == (dict_prob = dict.lookup_dictionary(conf_key_prob, false, false))) return; + if(nullptr == (dict_prob = dict.lookup_dictionary(conf_key_prob, false, false))) return; // gather information from the prob dictionary ConcatString prob_name = dict_prob->lookup_string(conf_key_name); diff --git a/src/libcode/vx_data2d_grib2/data2d_grib2.cc b/src/libcode/vx_data2d_grib2/data2d_grib2.cc index 59366a6567..b0cdd10e3d 100644 --- a/src/libcode/vx_data2d_grib2/data2d_grib2.cc +++ b/src/libcode/vx_data2d_grib2/data2d_grib2.cc @@ -77,8 +77,6 @@ MetGrib2DataFile & MetGrib2DataFile::operator=(const MetGrib2DataFile &) { mlog << Error << "\nMetGrib2DataFile::operator=(const MetGrib2DataFile &) -> " << "should never be called!\n\n"; exit(1); - - return(*this); } //////////////////////////////////////////////////////////////////////// @@ -104,7 +102,7 @@ void MetGrib2DataFile::close() { bool MetGrib2DataFile::open(const char * _filename) { Filename = _filename; - if( NULL == (FileGrib2 = met_fopen(Filename.c_str(), "r")) ){ + if( nullptr == (FileGrib2 = met_fopen(Filename.c_str(), "r")) ){ mlog << Error << "\nMetGrib2DataFile::open() -> " << "unable to open input GRIB2 file " << _filename << "\n\n"; exit(1); diff --git a/src/libcode/vx_data2d_grib2/var_info_grib2.cc b/src/libcode/vx_data2d_grib2/var_info_grib2.cc index cd38ed15ab..90f5275bbc 100644 --- a/src/libcode/vx_data2d_grib2/var_info_grib2.cc +++ b/src/libcode/vx_data2d_grib2/var_info_grib2.cc @@ -376,7 +376,7 @@ void VarInfoGrib2::set_dict(Dictionary & dict) { // check for a probability dictionary setting Dictionary* dict_prob; - if(NULL == (dict_prob = dict.lookup_dictionary(conf_key_prob, false, false))){ + if(nullptr == (dict_prob = dict.lookup_dictionary(conf_key_prob, false, false))){ mlog << Error << "\nVarInfoGrib2::set_dict() -> " << "if the field name is set to \"PROB\", then a prob dictionary " << "must be defined\n\n"; diff --git a/src/libcode/vx_data2d_nc_met/data2d_nc_met.cc b/src/libcode/vx_data2d_nc_met/data2d_nc_met.cc index 164734275e..46a39bd502 100644 --- a/src/libcode/vx_data2d_nc_met/data2d_nc_met.cc +++ b/src/libcode/vx_data2d_nc_met/data2d_nc_met.cc @@ -57,8 +57,6 @@ MetNcMetDataFile & MetNcMetDataFile::operator=(const MetNcMetDataFile &) { mlog << Error << "\nMetNcMetDataFile::operator=(const MetNcMetDataFile &) -> " << "should never be called!\n\n"; exit(1); - - 
return(*this); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_met/met_file.cc b/src/libcode/vx_data2d_nc_met/met_file.cc index b37a05e997..db2d5f9ca3 100644 --- a/src/libcode/vx_data2d_nc_met/met_file.cc +++ b/src/libcode/vx_data2d_nc_met/met_file.cc @@ -413,7 +413,7 @@ double fill_value; double missing_value = get_var_missing_value(var); get_var_fill_value(var, fill_value); -status = get_nc_data(var, &d, (long *)a); +status = get_nc_data(var, &d, a); if ( !status ) { @@ -516,7 +516,7 @@ for (j=0; j<(a.n_elements()); ++j) { ++count; - if ( (var == NULL) || ( (j != var->x_slot) && (j != var->y_slot) ) ) { + if ( (var == nullptr) || ( (j != var->x_slot) && (j != var->y_slot) ) ) { mlog << Error << "\n" << method_name << "star found in bad slot\n\n"; @@ -543,7 +543,7 @@ if ( count != 2 ) { int x_slot_tmp = 0; int y_slot_tmp = 0; - if ( var == NULL || (var->x_slot < 0) || (var->y_slot < 0) ) { + if ( var == nullptr || (var->x_slot < 0) || (var->y_slot < 0) ) { mlog << Error << "\n" << method_name << "bad x|y|z slot\n\n"; @@ -579,10 +579,10 @@ plane.set_size(Nx, Ny); clock_t clock_time; double nc_time; - long dim[dimCount], cur[dimCount]; + LongArray dim, cur; for (int index=0; index " << "should never be called!\n\n"; exit(1); - - return(*this); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_pinterp/pinterp_file.cc b/src/libcode/vx_data2d_nc_pinterp/pinterp_file.cc index 34834c5a8f..b84aee3a9f 100644 --- a/src/libcode/vx_data2d_nc_pinterp/pinterp_file.cc +++ b/src/libcode/vx_data2d_nc_pinterp/pinterp_file.cc @@ -571,7 +571,7 @@ double d = bad_data_double; double missing_value = get_var_missing_value(var); get_var_fill_value(var, fill_value); -status = get_nc_data(var, &d, (long *)a); +status = get_nc_data(var, &d, a); if ( !status ) { @@ -669,7 +669,7 @@ if ( !found ) { // check x_slot and y_slot // -if ( var == NULL || (var->x_slot < 0) || (var->y_slot < 0) ) { +if ( var == nullptr || (var->x_slot < 0) || (var->y_slot < 0) ) { mlog << Error << "\nPinterpFile::data(NcVar *, const LongArray &, DataPlane &, double &) const -> " @@ -743,12 +743,12 @@ plane.set_size(Nx, Ny); // double d[Ny]; -long offsets[dim_count]; -long lengths[dim_count]; +LongArray offsets; +LongArray lengths; for (int k=0; k " << "should never be called!\n\n"; exit(1); - - return(*this); } //////////////////////////////////////////////////////////////////////// @@ -90,7 +88,7 @@ void MetNcCFDataFile::nccf_init_from_scratch() { //////////////////////////////////////////////////////////////////////// NcVarInfo *MetNcCFDataFile::find_first_data_var() { - NcVarInfo *first_data_var = NULL; + NcVarInfo *first_data_var = nullptr; // Store the name of the first data variable for (int i = 0; i < _file->Nvars; ++i) { if (is_nc_unit_time(_file->Var[i].units_att.c_str()) || @@ -161,7 +159,7 @@ bool MetNcCFDataFile::data_plane(VarInfo &vinfo, DataPlane &plane) { // Not sure why we do this - NcVarInfo *data_var = (NcVarInfo *)NULL; + NcVarInfo *data_var = (NcVarInfo *)nullptr; VarInfoNcCF *vinfo_nc = (VarInfoNcCF *)&vinfo; static const string method_name = "MetNcCFDataFile::data_plane(VarInfo &, DataPlane &) -> "; @@ -175,7 +173,7 @@ bool MetNcCFDataFile::data_plane(VarInfo &vinfo, DataPlane &plane) { // Store the name of the first data variable data_var = find_first_data_var(); - if (NULL != data_var) vinfo_nc->set_req_name(data_var->name.c_str()); + if (nullptr != data_var) 
vinfo_nc->set_req_name(data_var->name.c_str()); } int zdim_slot = bad_data_int; @@ -187,7 +185,7 @@ bool MetNcCFDataFile::data_plane(VarInfo &vinfo, DataPlane &plane) BoolArray is_offset = vinfo_nc->is_offset(); data_var = _file->find_var_name(vinfo_nc->req_name().c_str()); - if (NULL != data_var) { + if (nullptr != data_var) { time_dim_slot = data_var->t_slot; for (int idx=0; idxreq_name() == na_str ) { // Store the name of the first data variable NcVarInfo *data_var = find_first_data_var(); - if (NULL != data_var) vinfo_nc->set_req_name(data_var->name.c_str()); + if (nullptr != data_var) vinfo_nc->set_req_name(data_var->name.c_str()); } LongArray time_offsets = collect_time_offsets(vinfo); @@ -595,7 +593,7 @@ LongArray MetNcCFDataFile::collect_time_offsets(VarInfo &vinfo) { int MetNcCFDataFile::index(VarInfo &vinfo){ - if( NULL == _file->find_var_name( vinfo.name().c_str() ) ) return -1; + if( nullptr == _file->find_var_name( vinfo.name().c_str() ) ) return -1; if( ( vinfo.valid() != 0 && _file->ValidTime[0] != vinfo.valid() ) || ( vinfo.init() != 0 && _file->InitTime != vinfo.init() ) || diff --git a/src/libcode/vx_data2d_nccf/nccf_file.cc b/src/libcode/vx_data2d_nccf/nccf_file.cc index 11592fbaf5..0acfb0ba93 100644 --- a/src/libcode/vx_data2d_nccf/nccf_file.cc +++ b/src/libcode/vx_data2d_nccf/nccf_file.cc @@ -96,7 +96,7 @@ void NcCfFile::init_from_scratch() _ncFile = (NcFile *) 0; _dims = (NcDim **) 0; Var = (NcVarInfo *) 0; - _time_var_info = (NcVarInfo *)NULL; + _time_var_info = (NcVarInfo *)nullptr; _xDim = (NcDim *)0; _yDim = (NcDim *)0; @@ -265,7 +265,7 @@ bool NcCfFile::open(const char * filepath) } } // for j - if (NULL == _time_var_info) { + if (nullptr == _time_var_info) { for (int j=0; jx_slot) && (j != var->y_slot)) ) + if ( var == nullptr || ((j != var->x_slot) && (j != var->y_slot)) ) { mlog << Error << "\n" << method_name << "star found in bad slot\n\n"; @@ -1005,7 +1005,7 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const // check slots - additional logic to satisfy Fortify Null Dereference int x_slot_tmp = 0; int y_slot_tmp = 0; - if (var == NULL || var->x_slot < 0 || var->y_slot < 0) + if (var == nullptr || var->x_slot < 0 || var->y_slot < 0) { mlog << Error << "\n" << method_name << "bad x|y|z slot\n\n"; @@ -1049,11 +1049,11 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const double *d = new double[plane_size]; size_t dim_size; - long offsets[dim_count]; - long lengths[dim_count]; + LongArray offsets; + LongArray lengths; for (int k=0; kgetDim(k).getSize(); if (dim_size < offsets[k]) { mlog << Error << "\n" << method_name @@ -3202,9 +3202,11 @@ LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, bool lat_first = false; if (two_dim_coord) { lat_first = (lat_counts == get_dim_size(lat_var, 0)); - long cur[2], length[2]; - cur[0] = cur[1] = 0; - length[0] = length[1] = 1; + LongArray cur, length; // {0,0}, {1,1} + cur.add(0); + cur.add(0); + length.add(1); + length.add(1); if (lat_first) length[0] = lat_counts; else length[1] = lat_counts; get_nc_data(lat_var,lat_values, length, cur); diff --git a/src/libcode/vx_data2d_nccf/var_info_nccf.cc b/src/libcode/vx_data2d_nccf/var_info_nccf.cc index 184d38b7e1..7050fd3018 100644 --- a/src/libcode/vx_data2d_nccf/var_info_nccf.cc +++ b/src/libcode/vx_data2d_nccf/var_info_nccf.cc @@ -167,7 +167,7 @@ void VarInfoNcCF::set_magic(const ConcatString &nstr, const ConcatString &lstr) set_name(nstr); // If there's no level specification, assume (*, 
*) - if(strchr(lstr.c_str(), '(') == NULL) { + if(strchr(lstr.c_str(), '(') == nullptr) { Level.set_req_name("*,*"); Level.set_name("*,*"); clear_dimension(); @@ -188,20 +188,20 @@ void VarInfoNcCF::set_magic(const ConcatString &nstr, const ConcatString &lstr) Level.set_name(ptr); // If dimensions are specified, clear the default value - if (strchr(ptr, ',') != NULL) clear_dimension(); + if (strchr(ptr, ',') != nullptr) clear_dimension(); // Parse the dimensions bool as_offset = true; - while ((ptr2 = strtok_r(ptr, ",", &save_ptr)) != NULL) { + while ((ptr2 = strtok_r(ptr, ",", &save_ptr)) != nullptr) { // Check for wildcards - if (strchr(ptr2, '*') != NULL) { add_dimension(vx_data2d_star); + if (strchr(ptr2, '*') != nullptr) { add_dimension(vx_data2d_star); } else { as_offset = (*ptr2 != '@'); if (!as_offset) ptr2++; // Check for a range of levels - if ((ptr3 = strchr(ptr2, '-')) != NULL) { + if ((ptr3 = strchr(ptr2, '-')) != nullptr) { // Check if a range has already been supplied if (Dimension.has(range_flag)) { @@ -223,7 +223,7 @@ void VarInfoNcCF::set_magic(const ConcatString &nstr, const ConcatString &lstr) } } // Check for a range of times - else if ((ptr3 = strchr(ptr2, ':')) != NULL) { + else if ((ptr3 = strchr(ptr2, ':')) != nullptr) { // Check if a range has already been supplied if (Dimension.has(range_flag)) { mlog << Error << "\n" << method_name @@ -236,7 +236,7 @@ void VarInfoNcCF::set_magic(const ConcatString &nstr, const ConcatString &lstr) // Store the dimension of the range and limits *ptr3++ = 0; char *ptr_inc = strchr(ptr3, ':'); - if (ptr_inc != NULL) *ptr_inc++ = 0; + if (ptr_inc != nullptr) *ptr_inc++ = 0; mlog << Debug(7) << method_name << " start: " << ptr2 << ", end: " << ptr3 << "\n"; @@ -256,7 +256,7 @@ void VarInfoNcCF::set_magic(const ConcatString &nstr, const ConcatString &lstr) unixtime time_upper = datestring_end ? timestring_to_unix(ptr3) : (as_offset ? 
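The VarInfoNcCF::set_magic() parsing above walks a comma-separated level specification with strtok_r(), treating '*' as a wildcard dimension, a leading '@' as a level value rather than an index offset, '-' as a level range, and ':' as a time range. A standalone sketch of that tokenizing pattern; the input string is a made-up example, not MET output:

#include <cstdio>
#include <cstring>

int main()
{
   char spec[] = "0,*,@10-20";          // hypothetical level specification
   char *save_ptr = nullptr;

   for (char *tok = strtok_r(spec, ",", &save_ptr);
        tok != nullptr;
        tok = strtok_r(nullptr, ",", &save_ptr)) {

      bool as_offset = (*tok != '@');   // '@' marks a value, not an offset
      if (!as_offset) tok++;

      if (strchr(tok, '*'))      printf("wildcard dimension\n");
      else if (strchr(tok, '-')) printf("level-range token: %s\n", tok);
      else if (strchr(tok, ':')) printf("time-range token: %s\n", tok);
      else                       printf("single level: %s\n", tok);
   }
   return 0;
}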
atoi(ptr3) : atof(ptr3)); - if (ptr_inc != NULL) { + if (ptr_inc != nullptr) { if (as_offset) increment = atoi(ptr_inc); else { increment = is_float(ptr_inc) @@ -311,15 +311,15 @@ void VarInfoNcCF::set_magic(const ConcatString &nstr, const ConcatString &lstr) } } - // Set ptr to NULL for next call to strtok - ptr = NULL; + // Set ptr to nullptr for next call to strtok + ptr = nullptr; } // end while } // end else // Check for "/PROB" to indicate a probability forecast - if (strstr(MagicStr.c_str(), "/PROB") != NULL) PFlag = 1; + if (strstr(MagicStr.c_str(), "/PROB") != nullptr) PFlag = 1; // Set the long name tmp_str.format("%s(%s)", req_name().text(), Level.req_name().text()); diff --git a/src/libcode/vx_data2d_python/data2d_python.cc b/src/libcode/vx_data2d_python/data2d_python.cc index 2339be16f2..5e07656e95 100644 --- a/src/libcode/vx_data2d_python/data2d_python.cc +++ b/src/libcode/vx_data2d_python/data2d_python.cc @@ -85,8 +85,6 @@ mlog << Error << "\nMetPythonDataFile::operator=(const MetPythonDataFile &) -> " exit ( 1 ); -return ( * this ); - } diff --git a/src/libcode/vx_data2d_python/python_dataplane.cc b/src/libcode/vx_data2d_python/python_dataplane.cc index fd8524b27f..00217bb7b2 100644 --- a/src/libcode/vx_data2d_python/python_dataplane.cc +++ b/src/libcode/vx_data2d_python/python_dataplane.cc @@ -31,9 +31,9 @@ extern GlobalPython GP; // this needs external linkage static const char * user_ppath = 0; -static const char write_tmp_nc [] = "MET_BASE/wrappers/write_tmp_dataplane.py"; +static const char write_tmp_nc [] = "MET_BASE/python/pyembed/write_tmp_dataplane.py"; -static const char read_tmp_nc [] = "read_tmp_dataplane"; // NO ".py" suffix +static const char read_tmp_nc [] = "pyembed.read_tmp_dataplane"; // NO ".py" suffix static const char tmp_nc_var_name [] = "met_info"; diff --git a/src/libcode/vx_gis/dbf_file.cc b/src/libcode/vx_gis/dbf_file.cc index d28485867c..258ac156b6 100644 --- a/src/libcode/vx_gis/dbf_file.cc +++ b/src/libcode/vx_gis/dbf_file.cc @@ -720,8 +720,6 @@ mlog << Error << "\nDbfFile::operator=(const DbfFile &) -> " exit ( 1 ); -return ( * this ); - } @@ -977,7 +975,6 @@ const size_t buf_size = 65536; unsigned char buf[buf_size]; ConcatString cs; StringArray sa; -int j; // // check range @@ -1021,15 +1018,14 @@ if ( n_read != bytes ) { if ( Header.record_length < buf_size) buf[Header.record_length] = 0; -std::string s = (const char *) buf+1; // skip first byte - // - // parse each subrecord value + // parse each subrecord value, skip first byte // -for (j=0,pos=0; j<(Header.n_subrecs); ++j) { +for (int j=0,pos=1; j<(Header.n_subrecs); ++j) { - cs = s.substr(pos, Header.subrec[j].field_length); + cs << cs_erase; + for (int k=0; k trouble reading file header from shp file \"" << path << "\"\n\n"; - // exit ( 1 ); - close(); return ( false ); @@ -358,13 +354,9 @@ if ( ! is_open() ) { exit ( 1 ); - // return ( false ); - } -// if ( ! is_open() ) return ( -1 ); - const int pos = ::lseek(fd, 0, SEEK_CUR); if ( pos < 0 ) { @@ -398,8 +390,6 @@ if ( ! is_open() ) { exit ( 1 ); - // return ( false ); - } @@ -432,8 +422,6 @@ if ( ! 
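The DbfFile change above drops the intermediate std::string and copies each subrecord byte-by-byte into a ConcatString, starting at position 1 to skip the record's deletion-flag byte. A minimal standalone sketch of the same fixed-width slicing; the record contents and field widths here are hypothetical:

#include <cstdio>
#include <string>
#include <vector>

int main()
{
   const char record[] = " Boulder   CO80301";      // flag byte + 3 fields
   const std::vector<int> field_length = { 10, 2, 5 };

   int pos = 1;                                     // skip the deletion flag
   for (int field_len : field_length) {
      std::string field(record + pos, field_len);   // slice one subrecord
      printf("[%s]\n", field.c_str());
      pos += field_len;
   }
   return 0;
}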
is_open() ) { exit ( 1 ); - // return ( false ); - } int n_read; diff --git a/src/libcode/vx_grid/goes_grid.cc b/src/libcode/vx_grid/goes_grid.cc index 7a58fe4cff..bdbf39ad85 100644 --- a/src/libcode/vx_grid/goes_grid.cc +++ b/src/libcode/vx_grid/goes_grid.cc @@ -30,12 +30,6 @@ using namespace std; #include "goes_grid.h" -//////////////////////////////////////////////////////////////////////// - - -//static double lc_func(double lat, double Cone, const bool is_north); - - //////////////////////////////////////////////////////////////////////// @@ -399,8 +393,6 @@ mlog << Error << "\nGoesImagerGrid::shift_right(int) -> " exit ( 1 ); -return; - } @@ -501,13 +493,13 @@ void GoesImagerData::compute_lat_lon() mlog << Error << method_name << " index=" << index << " too big than " << buf_len << "\n"; else { - if (isnan(lat_rad)) lat = bad_data_float; + if (std::isnan(lat_rad)) lat = bad_data_float; else { lat = lat_rad * deg_per_rad; if (lat > lat_max) {lat_max = lat; idx_lat_max = index; } if (lat < lat_min) {lat_min = lat; idx_lat_min = index; } } - if (isnan(lon_rad)) lon = bad_data_float; + if (std::isnan(lon_rad)) lon = bad_data_float; else { lon = lon_of_projection_origin - (lon_rad * deg_per_rad); if (lon > lon_max) {lon_max = lon; idx_lon_max = index; } diff --git a/src/libcode/vx_grid/lc_grid.cc b/src/libcode/vx_grid/lc_grid.cc index d920379f3c..402f158c88 100644 --- a/src/libcode/vx_grid/lc_grid.cc +++ b/src/libcode/vx_grid/lc_grid.cc @@ -661,8 +661,6 @@ mlog << Error << "\nLambertGrid::shift_right(int) -> " exit ( 1 ); -return; - } diff --git a/src/libcode/vx_grid/merc_grid.cc b/src/libcode/vx_grid/merc_grid.cc index b93e4b61e3..e80b9a3946 100644 --- a/src/libcode/vx_grid/merc_grid.cc +++ b/src/libcode/vx_grid/merc_grid.cc @@ -595,8 +595,6 @@ mlog << Error << "\nMercatorGrid::shift_right(int) -> " exit ( 1 ); -return; - } diff --git a/src/libcode/vx_grid/rot_latlon_grid.cc b/src/libcode/vx_grid/rot_latlon_grid.cc index 35618dcb77..0996d0291d 100644 --- a/src/libcode/vx_grid/rot_latlon_grid.cc +++ b/src/libcode/vx_grid/rot_latlon_grid.cc @@ -383,8 +383,6 @@ mlog << Error << "\nRotatedLatLonGrid::shift_right(int) -> " exit ( 1 ); -return; - } diff --git a/src/libcode/vx_grid/semilatlon_grid.cc b/src/libcode/vx_grid/semilatlon_grid.cc index e6fb9b42a9..a6390e64a2 100644 --- a/src/libcode/vx_grid/semilatlon_grid.cc +++ b/src/libcode/vx_grid/semilatlon_grid.cc @@ -445,8 +445,6 @@ mlog << Error << "\nSemiLatLonGrid::shift_right(int) -> " exit ( 1 ); -return; - } diff --git a/src/libcode/vx_grid/st_grid.cc b/src/libcode/vx_grid/st_grid.cc index 7a8bcbcd21..05a70775f1 100644 --- a/src/libcode/vx_grid/st_grid.cc +++ b/src/libcode/vx_grid/st_grid.cc @@ -31,11 +31,6 @@ using namespace std; //////////////////////////////////////////////////////////////////////// -// static double st_func (double lat, bool is_north_hemisphere); -// static double st_der_func (double lat, bool is_north_hemisphere); -// -// static double st_inv_func (double r, bool is_north_hemisphere); - static void reduce(double & angle); static double stereographic_segment_area(double u0, double v0, double u1, double v1); @@ -594,8 +589,6 @@ mlog << Error << "\nStereographicGrid::shift_right(int) -> " exit ( 1 ); -return; - } @@ -879,34 +872,6 @@ set(data); } -//////////////////////////////////////////////////////////////////////// - -/* -Grid::Grid(const StereoType2Data & data) - -{ - -init_from_scratch(); - -set(data); - -} -*/ - -//////////////////////////////////////////////////////////////////////// - -/* -Grid::Grid(const 
StereoType3Data & data) - -{ - -init_from_scratch(); - -set(data); - -} -*/ - //////////////////////////////////////////////////////////////////////// @@ -932,54 +897,6 @@ return; } -//////////////////////////////////////////////////////////////////////// - -/* -void Grid::set(const StereoType2Data & data) - -{ - -clear(); - -rep = new StereographicGrid (data); - -if ( !rep ) { - - mlog << Error << "\nGrid::set(const StereoType2Data &) -> memory allocation error\n\n"; - - exit ( 1 ); - -} - -return; - -} -*/ - -//////////////////////////////////////////////////////////////////////// - -/* -void Grid::set(const StereoType3Data & data) - -{ - -clear(); - -rep = new StereographicGrid (data); - -if ( !rep ) { - - mlog << Error << "\nGrid::set(const StereoType3Data &) -> memory allocation error\n\n"; - - exit ( 1 ); - -} - -return; - -} -*/ - //////////////////////////////////////////////////////////////////////// @@ -991,7 +908,6 @@ Grid create_aligned_st(double lat_center, double lon_center, { Grid g_new; -// double alpha; double r_center, r_previous; double Qx, Qy; double L; @@ -1027,8 +943,6 @@ data.ny = ny; // calculate orientation longitude // -// alpha = stereographic_alpha(data.scale_lat, data.r_km, data.d_km); - r_center = st_func(lat_center, is_north); r_previous = st_func(lat_previous, is_north); diff --git a/src/libcode/vx_gsl_prob/gsl_randist.cc b/src/libcode/vx_gsl_prob/gsl_randist.cc index 9dd9804132..db3aa76f68 100644 --- a/src/libcode/vx_gsl_prob/gsl_randist.cc +++ b/src/libcode/vx_gsl_prob/gsl_randist.cc @@ -63,7 +63,7 @@ void rng_set(gsl_rng *&r, const char *rng_name, const char *rng_seed) { T = gsl_rng_default; r = gsl_rng_alloc(T); - if(r == NULL) { + if(r == nullptr) { mlog << Error << "\nrng_set() -> " << "error allocating the random number generator!\n\n"; exit(1); @@ -309,7 +309,7 @@ int get_seed() { unsigned char *u = (unsigned char *) 0, t; // Get the current time - curr_time = time(NULL); + curr_time = time(nullptr); // Swap the first and fourth bytes and the second and third bytes u = (unsigned char *) &(curr_time); diff --git a/src/libcode/vx_nc_obs/nc_obs_util.cc b/src/libcode/vx_nc_obs/nc_obs_util.cc index 8f9fa37082..f33eb40f5c 100644 --- a/src/libcode/vx_nc_obs/nc_obs_util.cc +++ b/src/libcode/vx_nc_obs/nc_obs_util.cc @@ -603,10 +603,17 @@ void NetcdfObsVars::read_header_data(MetPointHeader &hdr_data) { float *hdr_lon_block = new float[buf_size]; float *hdr_elv_block = new float[buf_size]; - long offsets[2] = { 0, 0 }; - long lengths[2] = { 1, 1 }; - long offsets_1D[1] = { 0 }; - long lengths_1D[1] = { 1 }; + LongArray offsets; // = { 0, 0 }; + LongArray lengths; // = { 1, 1 }; + LongArray offsets_1D; // = { 0 }; + LongArray lengths_1D; // = { 1 }; + + offsets.add(0); + offsets.add(0); + lengths.add(1); + lengths.add(1); + offsets_1D.add(0); + lengths_1D.add(1); for(int i_start=0; i_start NC_BUFFER_SIZE_32K) @@ -821,10 +828,14 @@ void NetcdfObsVars::read_header_data(MetPointHeader &hdr_data) { bool NetcdfObsVars::read_obs_data(int buf_size, int offset, int qty_len, float *obs_arr, int *qty_idx_arr, char *obs_qty_buf) { bool result = true; - long offsets[2] = { offset, 0 }; - long lengths[2] = { buf_size, 1 }; + LongArray offsets; // = { offset, 0 }; + LongArray lengths; // = { buf_size, 1 }; const char *method_name = "read_obs_data() -> "; + offsets.add(offset); + offsets.add(0); + lengths.add(buf_size); + lengths.add(1); if (IS_VALID_NC(obs_arr_var)) { // Read the current observation message lengths[1] = OBS_ARRAY_LEN; @@ -921,8 +932,8 @@ void 
NetcdfObsVars::read_pb_hdr_data(MetPointHeader &hdr_data) { return; } - long offsets[1] = { 0 }; - long lengths[1] = { 1 }; + LongArray offsets; // = { 0 }; + LongArray lengths; // = { 1 }; bool has_hdr_prpt_typ_var = !IS_INVALID_NC(hdr_prpt_typ_var); bool has_hdr_irpt_typ_var = !IS_INVALID_NC(hdr_irpt_typ_var); bool has_hdr_inst_typ_var = !IS_INVALID_NC(hdr_inst_typ_var); @@ -931,6 +942,9 @@ void NetcdfObsVars::read_pb_hdr_data(MetPointHeader &hdr_data) { if (has_hdr_irpt_typ_var) hdr_data.irpt_typ_array.extend(pb_hdr_count); if (has_hdr_inst_typ_var) hdr_data.inst_typ_array.extend(pb_hdr_count); + offsets.add(0); + lengths.add(1); + // Read PB report type int buf_size = ((pb_hdr_count > NC_BUFFER_SIZE_32K) ? NC_BUFFER_SIZE_32K : (pb_hdr_count)); diff --git a/src/libcode/vx_nc_util/nc_utils.cc b/src/libcode/vx_nc_util/nc_utils.cc index 87fcd8bece..443caa837d 100644 --- a/src/libcode/vx_nc_util/nc_utils.cc +++ b/src/libcode/vx_nc_util/nc_utils.cc @@ -1278,7 +1278,7 @@ float get_float_var(NcVar * var, const int index) { //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, int *data, const long *curs) { +bool get_nc_data(NcVar *var, int *data, const LongArray &curs) { bool return_status = get_nc_data_(var, data, bad_data_int, curs); return(return_status); @@ -1307,7 +1307,7 @@ bool get_nc_data(NcVar *var, int *data, const long dim, const long cur) { //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, int *data, const long *dims, const long *curs) { +bool get_nc_data(NcVar *var, int *data, const LongArray &dims, const LongArray &curs) { bool return_status = get_nc_data_(var, data, bad_data_int, dims, curs); return(return_status); @@ -1315,7 +1315,7 @@ bool get_nc_data(NcVar *var, int *data, const long *dims, const long *curs) { //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, short *data, const long *curs) { +bool get_nc_data(NcVar *var, short *data, const LongArray &curs) { bool return_status = get_nc_data_(var, data, (short)bad_data_int, curs); return(return_status); @@ -1323,7 +1323,7 @@ bool get_nc_data(NcVar *var, short *data, const long *curs) { //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, short *data, const long *dims, const long *curs) { +bool get_nc_data(NcVar *var, short *data, const LongArray &dims, const LongArray &curs) { bool return_status = get_nc_data_(var, data, (short)bad_data_int, dims, curs); return(return_status); @@ -1421,13 +1421,13 @@ bool get_nc_data(NcVar *var, float *data) { for (int idx=0; idxgetDim(dim_count-2); NcDim str_dim = var->getDim(dim_count-1); int count = get_dim_size(&count_dim); int str_len = get_dim_size(&str_dim); - lengths[1] = str_len; char str_buffer[str_len+1]; + + offsets.add(0); + offsets.add(0); + lengths.add(1); + lengths.add(str_len); + result = true; for (int idx=0; idx -bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const long *dims, const long *curs) { +bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const LongArray &dims, const LongArray &curs) { bool return_status = false; const char *method_name = "get_nc_data_(T, *dims, *curs) "; @@ -313,9 +313,16 @@ bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const long *dims, con std::vector start; std::vector count; + int idx =0; int data_size = 1; int dimC = get_dim_count(var); - for (int idx = 0 ; idx < dimC; idx++) { + int dim_cnt = 
dims.n_elements(); + int off_cnt = curs.n_elements(); + int in_cnt = (dim_cnt > off_cnt) ? off_cnt : dim_cnt; + + // madis2nc shares the same dims & curs for 1D, 2D and 3D + if (in_cnt > dimC) in_cnt = dimC; + for (idx = 0 ; idx < in_cnt; idx++) { int dim_size = get_dim_size(var, idx); if ((curs[idx]+dims[idx]) > dim_size) { netCDF::NcDim nc_dim = get_nc_dim(var, idx); @@ -331,6 +338,12 @@ bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const long *dims, con count.push_back((size_t)dims[idx]); data_size *= dims[idx]; } + for (; idx < dimC; idx++) { + int dim_size = get_dim_size(var, idx); + start.push_back((size_t)0); + count.push_back((size_t)dim_size); + data_size *= dim_size; + } for (int idx1=0; idx1 -bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const long *curs) { +bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const LongArray &curs) { bool return_status = false; const char *method_name = "get_nc_data_(*curs) "; if (IS_VALID_NC_P(var)) { int dimC = get_dim_count(var); - long dims[dimC]; + + LongArray dims; for (int idx = 0 ; idx < dimC; idx++) { - dims[idx] = 1; + dims.add(1); } // Retrieve the NetCDF value from the NetCDF variable. @@ -520,7 +534,7 @@ void copy_nc_data_t(netCDF::NcVar *var, float *data, const T *packed_data, << " - " << max_value << "]\n"; } } - mlog << Debug(7) << method_name << "took " + mlog << Debug(7) << method_name << "took " << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; return; } diff --git a/src/libcode/vx_nc_util/nc_var_info.cc b/src/libcode/vx_nc_util/nc_var_info.cc index 68502bf32a..834df56ac8 100644 --- a/src/libcode/vx_nc_util/nc_var_info.cc +++ b/src/libcode/vx_nc_util/nc_var_info.cc @@ -301,7 +301,7 @@ return; NcVarInfo *find_var_info_by_dim_name(NcVarInfo *vars, const string dim_name, const int nvars) { // Find the variable with the same dimension name - NcVarInfo *var = (NcVarInfo *)NULL; + NcVarInfo *var = (NcVarInfo *)nullptr; for (int i = 0; i < nvars; i++) { if (vars[i].name == dim_name) { var = &vars[i]; diff --git a/src/libcode/vx_nc_util/write_netcdf.cc b/src/libcode/vx_nc_util/write_netcdf.cc index c2fd66f493..d492979138 100644 --- a/src/libcode/vx_nc_util/write_netcdf.cc +++ b/src/libcode/vx_nc_util/write_netcdf.cc @@ -45,7 +45,7 @@ void write_netcdf_global(NcFile * f_out, const char *file_name, char hostname_str[max_str_len]; char time_str[max_str_len]; - unix_to_mdyhms(time(NULL), mon, day, yr, hr, min, sec); + unix_to_mdyhms(time(nullptr), mon, day, yr, hr, min, sec); snprintf(time_str, sizeof(time_str), "%.4i%.2i%.2i_%.2i%.2i%.2i", yr, mon, day, hr, min, sec); gethostname(hostname_str, max_str_len); diff --git a/src/libcode/vx_pointdata_python/Makefile.am b/src/libcode/vx_pointdata_python/Makefile.am index 5579afc6fe..4045badcde 100644 --- a/src/libcode/vx_pointdata_python/Makefile.am +++ b/src/libcode/vx_pointdata_python/Makefile.am @@ -12,6 +12,7 @@ include ${top_srcdir}/Make-include noinst_LIBRARIES = libvx_pointdata_python.a libvx_pointdata_python_a_SOURCES = \ + mask_filters.h mask_filters.cc \ pointdata_python.h pointdata_python.cc \ pointdata_from_array.h pointdata_from_array.cc pointdata_from_array.hpp \ python_pointdata.h python_pointdata.cc python_pointdata.hpp diff --git a/src/libcode/vx_pointdata_python/Makefile.in b/src/libcode/vx_pointdata_python/Makefile.in index a68210285d..8d36c6b212 100644 --- a/src/libcode/vx_pointdata_python/Makefile.in +++ b/src/libcode/vx_pointdata_python/Makefile.in @@ -108,6 +108,7 @@ am__v_AR_1 = libvx_pointdata_python_a_AR = $(AR) 
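In the get_nc_data_() template above, callers such as madis2nc may pass the same dims and curs arrays for 1D, 2D, and 3D variables, so the start/count vectors are padded out to the variable's actual rank and any trailing dimension is read in full. A minimal sketch of that padding logic; all names below are hypothetical illustrations, not MET code:

#include <algorithm>
#include <cstddef>
#include <vector>

void pad_start_count(const std::vector<long> &curs,        // caller offsets
                     const std::vector<long> &dims,        // caller lengths
                     const std::vector<size_t> &var_shape, // variable's shape
                     std::vector<size_t> &start,
                     std::vector<size_t> &count)
{
   // honor only as many entries as the caller supplied for both arrays,
   // and never more than the variable's rank
   size_t in_cnt = std::min(curs.size(), dims.size());
   in_cnt = std::min(in_cnt, var_shape.size());

   size_t idx = 0;
   for (; idx < in_cnt; ++idx) {            // use what the caller provided
      start.push_back((size_t)curs[idx]);
      count.push_back((size_t)dims[idx]);
   }
   for (; idx < var_shape.size(); ++idx) {  // pad trailing dims: full read
      start.push_back(0);
      count.push_back(var_shape[idx]);
   }
}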
$(ARFLAGS) libvx_pointdata_python_a_LIBADD = am_libvx_pointdata_python_a_OBJECTS = \ + libvx_pointdata_python_a-mask_filters.$(OBJEXT) \ libvx_pointdata_python_a-pointdata_python.$(OBJEXT) \ libvx_pointdata_python_a-pointdata_from_array.$(OBJEXT) \ libvx_pointdata_python_a-python_pointdata.$(OBJEXT) @@ -129,6 +130,7 @@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = \ + ./$(DEPDIR)/libvx_pointdata_python_a-mask_filters.Po \ ./$(DEPDIR)/libvx_pointdata_python_a-pointdata_from_array.Po \ ./$(DEPDIR)/libvx_pointdata_python_a-pointdata_python.Po \ ./$(DEPDIR)/libvx_pointdata_python_a-python_pointdata.Po @@ -340,6 +342,7 @@ MAINTAINERCLEANFILES = Makefile.in # The library noinst_LIBRARIES = libvx_pointdata_python.a libvx_pointdata_python_a_SOURCES = \ + mask_filters.h mask_filters.cc \ pointdata_python.h pointdata_python.cc \ pointdata_from_array.h pointdata_from_array.cc pointdata_from_array.hpp \ python_pointdata.h python_pointdata.cc python_pointdata.hpp @@ -393,6 +396,7 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_pointdata_python_a-mask_filters.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_pointdata_python_a-pointdata_from_array.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_pointdata_python_a-pointdata_python.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_pointdata_python_a-python_pointdata.Po@am__quote@ # am--include-marker @@ -417,6 +421,20 @@ am--depfiles: $(am__depfiles_remade) @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` +libvx_pointdata_python_a-mask_filters.o: mask_filters.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_pointdata_python_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_pointdata_python_a-mask_filters.o -MD -MP -MF $(DEPDIR)/libvx_pointdata_python_a-mask_filters.Tpo -c -o libvx_pointdata_python_a-mask_filters.o `test -f 'mask_filters.cc' || echo '$(srcdir)/'`mask_filters.cc +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libvx_pointdata_python_a-mask_filters.Tpo $(DEPDIR)/libvx_pointdata_python_a-mask_filters.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='mask_filters.cc' object='libvx_pointdata_python_a-mask_filters.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_pointdata_python_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libvx_pointdata_python_a-mask_filters.o `test -f 'mask_filters.cc' || echo '$(srcdir)/'`mask_filters.cc + +libvx_pointdata_python_a-mask_filters.obj: mask_filters.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_pointdata_python_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_pointdata_python_a-mask_filters.obj -MD -MP -MF $(DEPDIR)/libvx_pointdata_python_a-mask_filters.Tpo -c -o libvx_pointdata_python_a-mask_filters.obj `if test -f 'mask_filters.cc'; then $(CYGPATH_W) 'mask_filters.cc'; else $(CYGPATH_W) '$(srcdir)/mask_filters.cc'; fi` +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) 
$(DEPDIR)/libvx_pointdata_python_a-mask_filters.Tpo $(DEPDIR)/libvx_pointdata_python_a-mask_filters.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='mask_filters.cc' object='libvx_pointdata_python_a-mask_filters.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_pointdata_python_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libvx_pointdata_python_a-mask_filters.obj `if test -f 'mask_filters.cc'; then $(CYGPATH_W) 'mask_filters.cc'; else $(CYGPATH_W) '$(srcdir)/mask_filters.cc'; fi` + libvx_pointdata_python_a-pointdata_python.o: pointdata_python.cc @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_pointdata_python_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_pointdata_python_a-pointdata_python.o -MD -MP -MF $(DEPDIR)/libvx_pointdata_python_a-pointdata_python.Tpo -c -o libvx_pointdata_python_a-pointdata_python.o `test -f 'pointdata_python.cc' || echo '$(srcdir)/'`pointdata_python.cc @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libvx_pointdata_python_a-pointdata_python.Tpo $(DEPDIR)/libvx_pointdata_python_a-pointdata_python.Po @@ -584,7 +602,8 @@ clean: clean-am clean-am: clean-generic clean-noinstLIBRARIES mostlyclean-am distclean: distclean-am - -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-pointdata_from_array.Po + -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-mask_filters.Po + -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-pointdata_from_array.Po -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-pointdata_python.Po -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-python_pointdata.Po -rm -f Makefile @@ -632,7 +651,8 @@ install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am - -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-pointdata_from_array.Po + -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-mask_filters.Po + -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-pointdata_from_array.Po -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-pointdata_python.Po -rm -f ./$(DEPDIR)/libvx_pointdata_python_a-python_pointdata.Po -rm -f Makefile diff --git a/src/libcode/vx_pointdata_python/mask_filters.cc b/src/libcode/vx_pointdata_python/mask_filters.cc new file mode 100644 index 0000000000..bc8aa17d13 --- /dev/null +++ b/src/libcode/vx_pointdata_python/mask_filters.cc @@ -0,0 +1,156 @@ +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +// ** Copyright UCAR (c) 1992 - 2023 +// ** University Corporation for Atmospheric Research (UCAR) +// ** National Center for Atmospheric Research (NCAR) +// ** Research Applications Lab (RAL) +// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* + +//////////////////////////////////////////////////////////////////////// + + +using namespace std; + +#include "mask_filters.h" + + +//////////////////////////////////////////////////////////////////////// + + + // + // Code for class MaskFilters + // + + +//////////////////////////////////////////////////////////////////////// + + +MaskFilters::MaskFilters(): + grid_mask(0), + area_mask(0), + poly_mask(0), + sid_mask(0), + typ_mask(0) +{ + clear(); +} + +//////////////////////////////////////////////////////////////////////// + +MaskFilters::MaskFilters(Grid *_grid_mask, MaskPlane *_area_mask, MaskPoly *_poly_mask, + StringArray *_sid_mask, StringArray *_typ_mask) { + clear(); + grid_mask = _grid_mask; + area_mask = 
_area_mask; + poly_mask = _poly_mask; + sid_mask = _sid_mask; + typ_mask = _typ_mask; +} + +//////////////////////////////////////////////////////////////////////// + +void MaskFilters::clear() { + grid_mask_cnt = 0; + area_mask_cnt = 0; + poly_mask_cnt = 0; + sid_mask_cnt = 0; + typ_mask_cnt = 0; +} + +//////////////////////////////////////////////////////////////////////// + +bool MaskFilters::is_filtered(double lat, double lon) { + bool masked = false; + // Apply the grid mask + if(grid_mask) { + double grid_x, grid_y; + grid_mask->latlon_to_xy(lat, -1.0*lon, grid_x, grid_y); + + if(grid_x < 0 || grid_x >= grid_mask->nx() || + grid_y < 0 || grid_y >= grid_mask->ny()) { + grid_mask_cnt++; + masked = true; + } + + // Apply the area mask + if(area_mask && !masked) { + if(!area_mask->s_is_on(nint(grid_x), nint(grid_y))) { + area_mask_cnt++; + masked = true; + } + } + } + + // Apply the polyline mask + if(poly_mask && !masked) { + if(!poly_mask->latlon_is_inside_dege(lat, lon)) { + poly_mask_cnt++; + masked = true; + } + } + + return masked; +} + +//////////////////////////////////////////////////////////////////////// + +bool MaskFilters::is_filtered_sid(const char *sid) { + bool masked = false; + + // Apply the station ID mask + if(sid_mask) { + if(!sid_mask->has(sid)) { + sid_mask_cnt++; + masked = true; + } + } + + return masked; +} + +//////////////////////////////////////////////////////////////////////// + +bool MaskFilters::is_filtered_typ(const char *msg_typ) { + bool masked = false; + + // Apply the message type mask + if(typ_mask) { + if(!typ_mask->has(msg_typ)) { + typ_mask_cnt++; + masked = true; + } + } + return masked; +} + +//////////////////////////////////////////////////////////////////////// + +void MaskFilters::set_area_mask(MaskPlane *_area_mask) { + area_mask = _area_mask; +} + +//////////////////////////////////////////////////////////////////////// + +void MaskFilters::set_grid_mask(Grid *_grid_mask) { + grid_mask = _grid_mask; +} + +//////////////////////////////////////////////////////////////////////// + +void MaskFilters::set_poly_mask(MaskPoly *_poly_mask) { + poly_mask = _poly_mask; +} + +//////////////////////////////////////////////////////////////////////// + +void MaskFilters::set_sid_mask(StringArray *_sid_mask) { + sid_mask = _sid_mask; +} + +//////////////////////////////////////////////////////////////////////// + +void MaskFilters::set_typ_mask(StringArray *_typ_mask) { + typ_mask = _typ_mask; +} + +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_pointdata_python/mask_filters.h b/src/libcode/vx_pointdata_python/mask_filters.h new file mode 100644 index 0000000000..39d7264138 --- /dev/null +++ b/src/libcode/vx_pointdata_python/mask_filters.h @@ -0,0 +1,82 @@ +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +// ** Copyright UCAR (c) 1992 - 2023 +// ** University Corporation for Atmospheric Research (UCAR) +// ** National Center for Atmospheric Research (NCAR) +// ** Research Applications Lab (RAL) +// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* + + +//////////////////////////////////////////////////////////////////////// + + +#ifndef __MASK_FILTERS_H__ +#define __MASK_FILTERS_H__ + + +//////////////////////////////////////////////////////////////////////// + + +#include "grid_base.h" + + +//////////////////////////////////////////////////////////////////////// + +class MaskFilters { + +protected: + + int grid_mask_cnt; + 
int area_mask_cnt; + int poly_mask_cnt; + int typ_mask_cnt; + int sid_mask_cnt; + + Grid *grid_mask; + MaskPlane *area_mask; + MaskPoly *poly_mask; + StringArray *sid_mask; // station IDs to be excluded + StringArray *typ_mask; // message types to be excluded + +public: + + MaskFilters(); + MaskFilters(Grid *grid_mask, MaskPlane *area_mask, + MaskPoly *poly_mask, StringArray *sid_mask, StringArray *typ_mask); + + void clear(); + + int get_area_mask_cnt(); + int get_grid_mask_cnt(); + int get_poly_mask_cnt(); + int get_sid_mask_cnt(); + int get_typ_mask_cnt(); + + bool is_filtered(double lat, double lon); + bool is_filtered_sid(const char *sid); + bool is_filtered_typ(const char *msg_typ); + + void set_area_mask(MaskPlane *_area_mask); + void set_grid_mask(Grid *_grid_mask); + void set_poly_mask(MaskPoly *_poly_mask); + void set_sid_mask(StringArray *_sid_mask); + void set_typ_mask(StringArray *_typ_mask); + +}; + +//////////////////////////////////////////////////////////////////////// + +inline int MaskFilters::get_area_mask_cnt() { return area_mask_cnt; }; +inline int MaskFilters::get_grid_mask_cnt() { return grid_mask_cnt; }; +inline int MaskFilters::get_poly_mask_cnt() { return poly_mask_cnt; }; +inline int MaskFilters::get_sid_mask_cnt() { return sid_mask_cnt; }; +inline int MaskFilters::get_typ_mask_cnt() { return typ_mask_cnt; }; + + +//////////////////////////////////////////////////////////////////////// + + +#endif /* __MASK_FILTERS_H__ */ + + +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_pointdata_python/pointdata_python.cc b/src/libcode/vx_pointdata_python/pointdata_python.cc index d349ec9bb8..fc5cfcc858 100644 --- a/src/libcode/vx_pointdata_python/pointdata_python.cc +++ b/src/libcode/vx_pointdata_python/pointdata_python.cc @@ -172,7 +172,9 @@ file_name = full_path; file_name.chomp(".py"); // remove possible ".py" suffix from script filename -bool status = python_point_data(file_name.c_str(), file_argc, file_argv, use_xarray, met_data); +MaskFilters *filters = 0; +bool status = python_point_data(file_name.c_str(), file_argc, file_argv, + met_data, filters); met_data.get_hdr_cnt(); met_data.get_obs_cnt(); diff --git a/src/libcode/vx_pointdata_python/python_pointdata.cc b/src/libcode/vx_pointdata_python/python_pointdata.cc index d389596fea..cef71163a5 100644 --- a/src/libcode/vx_pointdata_python/python_pointdata.cc +++ b/src/libcode/vx_pointdata_python/python_pointdata.cc @@ -8,7 +8,10 @@ //////////////////////////////////////////////////////////////////////// +#include +#include +#include "observation.h" #include "vx_python3_utils.h" #include "python_pointdata.h" #include "pointdata_from_array.h" @@ -27,22 +30,120 @@ extern GlobalPython GP; // this needs external linkage static const char * user_ppath = 0; -static const char write_tmp_nc [] = "MET_BASE/wrappers/write_tmp_point_nc.py"; +static const char write_tmp_nc [] = "MET_BASE/python/pyembed/write_tmp_point_nc.py"; -static const char read_tmp_nc [] = "read_tmp_point_nc"; // NO ".py" suffix +static const char read_tmp_nc [] = "pyembed.read_tmp_point_nc"; // NO ".py" suffix //////////////////////////////////////////////////////////////////////// static bool tmp_nc_point_obs(const char * script_name, int user_script_argc, - char ** user_script_argv, MetPointDataPython &met_pd_out); + char ** user_script_argv, MetPointDataPython &met_pd_out, + MaskFilters *filters); static bool straight_python_point_data(const char * script_name, int script_argc, char ** script_argv, - const
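A hedged usage sketch for the new MaskFilters class declared above; the Grid and StringArray arguments are assumed to be configured elsewhere in MET, and the coordinates and station ID are made-up values:

#include "mask_filters.h"

// filter_obs_sketch() and its inputs are hypothetical, not MET code.
void filter_obs_sketch(Grid &grid, StringArray &sid_list)
{
   MaskFilters filters;
   filters.set_grid_mask(&grid);      // reject obs that fall off the grid
   filters.set_sid_mask(&sid_list);   // keep only the listed station IDs

   double lat = 40.0, lon = -105.0;   // made-up observation location
   if (!filters.is_filtered(lat, lon) &&
       !filters.is_filtered_sid("KDEN")) {
      // ... use the observation ...
   }

   // rejection counts accumulate as observations are tested
   int n_off_grid = filters.get_grid_mask_cnt();
   (void) n_off_grid;
}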
bool use_xarray, MetPointDataPython &met_pd_out); + MetPointDataPython &met_pd_out, + MaskFilters *filters); + +bool process_point_data(PyObject *module_obj, MetPointDataPython &met_pd_out); +bool process_point_data_list(PyObject *python_obj, MetPointDataPython &met_pd_out, + MaskFilters *filters); + +//////////////////////////////////////////////////////////////////////// + +void check_header_data(MetPointHeader *header_data, const char *caller) { + + if (header_data->typ_idx_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_typ is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + if (header_data->sid_idx_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_sid is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + if (header_data->vld_idx_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_vld is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + if (header_data->lat_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_lat is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + if (header_data->lon_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_lon is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + if (header_data->elv_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_elv is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + + if (header_data->typ_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_typ_table is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + if (header_data->sid_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_sid_table is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + if (header_data->vld_array.n() == 0) { + mlog << Error << "\n" << caller + << "The hdr_vld_table is empty. Please check if python input is processed properly\n\n"; + exit (1); + } +} //////////////////////////////////////////////////////////////////////// +void check_obs_data(MetPointObsData *obs_data, bool use_var_id, const char *caller) { + + if (obs_data->qty_names.n() == 0) { + mlog << Error << "\n" << caller + << "The obs_qty_table is empty. Please check if python input is processed properly\n\n"; + exit (1); + } + if (use_var_id && obs_data->var_names.n() == 0) { + mlog << Error << "\n" << caller + << "The obs_var_table is empty. 
Please check if python input is processed properly\n\n"; + exit (1); + } + +} + +//////////////////////////////////////////////////////////////////////// + + +PyObject *get_python_object(PyObject *module_obj, const char *python_var_name) +{ + + // + // get the namespace for the module (as a dictionary) + // + + PyObject *module_dict_obj = PyModule_GetDict (module_obj); + + // + // get handles to the objects of interest from the module_dict + // + + PyObject *python_met_point_data = PyDict_GetItemString (module_dict_obj, python_var_name); + + return python_met_point_data; +} + + +//////////////////////////////////////////////////////////////////////// + + static void set_str_array_from_python(PyObject *python_data, const char *python_key, StringArray *out) { const char *method_name = "set_met_array_from_python(StringArray *) -> "; PyObject *str_array_obj = PyDict_GetItemString (python_data, python_key); @@ -63,7 +164,7 @@ static void set_str_array_from_python(PyObject *python_data, const char *python_ bool python_point_data(const char * script_name, int script_argc, char ** script_argv, - const bool use_xarray, MetPointDataPython &met_pd_out) + MetPointDataPython &met_pd_out, MaskFilters *filters) { @@ -71,16 +172,15 @@ bool status = false; if ( user_ppath == 0 ) user_ppath = getenv(user_python_path_env); -if ( user_ppath != 0 ) { - // do_tmp_nc = true; +if ( user_ppath != 0 ) { // do_tmp_nc = true; status = tmp_nc_point_obs(script_name, script_argc, script_argv, - met_pd_out); + met_pd_out, filters); } else { status = straight_python_point_data(script_name, script_argc, script_argv, - use_xarray, met_pd_out); + met_pd_out, filters); } return ( status ); @@ -89,29 +189,21 @@ return ( status ); //////////////////////////////////////////////////////////////////////// -bool process_python_point_data(PyObject *module_obj, MetPointDataPython &met_pd_out) +bool process_point_data(PyObject *python_met_point_data, + MetPointDataPython &met_pd_out) + { int int_value; -PyObject *module_dict_obj = 0; PyObject *python_value = 0; -PyObject *python_met_point_data = 0; ConcatString cs, user_dir, user_base; -const char *method_name = "process_python_point_data -> "; -const char *method_name_s = "process_python_point_data()"; - - // - // get the namespace for the module (as a dictionary) - // - -module_dict_obj = PyModule_GetDict (module_obj); +const char *method_name = "process_point_data -> "; +const char *method_name_s = "process_point_data()"; // // get handles to the objects of interest from the module_dict // -python_met_point_data = PyDict_GetItemString (module_dict_obj, python_key_point_data); - python_value = PyDict_GetItemString (python_met_point_data, python_use_var_id); bool use_var_id = pyobject_as_bool(python_value); @@ -142,8 +234,7 @@ met_pd_out.allocate(int_value); MetPointObsData *obs_data = met_pd_out.get_point_obs_data(); MetPointHeader *header_data = met_pd_out.get_header_data(); - - // look up the data array variable name from the dictionary + // look up the data array variable name from the dictionary set_array_from_python(python_met_point_data, numpy_array_hdr_typ, &header_data->typ_idx_array); set_array_from_python(python_met_point_data, numpy_array_hdr_sid, &header_data->sid_idx_array); @@ -151,59 +242,17 @@ MetPointHeader *header_data = met_pd_out.get_header_data(); set_array_from_python(python_met_point_data, numpy_array_hdr_lat, &header_data->lat_array); set_array_from_python(python_met_point_data, numpy_array_hdr_lon, &header_data->lon_array); 
set_array_from_python(python_met_point_data, numpy_array_hdr_elv, &header_data->elv_array); - if (header_data->typ_idx_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_typ is empty. Please check if python input is processed properly\n\n"; - exit (1); - } - if (header_data->sid_idx_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_sid is empty. Please check if python input is processed properly\n\n"; - exit (1); - } - if (header_data->vld_idx_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_vld is empty. Please check if python input is processed properly\n\n"; - exit (1); - } - if (header_data->lat_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_lat is empty. Please check if python input is processed properly\n\n"; - exit (1); - } - if (header_data->lon_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_lon is empty. Please check if python input is processed properly\n\n"; - exit (1); - } - if (header_data->elv_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_elv is empty. Please check if python input is processed properly\n\n"; - exit (1); - } set_str_array_from_python(python_met_point_data, numpy_array_hdr_typ_table, &header_data->typ_array); set_str_array_from_python(python_met_point_data, numpy_array_hdr_sid_table, &header_data->sid_array); set_str_array_from_python(python_met_point_data, numpy_array_hdr_vld_table, &header_data->vld_array); - if (header_data->typ_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_typ_table is empty. Please check if python input is processed properly\n\n"; - exit (1); - } - if (header_data->sid_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_sid_table is empty. Please check if python input is processed properly\n\n"; - exit (1); - } - if (header_data->vld_array.n() == 0) { - mlog << Error << "\n" << method_name - << "The hdr_vld_table is empty. 
Please check if python input is processed properly\n\n"; - exit (1); - } + set_array_from_python(python_met_point_data, numpy_array_prpt_typ_table, &header_data->prpt_typ_array, false); set_array_from_python(python_met_point_data, numpy_array_irpt_typ_table, &header_data->irpt_typ_array, false); set_array_from_python(python_met_point_data, numpy_array_inst_typ_table, &header_data->inst_typ_array, false); + check_header_data(header_data, method_name); + set_array_from_python(python_met_point_data, numpy_array_obs_qty, obs_data->obs_qids); set_array_from_python(python_met_point_data, numpy_array_obs_hid, obs_data->obs_hids); set_array_from_python(python_met_point_data, numpy_array_obs_vid, obs_data->obs_ids); @@ -213,24 +262,173 @@ MetPointHeader *header_data = met_pd_out.get_header_data(); set_str_array_from_python(python_met_point_data, numpy_array_obs_qty_table, &obs_data->qty_names); set_str_array_from_python(python_met_point_data, numpy_array_obs_var_table, &obs_data->var_names); - if (obs_data->qty_names.n() == 0) { + + check_obs_data(obs_data, use_var_id, method_name); + + if(mlog.verbosity_level()>=point_data_debug_level) { + print_met_data(met_pd_out.get_point_obs_data(), + met_pd_out.get_header_data(), method_name_s); + } + + // + // done + // + +return ( true ); + +} + + +//////////////////////////////////////////////////////////////////////// + +bool process_point_data_list(PyObject *python_point_data, MetPointDataPython &met_pd_out, + MaskFilters *filters) +{ + + bool use_var_id; + Observation obs; + time_t vld_time; + int hid, vid, qid, sid, typ_idx, vld_idx; + double lat, lon, elv, hgt, level, obs_value; + double prev_lat, prev_lon, prev_elv, prev_vld, prev_typ, prev_sid; + Python3_List list(python_point_data); + const char *method_name = "process_point_data_list -> "; + const char *method_name_s = "process_point_data_list()"; + + int obs_cnt = list.size(); + if (obs_cnt == 0) { mlog << Error << "\n" << method_name - << "The obs_qty_table is empty. Please check if python input is processed properly\n\n"; + << "The point observation data is empty. 
Please check if python input is processed properly\n\n"; exit (1); } - if (use_var_id && obs_data->var_names.n() == 0) { + + // + // initialize use_var_id to false + // + + use_var_id = false; + hid = -1; // starts from -1 to be 0 for the first header + prev_lat = prev_lon = prev_elv = bad_data_double; + prev_vld = prev_typ = prev_sid = bad_data_double; + + met_pd_out.allocate(obs_cnt); + MetPointHeader *header_data = met_pd_out.get_header_data(); + MetPointObsData *obs_data = met_pd_out.get_point_obs_data(); + + for (int j=0; jis_filtered(lat, lon)) continue; + if (filters->is_filtered_sid(obs.getStationId().c_str())) continue; + if (filters->is_filtered_typ(obs.getHeaderType().c_str())) continue; + } + + // get message type index + str_data = obs.getHeaderType(); + if ( !header_data->typ_array.has(str_data, typ_idx) ) { + header_data->typ_array.add(str_data); + header_data->typ_array.has(str_data, typ_idx); + } + + // get station ID index + str_data = obs.getStationId(); + if ( !header_data->sid_array.has(str_data, sid) ) { + header_data->sid_array.add(str_data); + header_data->sid_array.has(str_data, sid); + } + + // get valid time index + vld_time = obs.getValidTime(); + if ( !header_data->vld_num_array.has(vld_time, vld_idx) ) { + header_data->vld_num_array.add(vld_time); + header_data->vld_num_array.has(vld_time, vld_idx); + } + + if (!is_eq(prev_lat, lat) || !is_eq(prev_lon, lon) || !is_eq(prev_elv, elv) + || !is_eq(prev_sid, sid) || !is_eq(prev_typ, typ_idx) + || !is_eq(prev_vld, vld_idx)) { + header_data->lat_array.add(lat); + header_data->lon_array.add(lon); + header_data->elv_array.add(elv); + header_data->sid_idx_array.add(sid); + header_data->typ_idx_array.add(typ_idx); + header_data->vld_idx_array.add(vld_idx); + header_data->vld_array.add(obs.getValidTimeString()); + + prev_lat = lat; + prev_lon = lon; + prev_elv = elv; + prev_sid = sid; + prev_typ = typ_idx; + prev_vld = vld_idx; + hid++; + } + obs_data->obs_hids[j] = hid; + + // get the observation variable code + str_data = obs.getVarName(); + if ( use_var_id || !is_number(str_data.c_str()) ) { + use_var_id = true; + // update the list of variable names + if ( !obs_data->var_names.has(str_data, vid) ) { + obs_data->var_names.add(str_data); + obs_data->var_names.has(str_data, vid); + } + } + else { + vid = atoi(obs.getVarName().c_str()); + } + obs_data->obs_ids[j] = vid; + obs.setVarCode(vid); + + // get the quality flag index + str_data = obs.getQualityFlag(); + if ( !obs_data->qty_names.has(str_data, qid) ) { + obs_data->qty_names.add(str_data); + obs_data->qty_names.has(str_data, qid); + } + obs_data->obs_qids[j] = qid; + obs_data->obs_lvls[j] = obs.getPressureLevel(); + obs_data->obs_hgts[j] = obs.getHeight(); + obs_data->obs_vals[j] = obs.getValue(); + + } // for j + + met_pd_out.set_use_var_id(use_var_id); + mlog << Debug(9) << method_name << "use_var_id: \"" << use_var_id + << "\" from python. is_using_var_id(): " << met_pd_out.is_using_var_id() << "\n"; + + if (hid <= 0) { mlog << Error << "\n" << method_name - << "The obs_var_table is empty. Please check if python input is processed properly\n\n"; + << "The header is empty. 
Please check the python script and input\n\n"; exit (1); } + met_pd_out.set_hdr_cnt(hid + 1); - if(mlog.verbosity_level()>=point_data_debug_level) print_met_data(obs_data, header_data, method_name_s); + check_obs_data(obs_data, use_var_id, method_name); + check_header_data(header_data, method_name); + + if(mlog.verbosity_level()>=point_data_debug_level) { + print_met_data(met_pd_out.get_point_obs_data(), + met_pd_out.get_header_data(), method_name_s); + } // // done // -return ( true ); + return ( true ); } @@ -239,17 +437,14 @@ return ( true ); bool straight_python_point_data(const char * script_name, int script_argc, char ** script_argv, - const bool use_xarray, MetPointDataPython &met_pd_out) + MetPointDataPython &met_pd_out, MaskFilters *filters) { int int_value; PyObject *module_obj = 0; -PyObject *module_dict_obj = 0; PyObject *python_value = 0; -PyObject *python_met_point_data = 0; ConcatString cs, user_dir, user_base; const char *method_name = "straight_python_point_data -> "; -const char *method_name_s = "straight_python_point_data()"; cs = script_name; @@ -349,9 +544,24 @@ if ( ! module_obj ) { } +bool result = false; +PyObject *met_point_data = get_python_object(module_obj, python_key_point_data); +if ( met_point_data && met_point_data != &_Py_NoneStruct) { + result = process_point_data(met_point_data, met_pd_out); +} +else { + PyObject *point_data = get_python_object(module_obj, python_key_point_data_list); + if ( point_data && point_data != &_Py_NoneStruct) + result = process_point_data_list(point_data, met_pd_out, filters); + else { + mlog << Warning << "\n" << method_name + << "neither \"" << python_key_point_data << "\" nor \"" + << python_key_point_data_list << "\" found in \"" + << script_name << "\"\n\n"; + } +} -return process_python_point_data(module_obj, met_pd_out); - +return result; } @@ -359,7 +569,8 @@ return process_python_point_data(module_obj, met_pd_out); bool tmp_nc_point_obs(const char * user_script_name, int user_script_argc, - char ** user_script_argv, MetPointDataPython &met_pd_out) + char ** user_script_argv, MetPointDataPython &met_pd_out, + MaskFilters *filters) { @@ -402,6 +613,10 @@ command << cs_erase << replace_path(python_dir) << "\")"; run_python_string(command.text()); +mlog << Debug(0) << method_name << " -> added python path (" + << python_dir << ") to the python interpreter\n"; + +//setenv(env_PYTHONPATH, python_dir.c_str(),1); mlog << Debug(3) << "Running user-specified python instance (MET_PYTHON_EXE=" << user_ppath << ") to run user's python script (" << user_script_name << ").\n"; @@ -509,8 +724,14 @@ if ( !
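straight_python_point_data() above now looks up the module-level met_point_data variable and falls back to the point_data list when the first is missing or None. A minimal sketch of that lookup pattern using the CPython API; lookup_point_data() is a hypothetical name, and both returned references are borrowed:

#include <Python.h>

// "met_point_data" and "point_data" match the python_key_* constants above.
PyObject *lookup_point_data(PyObject *module_obj)
{
   // the module namespace as a dictionary (borrowed reference)
   PyObject *dict = PyModule_GetDict(module_obj);

   PyObject *obj = PyDict_GetItemString(dict, "met_point_data");
   if (obj == nullptr || obj == Py_None) {
      // fall back to the list-of-observations convention
      obj = PyDict_GetItemString(dict, "point_data");
   }
   return obj;   // borrowed reference; may be nullptr if neither exists
}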
module_obj ) { // -process_python_point_data(module_obj, met_pd_out); - +PyObject *met_point_data = get_python_object(module_obj, python_key_point_data); +if ( met_point_data ) { + process_point_data(met_point_data, met_pd_out); +} +else { + PyObject *point_data = get_python_object(module_obj, python_key_point_data_list); + process_point_data_list(point_data, met_pd_out, filters); +} // // cleanup diff --git a/src/libcode/vx_pointdata_python/python_pointdata.h b/src/libcode/vx_pointdata_python/python_pointdata.h index 5bfb87ca2e..284421b17c 100644 --- a/src/libcode/vx_pointdata_python/python_pointdata.h +++ b/src/libcode/vx_pointdata_python/python_pointdata.h @@ -16,6 +16,7 @@ //////////////////////////////////////////////////////////////////////// +#include "mask_filters.h" #include "met_point_data.h" @@ -29,6 +30,7 @@ extern "C" { //////////////////////////////////////////////////////////////////////// static const char python_key_point_data [] = "met_point_data"; +static const char python_key_point_data_list[] = "point_data"; static const char python_key_nhdr [] = "nhdr"; //static const char python_key_npbhdr [] = "npbhdr"; @@ -62,8 +64,10 @@ static const int point_data_debug_level = 10; //////////////////////////////////////////////////////////////////////// -extern bool python_point_data(const char * script_name, int script_argc, char ** script_argv, - const bool use_xarray, MetPointDataPython &met_pd_out); +extern bool python_point_data(const char * script_name, int script_argc, + char ** script_argv, MetPointDataPython &met_pd_out, + MaskFilters *filters); + //extern bool python_point_data(const char *python_command, const bool use_xarray, // MetPointData & po_out); extern void print_met_data(MetPointObsData *obs_data, MetPointHeader *header_data, diff --git a/src/libcode/vx_ps/table_helper.cc b/src/libcode/vx_ps/table_helper.cc index 599ef948fc..f372db824a 100644 --- a/src/libcode/vx_ps/table_helper.cc +++ b/src/libcode/vx_ps/table_helper.cc @@ -75,10 +75,6 @@ mlog << Error exit ( 1 ); -// init_from_scratch(); -// -// assign(t); - } @@ -94,12 +90,6 @@ mlog << Error exit ( 1 ); -// if ( this == &t ) returnt ( * this ); -// -// assign(t); - -return ( * this ); - } @@ -164,7 +154,6 @@ if ( (Nrows <= 0) || (Ncols <= 0) ) { exit ( 1 ); - } int j; @@ -263,7 +252,6 @@ if ( !Plot ) { } - return; } diff --git a/src/libcode/vx_ps/vx_ps.cc b/src/libcode/vx_ps/vx_ps.cc index 7c6afa8eb9..fb34dcc574 100644 --- a/src/libcode/vx_ps/vx_ps.cc +++ b/src/libcode/vx_ps/vx_ps.cc @@ -1943,8 +1943,6 @@ mlog << Error << "\ndefault_media() -> " exit ( 1 ); -return ( no_document_media ); - } diff --git a/src/libcode/vx_pxm/pbm.cc b/src/libcode/vx_pxm/pbm.cc index 14cd0c1bde..f557af72e5 100644 --- a/src/libcode/vx_pxm/pbm.cc +++ b/src/libcode/vx_pxm/pbm.cc @@ -171,8 +171,6 @@ if ( (r < 0) || (r >= Nrows) || (c < 0) || (c >= Ncols) ) { exit ( 1 ); - // return ( 0 ); - } int j, n; @@ -247,8 +245,6 @@ if ( (r < 0) || (r >= Nrows) || (c < 0) || (c >= Ncols) ) { exit ( 1 ); - // return; - } value = (int) (color.red()); @@ -647,9 +643,6 @@ mlog << Error << "\nPbm::rotate(int) -> not yet implemented ... sorry\n\n"; exit ( 1 ); - -return; - } @@ -665,9 +658,6 @@ mlog << Error << "\nPbm::autocrop() -> not yet implemented ... 
sorry\n\n"; exit ( 1 ); - -return; - } diff --git a/src/libcode/vx_pxm/pcm.cc b/src/libcode/vx_pxm/pcm.cc index 4dde04bd24..dc682d2af3 100644 --- a/src/libcode/vx_pxm/pcm.cc +++ b/src/libcode/vx_pxm/pcm.cc @@ -904,10 +904,6 @@ mlog << Error << "\nvoid Pcm::autocrop() -> not yet implemented\n\n"; exit ( 1 ); - - -return; - } diff --git a/src/libcode/vx_pxm/ppm.cc b/src/libcode/vx_pxm/ppm.cc index 25f9c073bc..54dd5bd1e7 100644 --- a/src/libcode/vx_pxm/ppm.cc +++ b/src/libcode/vx_pxm/ppm.cc @@ -727,9 +727,6 @@ mlog << Error << "\nvoid Ppm::autocrop() -> not yet implemented\n\n"; exit ( 1 ); - -return; - } diff --git a/src/libcode/vx_python3_utils/global_python.h b/src/libcode/vx_python3_utils/global_python.h index c0a34d7760..11d555bac6 100644 --- a/src/libcode/vx_python3_utils/global_python.h +++ b/src/libcode/vx_python3_utils/global_python.h @@ -63,7 +63,7 @@ if ( ! is_initialized ) { mlog << Debug(3) << "Initializing MET compile time python instance: " << MET_PYTHON_BIN_EXE << "\n"; - wchar_t *python_path = Py_DecodeLocale(MET_PYTHON_BIN_EXE, NULL); + wchar_t *python_path = Py_DecodeLocale(MET_PYTHON_BIN_EXE, nullptr); Py_SetProgramName(python_path); Py_Initialize(); diff --git a/src/libcode/vx_python3_utils/python3_dict.cc b/src/libcode/vx_python3_utils/python3_dict.cc index 4d9da509de..9fca8e01c2 100644 --- a/src/libcode/vx_python3_utils/python3_dict.cc +++ b/src/libcode/vx_python3_utils/python3_dict.cc @@ -89,8 +89,6 @@ mlog << Error << "\nPython3_Dict::Python3_Dict(const Python3_Dict &) -> " exit ( 1 ); -return; - } @@ -106,8 +104,6 @@ mlog << Error << "\nPython3_Dict(const Python3_Dict &) -> " exit ( 1 ); -return ( * this ); - } @@ -611,13 +607,6 @@ mlog << Error << "\nPython3_Dict::dump_dict_value() -> " exit ( 1 ); - - // - // done - // - -return; - } diff --git a/src/libcode/vx_python3_utils/python3_list.cc b/src/libcode/vx_python3_utils/python3_list.cc index 16aa403dc6..8db256c94b 100644 --- a/src/libcode/vx_python3_utils/python3_list.cc +++ b/src/libcode/vx_python3_utils/python3_list.cc @@ -81,8 +81,6 @@ mlog << Error << "\nPython3_List::Python3_List(const Python3_List &) -> " exit ( 1 ); -return; - } @@ -98,8 +96,6 @@ mlog << Error << "\nPython3_List(const Python3_List &) -> " exit ( 1 ); -return ( * this ); - } diff --git a/src/libcode/vx_python3_utils/python3_script.cc b/src/libcode/vx_python3_utils/python3_script.cc index e34fb3ed7a..34fc038d71 100644 --- a/src/libcode/vx_python3_utils/python3_script.cc +++ b/src/libcode/vx_python3_utils/python3_script.cc @@ -26,7 +26,7 @@ using namespace std; static const char sq = '\''; // single quote -static const char read_tmp_ascii_py [] = "MET_BASE/wrappers/read_tmp_ascii.py"; +static const char read_tmp_ascii_py [] = "MET_BASE/python/pyembed/read_tmp_ascii.py"; //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_python3_utils/python3_script.h b/src/libcode/vx_python3_utils/python3_script.h index 1174385c15..10b648b853 100644 --- a/src/libcode/vx_python3_utils/python3_script.h +++ b/src/libcode/vx_python3_utils/python3_script.h @@ -64,7 +64,7 @@ class Python3_Script { ConcatString filename() const; - PyObject * module(); + PyObject * get_module(); PyObject * dict(); PyObject * module_ascii(); PyObject * dict_ascii(); @@ -92,7 +92,7 @@ class Python3_Script { //////////////////////////////////////////////////////////////////////// -inline PyObject * Python3_Script::module() { return ( Module ); } +inline PyObject * Python3_Script::get_module() { return ( Module ); } inline PyObject * 
Python3_Script::dict() { return ( Dict ); } diff --git a/src/libcode/vx_python3_utils/python3_util.cc b/src/libcode/vx_python3_utils/python3_util.cc index f454c837b1..7c648edc88 100644 --- a/src/libcode/vx_python3_utils/python3_util.cc +++ b/src/libcode/vx_python3_utils/python3_util.cc @@ -34,7 +34,7 @@ size_t len = 0; FILE * f = 0; -if ( (f = open_memstream(&buf, &len)) == NULL ) { +if ( (f = open_memstream(&buf, &len)) == nullptr ) { mlog << Error << "\noperator<<(ostream &, PyObject *) -> " << "unable to open memory stream\n\n"; diff --git a/src/libcode/vx_python3_utils/wchar_argv.cc b/src/libcode/vx_python3_utils/wchar_argv.cc index 7b21e9934e..158ec5672f 100644 --- a/src/libcode/vx_python3_utils/wchar_argv.cc +++ b/src/libcode/vx_python3_utils/wchar_argv.cc @@ -71,8 +71,6 @@ mlog << Error << "\nWchar_Argv::Wchar_Argv(const Wchar_Argv &) -> " exit ( 1 ); -return; - } @@ -88,8 +86,6 @@ mlog << Error << "\nWchar_Argv::operator=(const Wchar_Argv &) -> " exit ( 1 ); -return ( * this ); - } diff --git a/src/libcode/vx_render/flate_filter.cc b/src/libcode/vx_render/flate_filter.cc index 4ef31312d6..a1c44f5338 100644 --- a/src/libcode/vx_render/flate_filter.cc +++ b/src/libcode/vx_render/flate_filter.cc @@ -59,9 +59,9 @@ if ( !inbuf || !outbuf || !s ) { memset(s, 0, sizeof(*s)); -s->zalloc = Z_NULL; -s->zfree = Z_NULL; -s->opaque = Z_NULL; +s->zalloc = NULL; +s->zfree = NULL; +s->opaque = NULL; if ( deflateInit(s, Z_BEST_COMPRESSION) != Z_OK ) { diff --git a/src/libcode/vx_render/render_pbm.cc b/src/libcode/vx_render/render_pbm.cc index c15cde9e94..a7bbcac9fc 100644 --- a/src/libcode/vx_render/render_pbm.cc +++ b/src/libcode/vx_render/render_pbm.cc @@ -172,7 +172,7 @@ pad = 8 - nx%8; if ( pad == 8 ) pad = 0; -if ( out == NULL) { return; } +if ( out == nullptr) { return; } for (r=0; r= 0; --j) { plot.file() << "\n" << ">>\n\nimage\n\n"; -if ( out == NULL) { return; } +if ( out == nullptr) { return; } for (r=0; r= 0; --j) { plot.file() << "\n" << ">>\n\nimage\n\n"; -if ( out == NULL ) { return; } +if ( out == nullptr ) { return; } for (r=0; r= 0; --j) { plot.file() << "\n>>\n\nimage\n\n"; -if ( out == NULL) { return; } +if ( out == nullptr) { return; } for (r=0; r<(image.nrows()); ++r) { diff --git a/src/libcode/vx_seeps/seeps.cc b/src/libcode/vx_seeps/seeps.cc index df5f47e757..ff94441ea1 100644 --- a/src/libcode/vx_seeps/seeps.cc +++ b/src/libcode/vx_seeps/seeps.cc @@ -62,7 +62,7 @@ void release_seeps_climo() { SeepsClimoGrid *get_seeps_climo_grid(int month, int hour) { bool not_found = true; - SeepsClimoGrid *seeps_climo_grid = NULL; + SeepsClimoGrid *seeps_climo_grid = nullptr; for (map::iterator it=seeps_climo_grid_map_00.begin(); it!=seeps_climo_grid_map_00.end(); ++it) { if (it->first == month) { @@ -276,11 +276,11 @@ SeepsClimoRecord *SeepsClimo::create_climo_record( //////////////////////////////////////////////////////////////////////// SeepsRecord *SeepsClimo::get_record(int sid, int month, int hour) { - SeepsRecord *record = NULL; + SeepsRecord *record = nullptr; const char *method_name = "SeepsClimo::get_record() -> "; if (seeps_ready) { - SeepsClimoRecord *climo_record = NULL; + SeepsClimoRecord *climo_record = nullptr; map::iterator it; if (hour < 6 || hour >= 18) { it = seeps_score_00_map.find(sid); @@ -290,7 +290,7 @@ SeepsRecord *SeepsClimo::get_record(int sid, int month, int hour) { it = seeps_score_12_map.find(sid); if (it != seeps_score_12_map.end()) climo_record = it->second; } - if (NULL != climo_record) { + if (nullptr != climo_record) { double p1 = 
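For the flate_filter.cc hunk above: zlib's z_stream allocator fields only need to be null before deflateInit(), so Z_NULL, NULL, and nullptr are interchangeable there, since all are null pointer constants. A minimal sketch assuming only zlib; init_deflate_sketch() is a hypothetical helper, not MET code:

#include <cstring>
#include <zlib.h>

bool init_deflate_sketch(z_stream &s)
{
   std::memset(&s, 0, sizeof(s));
   s.zalloc = nullptr;   // let zlib use its default allocator
   s.zfree  = nullptr;
   s.opaque = nullptr;

   // Z_BEST_COMPRESSION trades speed for the smallest output
   return deflateInit(&s, Z_BEST_COMPRESSION) == Z_OK;
}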
climo_record->p1[month-1]; if (seeps_p1_thresh.check(p1)) { record = new SeepsRecord; @@ -362,7 +362,7 @@ double SeepsClimo::get_score(int sid, double p_fcst, double p_obs, double score = bad_data_double; SeepsRecord *record = get_record(sid, month, hour); - if (NULL != record) { + if (nullptr != record) { // Determine location in contingency table int ic = (p_obs>record->t1)+(p_obs>record->t2); int jc = (p_fcst>record->t1)+(p_fcst>record->t2); @@ -378,10 +378,10 @@ double SeepsClimo::get_score(int sid, double p_fcst, double p_obs, SeepsScore *SeepsClimo::get_seeps_score(int sid, double p_fcst, double p_obs, int month, int hour) { - SeepsScore *score = NULL; + SeepsScore *score = nullptr; SeepsRecord *record = get_record(sid, month, hour); - if (NULL != record) { + if (nullptr != record) { score = new SeepsScore(); score->p1 = record->p1; score->p2 = record->p2; @@ -651,8 +651,8 @@ void SeepsClimo::read_seeps_scores(ConcatString filename) { SeepsClimoGrid::SeepsClimoGrid(int month, int hour) : month{month}, hour{hour} { - p1_buf = p2_buf = t1_buf = t2_buf = NULL; - s12_buf = s13_buf = s21_buf = s23_buf = s31_buf = s32_buf = NULL; + p1_buf = p2_buf = t1_buf = t2_buf = nullptr; + s12_buf = s13_buf = s21_buf = s23_buf = s31_buf = s32_buf = nullptr; ConcatString seeps_name = get_seeps_climo_filename(); seeps_ready = file_exists(seeps_name.c_str()); @@ -678,23 +678,23 @@ SeepsClimoGrid::~SeepsClimoGrid() { void SeepsClimoGrid::clear() { SeepsClimoBase::clear(); - if (NULL != p1_buf) { delete [] p1_buf; p1_buf = NULL; } - if (NULL != p2_buf) { delete [] p2_buf; p2_buf = NULL; } - if (NULL != t1_buf) { delete [] t1_buf; t1_buf = NULL; } - if (NULL != t2_buf) { delete [] t2_buf; t2_buf = NULL; } - if (NULL != s12_buf) { delete [] s12_buf; s12_buf = NULL; } - if (NULL != s13_buf) { delete [] s13_buf; s13_buf = NULL; } - if (NULL != s21_buf) { delete [] s21_buf; s21_buf = NULL; } - if (NULL != s23_buf) { delete [] s23_buf; s23_buf = NULL; } - if (NULL != s31_buf) { delete [] s31_buf; s31_buf = NULL; } - if (NULL != s32_buf) { delete [] s32_buf; s32_buf = NULL; } + if (nullptr != p1_buf) { delete [] p1_buf; p1_buf = nullptr; } + if (nullptr != p2_buf) { delete [] p2_buf; p2_buf = nullptr; } + if (nullptr != t1_buf) { delete [] t1_buf; t1_buf = nullptr; } + if (nullptr != t2_buf) { delete [] t2_buf; t2_buf = nullptr; } + if (nullptr != s12_buf) { delete [] s12_buf; s12_buf = nullptr; } + if (nullptr != s13_buf) { delete [] s13_buf; s13_buf = nullptr; } + if (nullptr != s21_buf) { delete [] s21_buf; s21_buf = nullptr; } + if (nullptr != s23_buf) { delete [] s23_buf; s23_buf = nullptr; } + if (nullptr != s31_buf) { delete [] s31_buf; s31_buf = nullptr; } + if (nullptr != s32_buf) { delete [] s32_buf; s32_buf = nullptr; } }; //////////////////////////////////////////////////////////////////////// SeepsScore *SeepsClimoGrid::get_record(int ix, int iy, double p_fcst, double p_obs) { - SeepsScore *seeps_record = NULL; + SeepsScore *seeps_record = nullptr; const char *method_name = "SeepsClimoGrid::get_record() -> "; if (!is_eq(p_fcst, -9999.0) && !is_eq(p_obs, -9999.0)) { int offset = iy * nx + ix; @@ -836,8 +836,8 @@ void SeepsClimoGrid::read_seeps_scores(ConcatString filename) { s31_buf = new double[nx*ny]; s32_buf = new double[nx*ny]; - long curs[3] = { month-1, 0, 0 }; - long dims[3] = { 1, ny, nx }; + LongArray curs; // = { month-1, 0, 0 }; + LongArray dims; // = { 1, ny, nx }; NcVar var_p1_00 = get_nc_var(nc_file, var_name_p1_00); NcVar var_p2_00 = get_nc_var(nc_file, var_name_p2_00); NcVar var_t1_00 
= get_nc_var(nc_file, var_name_t1_00); @@ -849,6 +849,13 @@ void SeepsClimoGrid::read_seeps_scores(ConcatString filename) { NcVar var_s31_00 = get_nc_var(nc_file, var_name_s31_00); NcVar var_s32_00 = get_nc_var(nc_file, var_name_s32_00); + curs.add(month-1); + curs.add(0); + curs.add(0); + dims.add(1); + dims.add(ny); + dims.add(nx); + if (IS_INVALID_NC(var_p1_00) || !get_nc_data(&var_p1_00, p1_buf, dims, curs)) { mlog << Error << "\n" << method_name << "Did not get p1_00\n\n"; diff --git a/src/libcode/vx_shapedata/mode_conf_info.cc b/src/libcode/vx_shapedata/mode_conf_info.cc index b35e3ddb82..eec28a0445 100644 --- a/src/libcode/vx_shapedata/mode_conf_info.cc +++ b/src/libcode/vx_shapedata/mode_conf_info.cc @@ -928,7 +928,6 @@ switch ( e2->type() ) { mlog << Error << "\n\n ModeConfInfo::is_multivar() const -> bad object type for entry \"fcst.field\"\n\n"; exit ( 1 ); - break; } diff --git a/src/libcode/vx_shapedata/node.cc b/src/libcode/vx_shapedata/node.cc index 97ddf8614a..e961ebf294 100644 --- a/src/libcode/vx_shapedata/node.cc +++ b/src/libcode/vx_shapedata/node.cc @@ -130,7 +130,7 @@ void Node::add_child(const Polyline * poly) { Node *n_ptr = (Node *) 0; // Check for first child - if(child == NULL) { + if(child == nullptr) { child = new Node; @@ -144,8 +144,8 @@ void Node::add_child(const Polyline * poly) { child->p = *(poly); - child->child = NULL; - child->sibling = NULL; + child->child = nullptr; + child->sibling = nullptr; } // Existing children else { @@ -172,8 +172,8 @@ void Node::add_child(const Polyline * poly) { n_ptr->sibling->p = *(poly); - n_ptr->sibling->child = NULL; - n_ptr->sibling->sibling = NULL; + n_ptr->sibling->child = nullptr; + n_ptr->sibling->sibling = nullptr; } return; diff --git a/src/libcode/vx_shapedata/set.cc b/src/libcode/vx_shapedata/set.cc index 82c3bf0307..26985475a0 100644 --- a/src/libcode/vx_shapedata/set.cc +++ b/src/libcode/vx_shapedata/set.cc @@ -169,7 +169,7 @@ if ( s.n_fcst_alloc > 0 ) { } - + if ( s.n_obs_alloc > 0 ) { extend_obs (s.n_obs_alloc); @@ -239,7 +239,7 @@ u = new int [k]; if ( a ) memcpy(u, a, n_alloc*sizeof(int)); -for (j=n_alloc; j " << "unexpected column name specified: \"" diff --git a/src/libcode/vx_statistics/apply_mask.cc b/src/libcode/vx_statistics/apply_mask.cc index 652184200d..2b0fbcfee3 100644 --- a/src/libcode/vx_statistics/apply_mask.cc +++ b/src/libcode/vx_statistics/apply_mask.cc @@ -459,7 +459,7 @@ void parse_poly_2d_data_mask(const ConcatString &mask_poly_str, // Open the data file mtddf = mtddf_factory.new_met_2d_data_file(file_name.c_str(), type); - // If data file pointer is NULL, assume a lat/lon polyline file + // If data file pointer is nullptr, assume a lat/lon polyline file if(!mtddf) { mlog << Error << "\nparse_poly_2d_data_mask() -> " << "cannot read file \"" << file_name << "\"!\n\n"; diff --git a/src/libcode/vx_statistics/compute_ci.cc b/src/libcode/vx_statistics/compute_ci.cc index 94cd053e00..ac287e4c99 100644 --- a/src/libcode/vx_statistics/compute_ci.cc +++ b/src/libcode/vx_statistics/compute_ci.cc @@ -345,10 +345,10 @@ void compute_cts_stats_ci_bca(const gsl_rng *rng_ptr, // for(i=0; iget_record(ix, iy, fcst_value, obs_value); - if (seeps_mpr != NULL) { + if (seeps_mpr != nullptr) { fcst_cat = seeps_mpr->fcst_cat; obs_cat = seeps_mpr->obs_cat; if (fcst_cat == 0) { @@ -1596,7 +1596,7 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob else count_diagonal++; } seeps_score = seeps_mpr->score; - if (isnan(seeps_score)) { + if (std::isnan(seeps_score)) { nan_count++; 
seeps_score = bad_data_double; } @@ -1725,7 +1725,7 @@ double *compute_seeps_density_vector(const PairDataPoint *pd, SeepsAggScore *see if (seeps_cnt == 0) { mlog << Debug(1) << method_name << "no SEEPS_MPR available.\n"; - return NULL; + return nullptr; } // Get lat/lon & convert them to radian and get sin/cos values diff --git a/src/libcode/vx_statistics/contable.cc b/src/libcode/vx_statistics/contable.cc index 772ea14551..9910673f6b 100644 --- a/src/libcode/vx_statistics/contable.cc +++ b/src/libcode/vx_statistics/contable.cc @@ -1649,8 +1649,6 @@ mlog << Error << "\nTTContingencyTable::set_size(int) -> " exit ( 1 ); -return; - } @@ -1666,8 +1664,6 @@ mlog << Error << "\nTTContingencyTable::set_size(int, int) -> " exit ( 1 ); -return; - } diff --git a/src/libcode/vx_statistics/contable_stats.cc b/src/libcode/vx_statistics/contable_stats.cc index 49a91354cd..02aa6dfbbb 100644 --- a/src/libcode/vx_statistics/contable_stats.cc +++ b/src/libcode/vx_statistics/contable_stats.cc @@ -1146,7 +1146,7 @@ for (j=0; j " diff --git a/src/libcode/vx_statistics/pair_data_point.cc b/src/libcode/vx_statistics/pair_data_point.cc index c3e6afae05..706bc0e5a3 100644 --- a/src/libcode/vx_statistics/pair_data_point.cc +++ b/src/libcode/vx_statistics/pair_data_point.cc @@ -70,7 +70,7 @@ void PairDataPoint::init_from_scratch() { seeps_mpr.clear(); seeps.clear(); - seeps_climo = NULL; + seeps_climo = nullptr; clear(); return; @@ -101,7 +101,7 @@ void PairDataPoint::erase() { f_na.erase(); for (int idx=0; idxset_p1_thresh(p1_thresh); + if (nullptr != seeps_climo) seeps_climo->set_p1_thresh(p1_thresh); else mlog << Warning << "\nPairDataPoint::set_seeps_thresh() ignored t1_threshold." << " Load SEEPS climo first\n\n"; } @@ -206,7 +206,7 @@ void PairDataPoint::set_seeps_score(SeepsScore *seeps, int index) { else { if (seeps_mpr[index]) { delete seeps_mpr[index]; - seeps_mpr[index] = NULL; + seeps_mpr[index] = nullptr; } } } @@ -250,7 +250,7 @@ bool PairDataPoint::add_grid_pair(double f, double o, add_grid_obs(o, cmn, csd, wgt); f_na.add(f); - seeps_mpr.push_back(NULL); + seeps_mpr.push_back(nullptr); return(true); } @@ -299,7 +299,7 @@ SeepsScore *PairDataPoint::compute_seeps(const char *sid, double f, int month, day, year, hour, minute, second; int sid_no = atoi(sid); - if (sid_no && NULL != seeps_climo) { + if (sid_no && nullptr != seeps_climo) { unix_to_mdyhms(ut, month, day, year, hour, minute, second); seeps = seeps_climo->get_seeps_score(sid_no, f, o, month, hour); if (mlog.verbosity_level() >= seeps_debug_level diff --git a/src/libcode/vx_tc_util/atcf_line_base.cc b/src/libcode/vx_tc_util/atcf_line_base.cc index f6f4d9849a..d049a684c3 100644 --- a/src/libcode/vx_tc_util/atcf_line_base.cc +++ b/src/libcode/vx_tc_util/atcf_line_base.cc @@ -280,7 +280,7 @@ ConcatString ATCFLineBase::technique() const { else cs = get_item(TechniqueOffset); // Replace instances of AVN with GFS - if(strstr(cs.c_str(), "AVN") != NULL) { + if(strstr(cs.c_str(), "AVN") != nullptr) { if(print_avn_to_gfs_message) { mlog << Debug(1) << "When reading ATCF track data, all instances of " diff --git a/src/libcode/vx_tc_util/diag_file.cc b/src/libcode/vx_tc_util/diag_file.cc index 9710532575..2059e19f6c 100644 --- a/src/libcode/vx_tc_util/diag_file.cc +++ b/src/libcode/vx_tc_util/diag_file.cc @@ -147,8 +147,6 @@ DiagFile & DiagFile::operator=(const DiagFile &) { mlog << Error << "\nDiagFile::operator=(const DiagFile &) -> " << "should never be called!\n\n"; exit(1); - - return(*this); } 
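A recurring cleanup in the hunks above (pcm.cc, ppm.cc, python3_dict.cc, contable.cc, diag_file.cc) strips return; and return ( * this ); statements that sit after exit ( 1 );. A minimal sketch of why those statements are dead, with a hypothetical fatal() helper standing in for the mlog << Error idiom (not a MET function):

#include <cstdlib>
#include <iostream>

// Hypothetical stand-in for the "mlog << Error << ...; exit ( 1 );" pattern.
[[noreturn]] void fatal(const char *msg) {
   std::cerr << "\nERROR: " << msg << "\n\n";
   std::exit(1);   // never returns, so any statement placed after it is unreachable
}

For copy constructors and assignment operators like DiagFile's above, whose whole body is "log and exit", a hedged alternative (not what this patch does) is to delete the members so misuse fails at compile time instead:

class DiagFileLike {                                        // illustrative name only
 public:
   DiagFileLike() = default;
   DiagFileLike(const DiagFileLike &) = delete;             // copying forbidden
   DiagFileLike & operator=(const DiagFileLike &) = delete; // assignment forbidden
};

With = delete, an accidental copy is rejected by the compiler rather than aborting the tool at run time.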
//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/vx_tc_nc_util.cc b/src/libcode/vx_tc_util/vx_tc_nc_util.cc index c6eb467491..2cde15bce3 100644 --- a/src/libcode/vx_tc_util/vx_tc_nc_util.cc +++ b/src/libcode/vx_tc_util/vx_tc_nc_util.cc @@ -271,22 +271,25 @@ void def_tc_range_azimuth(NcFile* nc_out, //////////////////////////////////////////////////////////////////////// -void def_tc_lat_lon_time(NcFile* nc_out, - const NcDim& range_dim, const NcDim& azimuth_dim, - const NcDim& track_point_dim, - NcVar& lat_var, NcVar& lon_var, NcVar& valid_time_var) { +void def_tc_time_lat_lon(NcFile* nc_out, + const NcDim& track_point_dim, const NcDim& range_dim, const NcDim& azimuth_dim, + NcVar& valid_time_var, NcVar& lat_var, NcVar& lon_var) { vector dims; + dims.push_back(track_point_dim); dims.push_back(range_dim); dims.push_back(azimuth_dim); - dims.push_back(track_point_dim); - lat_var = nc_out->addVar("lat", ncDouble, dims); - lon_var = nc_out->addVar("lon", ncDouble, dims); valid_time_var = nc_out->addVar("valid_time", ncUint64, track_point_dim); + lat_var = nc_out->addVar("lat", ncDouble, dims); + lon_var = nc_out->addVar("lon", ncDouble, dims); // Set attributes + add_att(&valid_time_var, "long_name", "valid_time"); + add_att(&valid_time_var, "units", "yyyymmddhh"); + add_att(&valid_time_var, "standard_name", "valid_time"); + add_att(&lat_var, "long_name", "latitude"); add_att(&lat_var, "units", "degrees_north"); add_att(&lat_var, "standard_name", "latitude"); @@ -294,10 +297,6 @@ void def_tc_lat_lon_time(NcFile* nc_out, add_att(&lon_var, "long_name", "longitude"); add_att(&lon_var, "units", "degrees_east"); add_att(&lon_var, "standard_name", "longitude"); - - add_att(&valid_time_var, "long_name", "valid_time"); - add_att(&valid_time_var, "units", "yyyymmddhh"); - add_att(&valid_time_var, "standard_name", "valid_time"); } //////////////////////////////////////////////////////////////////////// @@ -324,20 +323,20 @@ void def_tc_variables(NcFile* nc_out, map > variable_levels, map variable_long_names, map variable_units, + const NcDim& track_point_dim, const NcDim& pressure_dim, const NcDim& range_dim, const NcDim& azimuth_dim, - const NcDim& pressure_dim, const NcDim& track_point_dim, map& data_vars) { vector dims; + dims.push_back(track_point_dim); dims.push_back(range_dim); dims.push_back(azimuth_dim); - dims.push_back(track_point_dim); vector dims_3d; + dims_3d.push_back(track_point_dim); + dims_3d.push_back(pressure_dim); dims_3d.push_back(range_dim); dims_3d.push_back(azimuth_dim); - dims_3d.push_back(pressure_dim); - dims_3d.push_back(track_point_dim); for (map >::iterator i = variable_levels.begin(); i != variable_levels.end(); ++i) { @@ -373,9 +372,9 @@ void def_tc_data(NcFile* nc_out, NcVar& data_var, VarInfo* data_info) { vector dims; + dims.push_back(track_point_dim); dims.push_back(range_dim); dims.push_back(azimuth_dim); - dims.push_back(track_point_dim); ConcatString var_name = data_info->name_attr(); var_name.add("_"); @@ -398,10 +397,10 @@ void def_tc_data_3d(NcFile* nc_out, NcVar& data_var, VarInfo* data_info) { vector dims; + dims.push_back(track_point_dim); + dims.push_back(pressure_dim); dims.push_back(range_dim); dims.push_back(azimuth_dim); - dims.push_back(pressure_dim); - dims.push_back(track_point_dim); data_var = nc_out->addVar( data_info->name_attr(), ncDouble, dims); @@ -420,8 +419,8 @@ void def_tc_azi_mean_data(NcFile* nc_out, NcVar& data_var, VarInfo* data_info) { vector dims; - dims.push_back(range_dim); 
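These def_tc_* hunks move the track point dimension to the front of every dimension list; in the MET sources the vectors here are vector<NcDim> for the definitions and vector<size_t> for the putVar offsets and counts. A netCDF-cxx4 sketch of the reordered definition and a matching single-point write, with simplified signatures:

#include <netcdf>
#include <vector>

using namespace netCDF;

// Define lat with the track point (time-like) dimension leading.
NcVar def_lat(NcFile &nc, NcDim &track_point, NcDim &range, NcDim &azimuth) {
   std::vector<NcDim> dims = { track_point, range, azimuth };
   NcVar lat = nc.addVar("lat", ncDouble, dims);
   lat.putAtt("units", "degrees_north");
   return lat;
}

// Write one track point: offsets and counts use the same ordering.
void write_point(NcVar &var, size_t i_point,
                 size_t range_n, size_t azimuth_n, const double *data) {
   std::vector<size_t> offsets = { i_point, 0, 0 };
   std::vector<size_t> counts  = { 1, range_n, azimuth_n };
   var.putVar(offsets, counts, data);
}

Putting the time-like dimension first matches the CF-style (time, y, x) convention, which appears to be the motivation for renaming def_tc_lat_lon_time() to def_tc_time_lat_lon().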
dims.push_back(track_point_dim); + dims.push_back(range_dim); ConcatString var_name = data_info->name_attr(); var_name.add("_"); @@ -445,14 +444,14 @@ void write_tc_data(NcFile* nc_out, const TcrmwGrid& grid, vector counts; offsets.clear(); + offsets.push_back(i_point); offsets.push_back(0); offsets.push_back(0); - offsets.push_back(i_point); counts.clear(); + counts.push_back(1); counts.push_back(grid.range_n()); counts.push_back(grid.azimuth_n()); - counts.push_back(1); var.putVar(offsets, counts, data); } @@ -468,14 +467,14 @@ void write_tc_data_rev(NcFile* nc_out, const TcrmwGrid& grid, double* data_rev; offsets.clear(); + offsets.push_back(i_point); offsets.push_back(0); offsets.push_back(0); - offsets.push_back(i_point); counts.clear(); + counts.push_back(1); counts.push_back(grid.range_n()); counts.push_back(grid.azimuth_n()); - counts.push_back(1); data_rev = new double[ grid.range_n() * grid.azimuth_n()]; @@ -505,12 +504,12 @@ void write_tc_azi_mean_data(NcFile* nc_out, const TcrmwGrid& grid, double* data_azi_mean; offsets.clear(); - offsets.push_back(0); offsets.push_back(i_point); + offsets.push_back(0); counts.clear(); - counts.push_back(grid.range_n()); counts.push_back(1); + counts.push_back(grid.range_n()); data_rev = new double[ grid.range_n() * grid.azimuth_n()]; @@ -554,26 +553,26 @@ extern void write_tc_pressure_level_data( int i_level = pressure_level_indices[level_str]; offsets.clear(); + offsets.push_back(i_point); offsets.push_back(0); offsets.push_back(0); - offsets.push_back(i_point); offsets_3d.clear(); + offsets_3d.push_back(i_point); + offsets_3d.push_back(i_level); offsets_3d.push_back(0); offsets_3d.push_back(0); - offsets_3d.push_back(i_level); - offsets_3d.push_back(i_point); counts.clear(); + counts.push_back(1); counts.push_back(grid.range_n()); counts.push_back(grid.azimuth_n()); - counts.push_back(1); counts_3d.clear(); - counts_3d.push_back(grid.range_n()); - counts_3d.push_back(grid.azimuth_n()); counts_3d.push_back(1); counts_3d.push_back(1); + counts_3d.push_back(grid.range_n()); + counts_3d.push_back(grid.azimuth_n()); data_rev = new double[ grid.range_n() * grid.azimuth_n()]; @@ -586,12 +585,6 @@ extern void write_tc_pressure_level_data( } } - // string label = level_str.substr(0, 1); - // if (label == "P") { - // var.putVar(offsets_3d, counts_3d, data_rev); - // } else { - // var.putVar(offsets, counts, data_rev); - // } var.putVar(offsets_3d, counts_3d, data_rev); delete[] data_rev; diff --git a/src/libcode/vx_tc_util/vx_tc_nc_util.h b/src/libcode/vx_tc_util/vx_tc_nc_util.h index 32ea30ad63..d909cffe83 100644 --- a/src/libcode/vx_tc_util/vx_tc_nc_util.h +++ b/src/libcode/vx_tc_util/vx_tc_nc_util.h @@ -47,7 +47,7 @@ extern void def_tc_pressure(netCDF::NcFile*, extern void def_tc_range_azimuth(netCDF::NcFile*, const netCDF::NcDim&, const netCDF::NcDim&, const TcrmwGrid&, double); -extern void def_tc_lat_lon_time(netCDF::NcFile*, +extern void def_tc_time_lat_lon(netCDF::NcFile*, const netCDF::NcDim&, const netCDF::NcDim&, const netCDF::NcDim&, netCDF::NcVar&, netCDF::NcVar&, netCDF::NcVar&); diff --git a/src/tools/core/ensemble_stat/ensemble_stat.cc b/src/tools/core/ensemble_stat/ensemble_stat.cc index 7560fb57a2..62885a5f5e 100644 --- a/src/tools/core/ensemble_stat/ensemble_stat.cc +++ b/src/tools/core/ensemble_stat/ensemble_stat.cc @@ -1292,7 +1292,7 @@ void process_grid_vx() { conf_info.obtype.c_str()); // If match was found and includes a value range setting, - // reset to NULL and lookup separately for grid point + // reset to nullptr and 
lookup separately for grid point if(oerr_ptr) { if(oerr_ptr->val_range.n() == 0) { mlog << Debug(3) diff --git a/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc b/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc index 6c958e2e0e..869721655b 100644 --- a/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc +++ b/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc @@ -666,10 +666,10 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, vx_pd.fcst_info->add_input(input_info); // Add InputInfo to fcst info list for each ensemble file provided - // set var_info to NULL to note first VarInfo should be used + // set var_info to nullptr to note first VarInfo should be used int last_member_index = ens_files->n() - (use_ctrl ? 1 : 0); for(j=1; jadd_input(input_info); diff --git a/src/tools/core/mode/mode_exec.cc b/src/tools/core/mode/mode_exec.cc index 5a47a49ab0..cce19fcb42 100644 --- a/src/tools/core/mode/mode_exec.cc +++ b/src/tools/core/mode/mode_exec.cc @@ -1069,11 +1069,11 @@ if ( info.all_false() ) return; // // Get raw values and object ID's for each grid box - // Extra NULL checks to satisfy Fortify + // Extra nullptr checks to satisfy Fortify if ( info.do_raw && - fcst_raw_data != NULL && obs_raw_data != NULL && - engine.fcst_raw != NULL && engine.obs_raw != NULL ) { + fcst_raw_data != nullptr && obs_raw_data != nullptr && + engine.fcst_raw != nullptr && engine.obs_raw != nullptr ) { fcst_raw_data[n] = engine.fcst_raw->data (x, y); obs_raw_data[n] = engine.obs_raw->data (x, y); @@ -1081,35 +1081,35 @@ if ( info.all_false() ) return; } if(engine.fcst_split->is_nonzero(x, y) ) { - if ( info.do_object_raw && fcst_obj_raw_data != NULL && engine.fcst_raw != NULL ) { + if ( info.do_object_raw && fcst_obj_raw_data != nullptr && engine.fcst_raw != nullptr ) { fcst_obj_raw_data[n] = engine.fcst_raw->data(x, y); } - if ( info.do_object_id && fcst_obj_data != NULL && engine.fcst_split != NULL ) { + if ( info.do_object_id && fcst_obj_data != nullptr && engine.fcst_split != nullptr ) { fcst_obj_data[n] = nint(engine.fcst_split->data(x, y)); } } else { - if ( info.do_object_raw && fcst_obj_raw_data != NULL ) { + if ( info.do_object_raw && fcst_obj_raw_data != nullptr ) { fcst_obj_raw_data[n] = bad_data_float; } - if ( info.do_object_id && fcst_obj_data != NULL ) { + if ( info.do_object_id && fcst_obj_data != nullptr ) { fcst_obj_data[n] = bad_data_int; } } if(engine.obs_split->is_nonzero(x, y) ) { - if ( info.do_object_raw && obs_obj_raw_data != NULL ) { + if ( info.do_object_raw && obs_obj_raw_data != nullptr ) { obs_obj_raw_data[n] = engine.obs_raw->data(x, y); } - if ( info.do_object_id && obs_obj_data != NULL ) { + if ( info.do_object_id && obs_obj_data != nullptr ) { obs_obj_data[n] = nint(engine.obs_split->data(x, y)); } } else { - if ( info.do_object_raw && obs_obj_raw_data != NULL) { + if ( info.do_object_raw && obs_obj_raw_data != nullptr) { obs_obj_raw_data[n] = bad_data_float; } - if ( info.do_object_id && obs_obj_data != NULL ) { + if ( info.do_object_id && obs_obj_data != nullptr ) { obs_obj_data[n] = bad_data_int; } } @@ -1118,7 +1118,7 @@ if ( info.all_false() ) return; // Get cluster object ID's for each grid box // - if ( info.do_cluster_id && fcst_clus_data != NULL && obs_clus_data != NULL) { + if ( info.do_cluster_id && fcst_clus_data != nullptr && obs_clus_data != nullptr) { // Write the index of the cluster object if ( engine.fcst_clus_split->data(x, y) > 0 ) { diff --git a/src/tools/core/mode/mode_ps_file.cc 
b/src/tools/core/mode/mode_ps_file.cc index cc7dc50bca..c722da4cf6 100644 --- a/src/tools/core/mode/mode_ps_file.cc +++ b/src/tools/core/mode/mode_ps_file.cc @@ -87,8 +87,6 @@ ModePsFile::ModePsFile(const ModePsFile &) { -// mpsf_init_from_scratch(); - mlog << Error << "\n\n ModePsFile::ModePsFile(const ModePsFile &) -> shoule never be called!\n\n"; exit ( 1 ); @@ -103,16 +101,10 @@ ModePsFile & ModePsFile::operator=(const ModePsFile &) { -// if ( this == &m ) return ( * this ); -// -// assign(m); - mlog << Error << "\n\n ModePsFile::operator=(const ModePsFile &) -> should never be called!\n\n"; exit ( 1 ); -return ( * this ); - } diff --git a/src/tools/core/mode/mode_usage.cc b/src/tools/core/mode/mode_usage.cc index 1825e38043..da6a22cf64 100644 --- a/src/tools/core/mode/mode_usage.cc +++ b/src/tools/core/mode/mode_usage.cc @@ -51,8 +51,6 @@ multivar_usage(); exit ( 1 ); -return; - } diff --git a/src/tools/core/mode_analysis/mode_analysis.cc b/src/tools/core/mode_analysis/mode_analysis.cc index decd318a34..f0ffb1ae15 100644 --- a/src/tools/core/mode_analysis/mode_analysis.cc +++ b/src/tools/core/mode_analysis/mode_analysis.cc @@ -302,7 +302,7 @@ if ( cmd_line.n_elements() != 0 ) { StringArray a; -a = get_filenames(lookin_dirs, NULL, "_obj.txt"); +a = get_filenames(lookin_dirs, nullptr, "_obj.txt"); mode_files.add(a); diff --git a/src/tools/core/pcp_combine/pcp_combine.cc b/src/tools/core/pcp_combine/pcp_combine.cc index 9e51e0523c..0efcb42ac9 100644 --- a/src/tools/core/pcp_combine/pcp_combine.cc +++ b/src/tools/core/pcp_combine/pcp_combine.cc @@ -729,7 +729,7 @@ int search_pcp_dir(const char *cur_dir, const unixtime cur_ut, // // Process each file contained in the directory. // - while((dirp = readdir(dp)) != NULL) { + while((dirp = readdir(dp)) != nullptr) { // // Ignore any hidden files. diff --git a/src/tools/core/stat_analysis/stat_analysis.cc b/src/tools/core/stat_analysis/stat_analysis.cc index a5797b518a..26b15deb57 100644 --- a/src/tools/core/stat_analysis/stat_analysis.cc +++ b/src/tools/core/stat_analysis/stat_analysis.cc @@ -456,7 +456,7 @@ void process_search_dirs() { // // Get the list of stat files in the search directories // - files = get_filenames(search_dirs, NULL, stat_file_ext); + files = get_filenames(search_dirs, nullptr, stat_file_ext); n = files.n_elements(); @@ -876,7 +876,7 @@ void open_temps() // Build the temp file name // tmp_file << tmp_dir << "/" << "tmp_stat_analysis"; - tmp_path = make_temp_file_name(tmp_file.c_str(), NULL); + tmp_path = make_temp_file_name(tmp_file.c_str(), nullptr); // // Open the temp file diff --git a/src/tools/core/stat_analysis/stat_analysis_job.cc b/src/tools/core/stat_analysis/stat_analysis_job.cc index 53fd77949b..ea6322a2a5 100644 --- a/src/tools/core/stat_analysis/stat_analysis_job.cc +++ b/src/tools/core/stat_analysis/stat_analysis_job.cc @@ -40,7 +40,7 @@ // 019 01/24/20 Halley Gotway Add aggregate RPS lines. // 020 04/02/21 Halley Gotway MET #1736, write output to -out or // -out_stat, but not both. -// 021 04/12/21 Halley Gotway MET #1735 Support multiple +// 021 04/12/21 Halley Gotway MET #1735 Support multiple // -out_thresh and -out_line_type options. // 022 05/28/21 Halley Gotway Add MCTS HSS_EC output. 
// 023 11/10/22 Halley Gotway MET #2339 Add SEEPS and SEEPS_MPR @@ -2713,6 +2713,8 @@ void write_job_aggr_seeps(STATAnalysisJob &job, STATLineType lt, int n, n_row, n_col, r, c; StatHdrColumns shc; + n = 0; + // // Setup the output table // @@ -2737,6 +2739,7 @@ void write_job_aggr_seeps(STATAnalysisJob &job, STATLineType lt, // // Loop through the map // + for(it = m.begin(), r=1; it != m.end(); it++) { // @@ -2778,6 +2781,8 @@ void write_job_aggr_seeps_mpr(STATAnalysisJob &job, STATLineType lt, StatHdrColumns shc; SeepsAggScore agg_score; + n = 0; + // // Setup the output table // diff --git a/src/tools/core/wavelet_stat/wavelet_stat.cc b/src/tools/core/wavelet_stat/wavelet_stat.cc index 2bab283e13..beba507332 100644 --- a/src/tools/core/wavelet_stat/wavelet_stat.cc +++ b/src/tools/core/wavelet_stat/wavelet_stat.cc @@ -1521,19 +1521,19 @@ void write_nc_raw(const WaveletStatNcOutInfo & nc_info, const double *fdata, con // Store the forecast, observation, and difference fields for(i=0; ilatlon_to_xy(obs.getLatitude(), -1.0*obs.getLongitude(), - grid_x, grid_y); - - if(grid_x < 0 || grid_x >= _gridMask->nx() || - grid_y < 0 || grid_y >= _gridMask->ny()) { - _gridMaskNum++; - return false; - } - - // - // Apply the area mask - // - if(_areaMask) { - if(!_areaMask->s_is_on(nint(grid_x), nint(grid_y))) { - _areaMaskNum++; - return false; - } - } - } - - // - // Apply the polyline mask - // - if(_polyMask) - { - if(!_polyMask->latlon_is_inside_dege(obs.getLatitude(), obs.getLongitude())) - { - _polyMaskNum++; - return false; - } - } + if(filters.is_filtered(obs.getLatitude(), obs.getLongitude())) return false; // // Apply the station ID mask // - if(_sidMask) - { - if(!_sidMask->has(obs.getStationId().c_str())) - { - _sidMaskNum++; - return false; - } - } + if(filters.is_filtered_sid(obs.getStationId().c_str())) return false; // Save obs because the obs vector is sorted after time summary _observations.push_back(obs); diff --git a/src/tools/other/ascii2nc/file_handler.h b/src/tools/other/ascii2nc/file_handler.h index 006f965d3e..ece575672a 100644 --- a/src/tools/other/ascii2nc/file_handler.h +++ b/src/tools/other/ascii2nc/file_handler.h @@ -25,6 +25,7 @@ #include #include "mask_poly.h" +#include "mask_filters.h" #include "vx_grid.h" #include "vx_config.h" #include "vx_util.h" @@ -92,15 +93,7 @@ class FileHandler int _hdrNum; int _obsNum; - int _gridMaskNum; - int _areaMaskNum; - int _polyMaskNum; - int _sidMaskNum; - - Grid *_gridMask; - MaskPlane *_areaMask; - MaskPoly *_polyMask; - StringArray *_sidMask; + MaskFilters filters; map _messageTypeMap; @@ -149,20 +142,14 @@ class FileHandler void _closeNetcdf(); bool _openNetcdf(const string &nc_filename); -// bool _writeHdrInfo(const ConcatString &hdr_typ, -// const ConcatString &hdr_sid, -// const time_t hdr_vld, -// double lat, double lon, double elv); -// bool _writeObsInfo(int gc, float prs, float hgt, float obs, -// const ConcatString &qty); void debug_print_observations(vector< Observation >, string); }; inline void FileHandler::setCompressionLevel(int compressoion_level) { deflate_level = compressoion_level; } -inline void FileHandler::setGridMask(Grid &g) { _gridMask = &g; } -inline void FileHandler::setAreaMask(MaskPlane &a) { _areaMask = &a; } -inline void FileHandler::setPolyMask(MaskPoly &p) { _polyMask = &p; } -inline void FileHandler::setSIDMask (StringArray &s) { _sidMask = &s; } +inline void FileHandler::setGridMask(Grid &g) { filters.set_grid_mask(&g); } +inline void FileHandler::setAreaMask(MaskPlane &a) { 
filters.set_area_mask(&a); } +inline void FileHandler::setPolyMask(MaskPoly &p) { filters.set_poly_mask(&p); } +inline void FileHandler::setSIDMask (StringArray &s) { filters.set_sid_mask(&s); } inline void FileHandler::setMessageTypeMap(map m) { _messageTypeMap = m; } diff --git a/src/tools/other/ascii2nc/python_handler.cc b/src/tools/other/ascii2nc/python_handler.cc index 9ea61d5a8a..a5a55e0f94 100644 --- a/src/tools/other/ascii2nc/python_handler.cc +++ b/src/tools/other/ascii2nc/python_handler.cc @@ -18,6 +18,7 @@ using namespace std; #include "vx_log.h" #include "vx_math.h" +#include "python_line.h" #include "vx_python3_utils.h" #include "python_handler.h" @@ -25,10 +26,7 @@ using namespace std; //////////////////////////////////////////////////////////////////////// - -static const char set_python_env_wrapper [] = "set_python_env"; - -static const char write_tmp_ascii_wrapper[] = "MET_BASE/wrappers/write_tmp_point.py"; +static const char write_tmp_ascii_wrapper[] = "MET_BASE/python/pyembed/write_tmp_point.py"; static const char list_name [] = "point_data"; @@ -136,9 +134,6 @@ mlog << Error << "\nbool PythonHandler::_readObservations(LineDataFile &) -> " exit ( 1 ); - -return ( false ); - } @@ -247,9 +242,7 @@ bool PythonHandler::do_straight() { -ConcatString command, path, user_base; - -path = set_python_env_wrapper; +ConcatString command, user_base; mlog << Debug(3) << "Running user's python script (" @@ -263,7 +256,7 @@ user_base.chomp(".py"); // start up the python interpreter // -Python3_Script script(path.text()); +Python3_Script *script = get_python3_script(); // // set up a "new" sys.argv list @@ -271,7 +264,7 @@ Python3_Script script(path.text()); // the user's script // -script.reset_argv(user_script_filename.text(), user_script_args); +script->reset_argv(user_script_filename.text(), user_script_args); // // import the user's script as a module @@ -380,20 +373,16 @@ if ( status ) { } -ConcatString wrapper; - -wrapper = set_python_env_wrapper; - -Python3_Script script(wrapper.text()); +Python3_Script *script = get_python3_script(); mlog << Debug(4) << "Reading temporary Python ascii observation file: " << tmp_ascii_path << "\n"; -script.import_read_tmp_ascii_py(); +script->import_read_tmp_ascii_py(); -PyObject * dobj = script.read_tmp_ascii(tmp_ascii_path.text()); +PyObject * dobj = script->read_tmp_ascii(tmp_ascii_path.text()); -PyObject * obj = script.lookup_ascii(tmp_list_name); +PyObject * obj = script->lookup_ascii(tmp_list_name); if ( ! 
PyList_Check(obj) ) { diff --git a/src/tools/other/gen_ens_prod/gen_ens_prod_conf_info.cc b/src/tools/other/gen_ens_prod/gen_ens_prod_conf_info.cc index 10bac43b18..ef233e6f86 100644 --- a/src/tools/other/gen_ens_prod/gen_ens_prod_conf_info.cc +++ b/src/tools/other/gen_ens_prod/gen_ens_prod_conf_info.cc @@ -217,9 +217,9 @@ void GenEnsProdConfInfo::process_config(GrdFileType etype, StringArray * ens_fil ens_info->add_input(input_info); // Add InputInfo to ens info list for each ensemble file provided - // set var_info to NULL to note first VarInfo should be used + // set var_info to nullptr to note first VarInfo should be used for(int k=1; k " - << "the -type command line requirement must be set to a specific masking type!\n" + << "the -type command line requirement must be set to a specific masking type!\n" << "\t\t \"poly\", \"box\", \"circle\", \"track\", \"grid\", " - << "\"data\", \"solar_alt\", \"solar_azi\", \"lat\", \"lon\" " - << "or \"shape\"" - << "\n\n"; + << "\"data\", \"solar_alt\", \"solar_azi\", \"lat\", \"lon\" " + << "or \"shape\"" << "\n\n"; exit(1); } @@ -236,11 +238,24 @@ void process_mask_file(DataPlane &dp) { // Process the mask from a shapefile else if(mask_type == MaskType_Shape) { - get_shapefile_outline(shape); + // If -shape_str was specified, find the matching records + if(shape_str_map.size() > 0) get_shapefile_strings(); + + // Get the records specified by -shapeno and -shape_str + get_shapefile_records(); + + int n_pts; + vector::const_iterator it; + for(it = shape_recs.begin(), n_pts=0; + it != shape_recs.end(); ++it) { + n_pts += it->n_points; + } mlog << Debug(2) << "Parsed Shape Mask:\t" << mask_filename - << " containing " << shape.n_points << " points\n"; + << " using " << shape_recs.size() + << " shape(s) containing a total of " + << n_pts << " points\n"; } // For solar masking, check for a date/time string @@ -515,14 +530,88 @@ bool get_gen_vx_mask_config_str(MetNcMetDataFile *mnmdf_ptr, //////////////////////////////////////////////////////////////////////// -void get_shapefile_outline(ShpPolyRecord & cur_shape) { +void get_shapefile_strings() { + DbfFile f; + StringArray rec_names; + StringArray rec_values; + + // Get the corresponding dbf file name + ConcatString dbf_filename = mask_filename; + dbf_filename.replace(".shp", ".dbf"); + + mlog << Debug(3) << "Shape dBASE file:\t" + << dbf_filename << "\n"; + + // Open the database file + if(!(f.open(dbf_filename.c_str()))) { + mlog << Error << "\nget_shapefile_strings() -> " + << "unable to open database file \"" << dbf_filename + << "\"\n\n"; + exit(1); + } + + // Get the subrecord names, ignoring case + rec_names = f.subrecord_names(); + rec_names.set_ignore_case(true); + + // Print the subrecord names + mlog << Debug(4) << "Filtering shapes using " + << shape_str_map.size() << " of the " << rec_names.n() + << " available attributes (" << write_css(rec_names) + << ").\n"; + + // Check that the attributes requested actually exist + map::const_iterator it; + for(it = shape_str_map.begin(); + it != shape_str_map.end(); it++) { + + if(!rec_names.has(it->first)) { + mlog << Warning << "\nget_shapefile_strings() -> " + << "the \"-shape_str\" name \"" << it->first + << "\" is not in the list of " << rec_names.n() + << " shapefile attributes and will be ignored:\n" + << write_css(rec_names) << "\n\n"; + } + } + + // Check each record + for(int i=0; in_records; i++) { + + // Get the values for the current record + rec_values = f.subrecord_values(i); + + // Add matching records to the list + 
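In plain C++ terms, the -shape_str matching rule implemented in this function and in is_shape_str_match() below keeps a record only when every requested attribute that actually exists in the shapefile has one of the requested values; names that do not exist are warned about and otherwise ignored. A self-contained sketch, with standard containers assumed in place of MET's StringArray and ConcatString:

#include <map>
#include <set>
#include <string>

using AttrFilter = std::map<std::string, std::set<std::string>>;

// One record matches when every known filter name carries an allowed value.
bool matches(const AttrFilter &filter,
             const std::map<std::string, std::string> &record) {
   for (const auto &f : filter) {
      auto it = record.find(f.first);
      if (it == record.end()) continue;                  // unknown name: ignored
      if (f.second.count(it->second) == 0) return false; // value not allowed
   }
   return true;
}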
if(is_shape_str_match(i, rec_names, rec_values)) { + mlog << Debug(4) << "Shape number " << i + << " is a shapefile match.\n"; + if(!shape_numbers.has(i)) shape_numbers.add(i); + } + } + + // Close the database file + f.close(); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void get_shapefile_records() { const char * const shape_filename = mask_filename.c_str(); ShpFile f; - ShpPolyRecord & pr = cur_shape; + ShpPolyRecord pr; + + // Check the number of requested shapes + if(shape_numbers.n() == 0) { + mlog << Error << "\nget_shapefile_records() -> " + << "at least one shape number must be specified using " + << "the \"-shapeno\" or \"-shape_str\" command line options!\n\n"; + exit(1); + } // Open shapefile if(!(f.open(shape_filename))) { - mlog << Error << "\nget_shapefile_outline() -> " + mlog << Error << "\nget_shapefile_records() -> " << "unable to open shape file \"" << shape_filename << "\"\n\n"; exit(1); @@ -530,33 +619,81 @@ void get_shapefile_outline(ShpPolyRecord & cur_shape) { // Make sure it's a polygon file, and not some other type if(f.shape_type() != shape_type_polygon) { - mlog << Error << "\nget_shapefile_outline() -> " + mlog << Error << "\nget_shapefile_records() -> " << "shape file \"" << shape_filename << "\" is not a polygon file\n\n"; exit(1); } - // Skip through un-needed records - for(int i=0; i> pr; + + // Store requested records + if(shape_numbers.has(i)) { + mlog << Debug(3) + << "Using shape number " << i << " with " + << pr.n_points << " points.\n"; + pr.toggle_longitudes(); + shape_recs.push_back(pr); + } } + // Check for end of file if(f.at_eof()) { - mlog << Error << "\nget_shapefile_outline() -> " - << "hit eof before reading specified record\n\n"; + mlog << Error << "\nget_shapefile_records() -> " + << "hit eof before reading all specified record(s)\n\n"; exit(1); } - // Get the target record - f >> pr; - pr.toggle_longitudes(); - return; } //////////////////////////////////////////////////////////////////////// +bool is_shape_str_match(const int i_shape, const StringArray &names, const StringArray &values) { + bool match = true; + int i_match; + + // Check each map entry + map::const_iterator it; + for(it = shape_str_map.begin(); + it != shape_str_map.end(); it++) { + + // Ignore names that do not exist in the shapefile + if(!names.has(it->first, i_match)) continue; + + // The corresponding value must match + if(!it->second.has(values[i_match].c_str())) { + mlog << Debug(4) << "Shape number " << i_shape << " \"" + << it->first << "\" value (" << values[i_match] + << ") does not match (" << write_css(it->second) + << ")\n"; + match = false; + break; + } + else { + mlog << Debug(3) << "Shape number " << i_shape << " \"" + << it->first << "\" value (" << values[i_match] + << ") matches (" << write_css(it->second) + << ")\n"; + } + + } + + return(match); +} + +//////////////////////////////////////////////////////////////////////// + void apply_poly_mask(DataPlane & dp) { int x, y, n_in; bool inside; @@ -1121,15 +1258,30 @@ void apply_shape_mask(DataPlane & dp) { int x, y, n_in; bool status = false; - // Load the shape - GridClosedPolyArray p; - p.set(shape, grid); + // Load the shapes + GridClosedPolyArray poly; + vector poly_list; + vector::const_iterator rec_it; + for(rec_it = shape_recs.begin(); + rec_it != shape_recs.end(); ++rec_it) { + poly.set(*rec_it, grid); + poly_list.push_back(poly); + } // Check grid points for(x=0,n_in=0; x<(grid.nx()); x++) { for(y=0; y<(grid.ny()); y++) { - status = 
p.is_inside(x, y); + vector::const_iterator poly_it; + for(poly_it = poly_list.begin(); + poly_it != poly_list.end(); ++poly_it) { + + // Check if point is inside + status = poly_it->is_inside(x, y); + + // Break after the first match + if(status) break; + } // Check the complement if(complement) status = !status; @@ -1415,6 +1567,7 @@ void usage() { << "\t[-height n]\n" << "\t[-width n]\n" << "\t[-shapeno n]\n" + << "\t[-shape_str name string]\n" << "\t[-value n]\n" << "\t[-name string]\n" << "\t[-log file]\n" @@ -1466,7 +1619,6 @@ void usage() { << "to combine the \"input_field\" data with the current mask " << "(optional).\n" - << "\t\t\"-thresh string\" defines the threshold to be applied " << "(optional).\n" << "\t\t For \"circle\" and \"track\" masking, threshold the " @@ -1483,8 +1635,14 @@ void usage() { << "units.\n" << "\t\t\"-shapeno n\" (optional).\n" - << "\t\t For \"shape\" masking, specify the shape number " - << "(0-based) to be used.\n" + << "\t\t For \"shape\" masking, specify the integer shape " + << "number(s) (0-based) to be used as a comma-separated list.\n" + + << "\t\t\"-shape_str name string\" (optional).\n" + << "\t\t For \"shape\" masking, specify the shape(s) to be used " + << "as a named attribute followed by a comma-separated list of " + << "matching strings. If used multiple times, only shapes matching " + << "all named attributes will be used.\n" << "\t\t\"-value n\" overrides the default output mask data " << "value (" << default_mask_val << ") (optional).\n" @@ -1597,13 +1755,41 @@ void set_compress(const StringArray & a) { //////////////////////////////////////////////////////////////////////// void set_shapeno(const StringArray & a) { + NumArray cur_na; - shape_number = atoi(a[0].c_str()); + cur_na.add_css(a[0].c_str()); - if(shape_number < 0) { - mlog << Error << "\n" << program_name << " -> " - << "bad shapeno ... " << shape_number << "\n\n"; - exit(1); + // Check and add each unique shape number + for(int i=0; i " + << "bad shapeno ... " << cur_na[i] << "\n\n"; + exit(1); + } + else if(!shape_numbers.has(cur_na[i])) { + shape_numbers.add(cur_na[i]); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void set_shape_str(const StringArray & a) { + StringArray sa; + + // Comma-separated list of matching strings, ignoring case + sa.parse_css(a[1]); + sa.set_ignore_case(true); + + // Append options to existing entry + if(shape_str_map.count(a[0]) > 0) { + shape_str_map[a[0]].add(sa); + } + // Or add a new entry + else { + shape_str_map[a[0]] = sa; } return; diff --git a/src/tools/other/gen_vx_mask/gen_vx_mask.h b/src/tools/other/gen_vx_mask/gen_vx_mask.h index 6fbb9f45ba..962b36e4ba 100644 --- a/src/tools/other/gen_vx_mask/gen_vx_mask.h +++ b/src/tools/other/gen_vx_mask/gen_vx_mask.h @@ -21,6 +21,7 @@ // 004 08/30/21 Halley Gotway MET #1891 fix input and mask fields. // 005 05/05/22 Halley Gotway MET #2152 Add -type poly_xy. 
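set_shapeno() above accepts a comma-separated list, rejects negative values, and keeps only the unique shape numbers. A self-contained sketch of that parse, with std::vector standing in for MET's NumArray:

#include <algorithm>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

std::vector<int> parse_shapeno(const std::string &css) {
   std::vector<int> out;
   std::stringstream ss(css);
   std::string tok;
   while (std::getline(ss, tok, ',')) {
      const int n = std::stoi(tok);                        // throws on non-numeric text
      if (n < 0) throw std::invalid_argument("bad shapeno " + tok);
      if (std::find(out.begin(), out.end(), n) == out.end()) out.push_back(n);
   }
   return out;
}

For example, parse_shapeno("3,1,3") yields {3, 1}: duplicates collapse and first-seen order is kept.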
// 006 09/29/22 Prestopnik MET #2227 Remove namespace std from header files +// 007 05/03/23 Halley Gotway MET #1060 Support multiple shapes // //////////////////////////////////////////////////////////////////////// @@ -105,9 +106,9 @@ static double mask_val = default_mask_val; static ConcatString mask_name; static unixtime solar_ut = (unixtime) 0; -static ShpPolyRecord shape; - -static int shape_number = 0; +static std::map shape_str_map; +static NumArray shape_numbers; +static std::vector shape_recs; // Masking polyline static MaskPoly poly_mask; @@ -129,7 +130,10 @@ static void get_data_plane(const ConcatString &file_name, DataPlane &dp, Grid &dp_grid); static bool get_gen_vx_mask_config_str(MetNcMetDataFile *, ConcatString &); -static void get_shapefile_outline(ShpPolyRecord &shape); +static void get_shapefile_strings(); +static void get_shapefile_records(); +static bool is_shape_str_match(const int i, + const StringArray &, const StringArray &); static void apply_poly_mask(DataPlane &dp); static void apply_poly_xy_mask(DataPlane &dp); static void apply_shape_mask(DataPlane &dp); @@ -158,6 +162,7 @@ static void set_value(const StringArray &); static void set_name(const StringArray &); static void set_compress(const StringArray &); static void set_shapeno(const StringArray &); +static void set_shape_str(const StringArray &); //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/gis_utils/gis_dump_dbf.cc b/src/tools/other/gis_utils/gis_dump_dbf.cc index 519d706827..7a1fea84e3 100644 --- a/src/tools/other/gis_utils/gis_dump_dbf.cc +++ b/src/tools/other/gis_utils/gis_dump_dbf.cc @@ -176,8 +176,6 @@ mlog << Error exit ( 1 ); -return; - } diff --git a/src/tools/other/gis_utils/gis_dump_shp.cc b/src/tools/other/gis_utils/gis_dump_shp.cc index 601eef8dd4..ec64ad0d19 100644 --- a/src/tools/other/gis_utils/gis_dump_shp.cc +++ b/src/tools/other/gis_utils/gis_dump_shp.cc @@ -173,8 +173,6 @@ mlog << Error exit ( 1 ); -return; - } diff --git a/src/tools/other/gis_utils/gis_dump_shx.cc b/src/tools/other/gis_utils/gis_dump_shx.cc index e15c8ac63c..4480b79196 100644 --- a/src/tools/other/gis_utils/gis_dump_shx.cc +++ b/src/tools/other/gis_utils/gis_dump_shx.cc @@ -150,8 +150,6 @@ mlog << Error exit ( 1 ); -return; - } diff --git a/src/tools/other/grid_diag/grid_diag.cc b/src/tools/other/grid_diag/grid_diag.cc index bf180669cc..01514a9b0e 100644 --- a/src/tools/other/grid_diag/grid_diag.cc +++ b/src/tools/other/grid_diag/grid_diag.cc @@ -505,7 +505,7 @@ void setup_nc_file(void) { // Add global attributes write_netcdf_global(nc_out, out_file.c_str(), program_name, - NULL, NULL, conf_info.desc.c_str()); + nullptr, nullptr, conf_info.desc.c_str()); add_att(nc_out, "mask_grid", (conf_info.mask_grid_name.nonempty() ? 
(string)conf_info.mask_grid_name : na_str)); diff --git a/src/tools/other/gsi_tools/rad_config.cc b/src/tools/other/gsi_tools/rad_config.cc index 0c5bc87ebd..f88b115d4f 100644 --- a/src/tools/other/gsi_tools/rad_config.cc +++ b/src/tools/other/gsi_tools/rad_config.cc @@ -66,10 +66,6 @@ GsiRadConfig::GsiRadConfig(const GsiRadConfig & c) { -// init_from_scratch(); -// -// assign(c); - mlog << Error << "\n\n GsiRadConfig::GsiRadConfig(const GsiRadConfig &) -> should never be called!\n\n"; exit ( 1 ); @@ -84,17 +80,10 @@ GsiRadConfig & GsiRadConfig::operator=(const GsiRadConfig & c) { -// if ( this == &c ) return ( * this ); -// -// assign(c); - mlog << Error << "\n\n GsiRadConfig::operator=(const GsiRadConfig &) -> should never be called!\n\n"; exit ( 1 ); - -return ( * this ); - } diff --git a/src/tools/other/ioda2nc/ioda2nc.cc b/src/tools/other/ioda2nc/ioda2nc.cc index e8d1b0c980..4b48cbaf9e 100644 --- a/src/tools/other/ioda2nc/ioda2nc.cc +++ b/src/tools/other/ioda2nc/ioda2nc.cc @@ -570,11 +570,11 @@ void process_ioda_file(int i_pb) { float *obs_hght_arr = new float[nlocs]; float *hdr_time_arr = new float[nlocs]; char *hdr_vld_block = new char[nlocs*ndatetime]; - char *hdr_msg_types = NULL; - char *hdr_station_ids = NULL; - char **hdr_vld_block2 = NULL; - char **hdr_msg_types2 = NULL; - char **hdr_station_ids2 = NULL; + char *hdr_msg_types = nullptr; + char *hdr_station_ids = nullptr; + char **hdr_vld_block2 = nullptr; + char **hdr_msg_types2 = nullptr; + char **hdr_station_ids2 = nullptr; vector v_qc_data; vector v_obs_data; @@ -791,7 +791,7 @@ void process_ioda_file(int i_pb) { } if(has_msg_type) { - if (NULL != hdr_msg_types2) { + if (nullptr != hdr_msg_types2) { m_strncpy(hdr_typ, hdr_msg_types2[i_read], nstring, method_name_s, "hdr_typ2"); } else { @@ -823,7 +823,7 @@ void process_ioda_file(int i_pb) { if(has_station_id) { char tmp_sid[nstring+1]; - if (NULL != hdr_station_ids2) { + if (nullptr != hdr_station_ids2) { m_strncpy(tmp_sid, hdr_station_ids2[i_read], nstring, method_name_s, "tmp_sid2"); } else { @@ -1012,15 +1012,15 @@ void process_ioda_file(int i_pb) { delete [] obs_hght_arr; if (hdr_msg_types) delete [] hdr_msg_types; if (hdr_station_ids) delete [] hdr_station_ids; - if (NULL != hdr_msg_types2) { + if (nullptr != hdr_msg_types2) { for (int i=0; i BUFFER_SIZE) ? BUFFER_SIZE: (my_rec_end - rec_beg); buf_size = (my_rec_end - rec_beg); // read all int levels[buf_size]; @@ -3308,8 +3313,6 @@ void process_madis_acarsProfiles(NcFile *&f_in) { for(i_hdr=0; i_hdr BUFFER_SIZE) ? 
BUFFER_SIZE: (my_rec_end - i_hdr_s); @@ -3351,34 +3355,34 @@ void process_madis_acarsProfiles(NcFile *&f_in) { cur[0] = i_hdr_s; dim[0] = buf_size; dim[1] = maxLevels; + data_cnt = buf_size * maxLevels; + get_nc_data(&in_hdr_vld_var, tmp_dbl_arr, dim, cur); get_nc_data(&in_hdr_lat_var, (float *)hdr_lat_arr, dim, cur); get_nc_data(&in_hdr_lon_var, (float *)hdr_lon_arr, dim, cur); - get_filtered_nc_data_2d(in_hdr_elv_var, (float *)hdr_elv_arr, dim, cur, "elevation"); + get_filtered_nc_data_2d(in_hdr_elv_var, (float *)hdr_elv_arr, dim, cur, "elevation", data_cnt); + if (IS_VALID_NC(in_temperatureQty_var)) get_nc_data(&in_temperatureQty_var, (char *)&temperatureQty_arr, dim, cur); - else memset(temperatureQty_arr, 0, buf_size*dim[1]*sizeof(char)); + else memset(temperatureQty_arr, 0, data_cnt*sizeof(char)); if (IS_VALID_NC(in_dewpointQty_var)) get_nc_data(&in_dewpointQty_var, (char *)&dewpointQty_arr, dim, cur); - else memset(dewpointQty_arr, 0, buf_size*dim[1]*sizeof(char)); + else memset(dewpointQty_arr, 0, data_cnt*sizeof(char)); if (IS_VALID_NC(in_windDirQty_var)) get_nc_data(&in_windDirQty_var, (char *)&windDirQty_arr, dim, cur); - else memset(windDirQty_arr, 0, buf_size*dim[1]*sizeof(char)); + else memset(windDirQty_arr, 0, data_cnt*sizeof(char)); if (IS_VALID_NC(in_windSpeedQty_var)) get_nc_data(&in_windSpeedQty_var, (char *)&windSpeedQty_arr, dim, cur); - else memset(windSpeedQty_arr, 0, buf_size*dim[1]*sizeof(char)); + else memset(windSpeedQty_arr, 0, data_cnt*sizeof(char)); if (IS_VALID_NC(in_altitudeQty_var)) get_nc_data(&in_altitudeQty_var, (char *)&altitudeQty_arr, dim, cur); - else memset(altitudeQty_arr, 0, buf_size*dim[1]*sizeof(char)); + else memset(altitudeQty_arr, 0, data_cnt*sizeof(char)); - get_filtered_nc_data_2d(in_hdr_tob_var, (int *)&obsTimeOfDay_arr, dim, cur, "obsTimeOfDay"); - get_filtered_nc_data_2d(in_temperature_var, (float *)&temperature_arr, dim, cur, "temperature"); - get_filtered_nc_data_2d(in_dewpoint_var, (float *)&dewpoint_arr, dim, cur, "dewpoint"); - get_filtered_nc_data_2d(in_windDir_var, (float *)&windDir_arr, dim, cur, "windDir"); - get_filtered_nc_data_2d(in_windSpeed_var, (float *)&windSpeed_arr, dim, cur, "windSpeed"); + get_filtered_nc_data_2d(in_hdr_tob_var, (int *)&obsTimeOfDay_arr, dim, cur, "obsTimeOfDay", data_cnt); + get_filtered_nc_data_2d(in_temperature_var, (float *)&temperature_arr, dim, cur, "temperature", data_cnt); + get_filtered_nc_data_2d(in_dewpoint_var, (float *)&dewpoint_arr, dim, cur, "dewpoint", data_cnt); + get_filtered_nc_data_2d(in_windDir_var, (float *)&windDir_arr, dim, cur, "windDir", data_cnt); + get_filtered_nc_data_2d(in_windSpeed_var, (float *)&windSpeed_arr, dim, cur, "windSpeed", data_cnt); dim[1] = hdr_sid_len; get_nc_data(&in_hdr_sid_var, (char *)hdr_sid_arr, dim, cur); - dim[0] = 1; - dim[1] = 1; - // // Process the header type. // For ACARS, store as AIRCFT. @@ -3390,12 +3394,6 @@ void process_madis_acarsProfiles(NcFile *&f_in) { i_hdr = i_hdr_s + i_idx; mlog << Debug(3) << "Record Number: " << i_hdr << "\n"; - // - // Use cur to index into the NetCDF variables. - // - cur[0] = i_hdr; - cur[1] = 0; - // // Process the station i.e. airport name. 
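The acarsProfiles changes above collapse the repeated buf_size*dim[1] products into one data_cnt, used for both the 2-D reads and the memset() fallbacks. A sketch of the shape arithmetic (get_filtered_nc_data_2d is MET's helper; only the element count is illustrated here):

#include <cstring>

// One read pass covers buf_size profiles by maxLevels levels, so every
// 2-D buffer in the pass holds exactly data_cnt elements.
void zero_missing_qty(char *qty_arr, long buf_size, long maxLevels) {
   const long data_cnt = buf_size * maxLevels;
   std::memset(qty_arr, 0, data_cnt * sizeof(char)); // fallback when a Qty var is absent
}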
// @@ -3544,8 +3542,8 @@ void process_madis_acarsProfiles(NcFile *&f_in) { // // Cleanup // - if(cur) { delete [] cur; cur = (long *) 0; } - if(dim) { delete [] dim; dim = (long *) 0; } + cur.clear(); + dim.clear(); return; } diff --git a/src/tools/other/mode_graphics/cgraph_main.cc b/src/tools/other/mode_graphics/cgraph_main.cc index 06580cf12b..83908c78f2 100644 --- a/src/tools/other/mode_graphics/cgraph_main.cc +++ b/src/tools/other/mode_graphics/cgraph_main.cc @@ -143,10 +143,6 @@ mlog << Error << "\n\n CgraphBase::CgraphBase(const CgraphBase &) -> should nev exit ( 1 ); - // cgraph_init_from_scratch(); - // - // assign(c); - } @@ -161,13 +157,6 @@ mlog << Error << "\n\n operator=CgraphBase(const CgraphBase &) -> should never exit ( 1 ); - -// if ( this == &c ) return ( * this ); -// -// assign(c); - -return ( * this ); - } @@ -315,8 +304,6 @@ if ( empty (filename) ) { exit ( 1 ); - // return ( false ); - } Filename = filename; @@ -810,7 +797,7 @@ FT_UInt previous; FT_Glyph_Metrics * metrics = (FT_Glyph_Metrics *) 0; FT_Vector k_delta; const bool use_kerning = DoKerning && FT_HAS_KERNING(face); -const char * new_string = (const char *) NULL; +const char * new_string = (const char *) nullptr; bool first_char = false; bool last_char = false; double x_bearing, y_bearing, advance; @@ -1662,10 +1649,6 @@ mlog << Error << "\n\n Cgraph::Cgraph(const Cgraph &) -> should never be called exit ( 1 ); - // cgraph2_init_from_scratch(); - // - // assign(c); - } @@ -1680,13 +1663,6 @@ mlog << Error << "\n\n operator=Cgraph(const Cgraph &) -> should never be calle exit ( 1 ); - -// if ( this == &c ) return ( * this ); -// -// assign(c); - -return ( * this ); - } @@ -2115,7 +2091,7 @@ if ( empty(in) ) { int j, k; char c0, c1; const int N = m_strlen(in); -char * s = (char *) NULL; +char * s = (char *) nullptr; FT_UInt fi_glyph_index = 0; FT_UInt fl_glyph_index = 0; @@ -2161,7 +2137,7 @@ while ( j < N ) { // done // -out = s; s = (char *) NULL; +out = s; s = (char *) nullptr; return; @@ -2227,8 +2203,6 @@ mlog << Error << "\n\n my_conic() -> should never be called!\n\n"; exit ( 1 ); -return ( 0 ); - } diff --git a/src/tools/other/mode_graphics/mode_nc_output_file.cc b/src/tools/other/mode_graphics/mode_nc_output_file.cc index c3be3fcbd3..fd20806ca0 100644 --- a/src/tools/other/mode_graphics/mode_nc_output_file.cc +++ b/src/tools/other/mode_graphics/mode_nc_output_file.cc @@ -449,8 +449,14 @@ int x, y, n, k; n = 0; int v[Ny][Nx]; -long offsets[2] = { 0, 0}; // NOT (x, y)! -long lengths[2] = {Ny, Nx}; +LongArray offsets; // { 0, 0}; +LongArray lengths; // {Ny, Nx}; // NOT (x, y)! + +offsets.add(0); +offsets.add(0); +lengths.add(Ny); +lengths.add(Nx); + if (get_nc_data(var, (int *)&v, lengths, offsets)) { for (x=0; x= Nx) || (y < 0) || (y >= Ny) ) { int i[2]; int status; -long offsets[2] = {y, x}; // NOT (x, y)! -long lengths[2] = {1,1}; +LongArray offsets; // {y, x}; // NOT (x, y)! +LongArray lengths; // {1,1}; + +offsets.add(y); +offsets.add(x); +lengths.add(1); +lengths.add(1); //status = var->set_cur(y, x); // @@ -537,8 +548,13 @@ if ( (x < 0) || (x >= Nx) || (y < 0) || (y >= Ny) ) { float ff[2]; int status; -long offsets[2] = {y, x}; // NOT (x, y)! -long lengths[2] = {1,1}; +LongArray offsets; // {y, x}; // NOT (x, y)! +LongArray lengths; // {1,1}; + +offsets.add(y); +offsets.add(x); +lengths.add(1); +lengths.add(1); //status = var->set_cur(y, x); // NOT (x, y)! 
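The "NOT (x, y)!" comments above warn about index order: NetCDF data is stored row-major, so the slower-varying y dimension must come first in the offsets. A minimal netCDF-cxx4 sketch of reading a single grid point with that ordering (the variable is a placeholder):

#include <netcdf>
#include <vector>

int read_point(netCDF::NcVar &var, int x, int y) {
   std::vector<size_t> offsets = { static_cast<size_t>(y),  // y first: row-major
                                   static_cast<size_t>(x) };
   std::vector<size_t> lengths = { 1, 1 };                  // one value
   int v = 0;
   var.getVar(offsets, lengths, &v);
   return v;
}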
// diff --git a/src/tools/other/mode_time_domain/3d_conv.cc b/src/tools/other/mode_time_domain/3d_conv.cc index 78c54968f0..0dd0172f22 100644 --- a/src/tools/other/mode_time_domain/3d_conv.cc +++ b/src/tools/other/mode_time_domain/3d_conv.cc @@ -529,6 +529,7 @@ void get_data_plane(const MtdFloatFile & mtd, const int t, double * data_plane, { int x, y; +int offset = 0; double * d = data_plane; bool * ok = ok_plane; double value; @@ -547,9 +548,13 @@ for (y=0; y 0 ) { } -delete [] W; W = 0; +delete [] W; W = nullptr; -delete [] F; F = 0; +delete [] F; F = nullptr; -delete [] A; A = 0; +delete [] A; A = nullptr; W = ww; @@ -222,11 +222,11 @@ F = ff; A = aa; -ww = 0; +ww = nullptr; -ff = 0; +ff = nullptr; -aa = 0; +aa = nullptr; // // done @@ -256,6 +256,7 @@ if ( _weight < 0.0 ) { } +if (Nelements < 0) Nelements = 0; // SonarQube findings extend(Nelements + 1); diff --git a/src/tools/other/mode_time_domain/mtd_file_float.cc b/src/tools/other/mode_time_domain/mtd_file_float.cc index 794c4a3311..f0c70f509f 100644 --- a/src/tools/other/mode_time_domain/mtd_file_float.cc +++ b/src/tools/other/mode_time_domain/mtd_file_float.cc @@ -669,8 +669,15 @@ var = get_nc_var(&f, data_field_name); // //} -long offsets[3] = {0,0,0}; -long lengths[3] = {Nt, Ny, Nx}; +LongArray offsets; // {0,0,0}; +LongArray lengths; // {Nt, Ny, Nx}; + +offsets.add(0); +offsets.add(0); +offsets.add(0); +lengths.add(Nt); +lengths.add(Ny); +lengths.add(Nx); //if ( ! get_nc_data(&var, Data, (long *){Nt, Ny, Nx}, (long *){0,0,0}) ) { if ( ! get_nc_data(&var, Data, lengths, offsets) ) { @@ -775,8 +782,15 @@ data_var = get_nc_var(&f, data_field_name); // //} -long offsets[3] = {0,0,0}; -long lengths[3] = {Nt, Ny, Nx}; +LongArray offsets; // {0,0,0}; +LongArray lengths; // {Nt, Ny, Nx}; + +offsets.add(0); +offsets.add(0); +offsets.add(0); +lengths.add(Nt); +lengths.add(Ny); +lengths.add(Nx); //if ( ! get_nc_data(&data_var, Data, (long *){Nt, Ny, Nx}, (long *){0,0,0}) ) { if ( ! get_nc_data(&data_var, Data, lengths, offsets) ) { diff --git a/src/tools/other/mode_time_domain/mtd_file_int.cc b/src/tools/other/mode_time_domain/mtd_file_int.cc index 339b583ce6..06dd462006 100644 --- a/src/tools/other/mode_time_domain/mtd_file_int.cc +++ b/src/tools/other/mode_time_domain/mtd_file_int.cc @@ -461,8 +461,15 @@ var = get_nc_var(&f, data_field_name); // //} -long offsets[3] = {0,0,0}; -long lengths[3] = {Nt, Ny, Nx}; +LongArray offsets; // {0,0,0}; +LongArray lengths; // {Nt, Ny, Nx}; + +offsets.add(0); +offsets.add(0); +offsets.add(0); +lengths.add(Nt); +lengths.add(Ny); +lengths.add(Nx); //if ( ! get_nc_data(&var, Data, (long *){Nt, Ny, Nx}, (long *){0,0,0}) ) { if ( ! get_nc_data(&var, Data, lengths, offsets) ) { @@ -566,8 +573,15 @@ data_var = add_var(&f, data_field_name, ncInt, nt_dim, ny_dim, nx_dim); //data_var = get_nc_var(&f, data_field_name); -long offsets[3] = {0,0,0}; -long lengths[3] = {Nt, Ny, Nx}; +LongArray offsets; // {0,0,0}; +LongArray lengths; // {Nt, Ny, Nx}; + +offsets.add(0); +offsets.add(0); +offsets.add(0); +lengths.add(Nt); +lengths.add(Ny); +lengths.add(Nx); if ( ! 
put_nc_data(&data_var, Data, lengths, offsets) ) { @@ -818,7 +832,7 @@ void MtdIntFile::fatten() int x, y, n; const int nxy = Nx*Ny; int * u = new int [nxy]; -int * a = 0; +int * a = nullptr; a = u; @@ -885,7 +899,9 @@ for (y = 0; y<(Ny - 2); ++y) { for (x=0; x<(Nx - 2); ++x) { - if ( *a ) { + if (n >= (nxy-1)) break; // For SonarQube findings + + if (n >=0 && *a ) { Data[n + 1] = 1; // (x + 1, y) Data[n + Nx] = 1; // (x, y + 1) @@ -1705,11 +1721,14 @@ s.set_to_zeroes(); int * in = Data; int * out = s.Data; +int out_size = s.nxyt(); v = 0; for (j=0; j= out_size) break; // For SonarQube findings + if ( yesno[*in] ) { *out = 1; ++v; } ++in; diff --git a/src/tools/other/mode_time_domain/mtd_partition.cc b/src/tools/other/mode_time_domain/mtd_partition.cc index 5cf123619e..3cbc81c076 100644 --- a/src/tools/other/mode_time_domain/mtd_partition.cc +++ b/src/tools/other/mode_time_domain/mtd_partition.cc @@ -230,6 +230,8 @@ void EquivalenceClass::add_no_repeat(int k) if ( has(k) ) return; +if (Nelements < 0) Nelements = 0; + extend(Nelements + 1); E[Nelements++] = k; @@ -467,7 +469,7 @@ void Mtd_Partition::extend(int n) if ( n <= Nalloc ) return; -EquivalenceClass ** u = (EquivalenceClass **) 0; +EquivalenceClass ** u = (EquivalenceClass **) nullptr; n = mtd_partition_alloc_inc*((n + mtd_partition_alloc_inc - 1)/mtd_partition_alloc_inc); @@ -479,7 +481,7 @@ if ( C ) { memcpy(u, C, Nelements*(sizeof(EquivalenceClass *))); - delete [] C; C = (EquivalenceClass **) 0; + delete [] C; C = (EquivalenceClass **) nullptr; } @@ -667,8 +669,6 @@ if ( (nclass_1 < 0) || (nclass_2 < 0) ) { exit ( 1 ); - return; - } merge_classes(nclass_1, nclass_2); @@ -687,6 +687,8 @@ void Mtd_Partition::add_no_repeat(int k) if ( has(k) ) return; +if (Nelements < 0) Nelements = 0; + extend(Nelements + 1); C[Nelements] = new EquivalenceClass; diff --git a/src/tools/other/modis_regrid/cloudsat_swath_file.cc b/src/tools/other/modis_regrid/cloudsat_swath_file.cc index c223944e2f..7220857db8 100644 --- a/src/tools/other/modis_regrid/cloudsat_swath_file.cc +++ b/src/tools/other/modis_regrid/cloudsat_swath_file.cc @@ -1037,12 +1037,6 @@ mlog << Error exit ( 1 ); - // - // done - // - -return; - } @@ -1295,7 +1289,7 @@ StringArray a; Nattributes = 0; -if ( (retval = SWinqattrs(SwathId, NULL, &att_buf_size)) < 0 ) { +if ( (retval = SWinqattrs(SwathId, nullptr, &att_buf_size)) < 0 ) { mlog << Error << "\n\n CloudsatSwath::get_attributes() -> can't get attribute buffer size\n\n"; @@ -1998,8 +1992,6 @@ mlog << Error exit ( 1 ); -return ( * this ); - } diff --git a/src/tools/other/modis_regrid/modis_file.cc b/src/tools/other/modis_regrid/modis_file.cc index 6299314084..fca32b27a0 100644 --- a/src/tools/other/modis_regrid/modis_file.cc +++ b/src/tools/other/modis_regrid/modis_file.cc @@ -113,8 +113,6 @@ mlog << Error exit ( 1 ); -return ( * this ); - } diff --git a/src/tools/other/modis_regrid/modis_regrid.cc b/src/tools/other/modis_regrid/modis_regrid.cc index 65652807bf..7107b2f604 100644 --- a/src/tools/other/modis_regrid/modis_regrid.cc +++ b/src/tools/other/modis_regrid/modis_regrid.cc @@ -256,8 +256,6 @@ cout << "\n" exit ( 1 ); -return; - } diff --git a/src/tools/other/pb2nc/pb2nc.cc b/src/tools/other/pb2nc/pb2nc.cc index 444b828e29..ddab9a5e8c 100644 --- a/src/tools/other/pb2nc/pb2nc.cc +++ b/src/tools/other/pb2nc/pb2nc.cc @@ -649,7 +649,7 @@ void get_variable_info(const char* tbl_filename) { static const char *method_name = " get_variable_info()"; FILE * fp; - char * line = NULL; + char * line = nullptr; size_t len = 1024; ssize_t 
@@ -663,14 +663,14 @@ void get_variable_info(const char* tbl_filename) {
    fp = fopen(tbl_filename, "r");
    ConcatString input_data;
-   if (fp != NULL) {
+   if (fp != nullptr) {
       char var_name[BUFR_NAME_LEN+1];
       char var_desc[max(BUFR_DESCRIPTION_LEN,BUFR_SEQUENCE_LEN)+1];
       char var_unit_str[BUFR_UNIT_LEN+1];
       bool find_mnemonic = false;
       line = (char *)malloc(len * sizeof(char));
-      if( line == NULL) {
+      if( line == nullptr) {
         mlog << Error << "\n" << method_name << " -> "
              << "Unable to allocate buffer\n\n";
         exit(1);
@@ -678,8 +678,8 @@ void get_variable_info(const char* tbl_filename) {
      // Processing section 1
      int var_count1 = 0;
      while ((read = getline(&line, &len, fp)) != -1) {
-        if (NULL != strstr(line,"--------")) continue;
-        if (NULL != strstr(line,"MNEMONIC")) {
+        if (nullptr != strstr(line,"--------")) continue;
+        if (nullptr != strstr(line,"MNEMONIC")) {
           if (find_mnemonic) break;
           find_mnemonic = true;
           continue;
@@ -713,8 +713,8 @@ void get_variable_info(const char* tbl_filename) {
      // Skip section 2
      while ((read = getline(&line, &len, fp)) != -1) {
-        if (NULL != strstr(line,"MNEMONIC")) break;
-        if (NULL == strstr(line,"EVENT")) continue;
+        if (nullptr != strstr(line,"MNEMONIC")) break;
+        if (nullptr == strstr(line,"EVENT")) continue;
        m_strncpy(var_name, (line+BUFR_NAME_START), BUFR_NAME_LEN, method_name, "var_name2", true);
@@ -723,7 +723,7 @@ void get_variable_info(const char* tbl_filename) {
          if (' ' != var_name[idx] ) break;
          var_name[idx] = '\0';
        }
-       //if (NULL == strstr(var_name,"EVENT")) continue;
+       //if (nullptr == strstr(var_name,"EVENT")) continue;
        m_strncpy(var_desc, (line+BUFR_SEQUENCE_START), BUFR_SEQUENCE_LEN, method_name, "var_desc2", true);
@@ -754,7 +754,7 @@ void get_variable_info(const char* tbl_filename) {
          var_name[idx] = '\0';
        }
-       if (NULL != strstr(line,"CCITT IA5")) {
+       if (nullptr != strstr(line,"CCITT IA5")) {
          ascii_vars.add(var_name);
          m_strncpy(var_unit_str, "CCITT IA5", sizeof(var_unit_str), method_name, "var_unit_str1", true);
@@ -882,7 +882,7 @@ void process_pbfile(int i_pb) {
    // Build the temporary block file name
    blk_prefix << conf_info.tmp_dir << "/" << "tmp_pb2nc_blk";
-   blk_file = make_temp_file_name(blk_prefix.c_str(), NULL);
+   blk_file = make_temp_file_name(blk_prefix.c_str(), nullptr);
    mlog << Debug(1) << "Blocking Bufr file to:\t" << blk_file << "\n";
@@ -2113,7 +2113,7 @@ void process_pbfile_metadata(int i_pb) {
    blk_prefix  << conf_info.tmp_dir << "/" << "tmp_pb2nc_meta_blk";
    blk_prefix2 << conf_info.tmp_dir << "/" << "tmp_pb2nc_tbl_blk";
-   blk_file = make_temp_file_name(blk_prefix2.c_str(), NULL);
+   blk_file = make_temp_file_name(blk_prefix2.c_str(), nullptr);
    mlog << Debug(3) << " Blocking Bufr file (metadata) to:\t" << blk_file << "\n";
@@ -2129,7 +2129,7 @@ void process_pbfile_metadata(int i_pb) {
    // Assume that the input PrepBufr file is unblocked.
    // Block the PrepBufr file and open it for reading.
    unit = dump_unit + i_pb;
-   blk_file = make_temp_file_name(blk_prefix.c_str(), NULL);
+   blk_file = make_temp_file_name(blk_prefix.c_str(), nullptr);
    pblock(file_name.c_str(), blk_file.c_str(), block);
    if (unit > MAX_FORTRAN_FILE_ID || unit < MIN_FORTRAN_FILE_ID) {
       mlog << Error << "\n" << method_name << " -> "
diff --git a/src/tools/other/point2grid/point2grid.cc b/src/tools/other/point2grid/point2grid.cc
index e32604d147..3a7b69affd 100644
--- a/src/tools/other/point2grid/point2grid.cc
+++ b/src/tools/other/point2grid/point2grid.cc
@@ -2203,10 +2203,10 @@ void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping,
    to_dp.set_size(to_lon_count, to_lat_count);
    if (data_size > 0) {
-      float *latitudes = (float *)NULL;
-      float *longitudes = (float *)NULL;
-      float *latitudes_buf = (float *)NULL;
-      float *longitudes_buf = (float *)NULL;
+      float *latitudes = (float *)nullptr;
+      float *longitudes = (float *)nullptr;
+      float *latitudes_buf = (float *)nullptr;
+      float *longitudes_buf = (float *)nullptr;
       int buff_size = data_size*sizeof(float);
       GoesImagerData grid_data;
       grid_data.reset();
diff --git a/src/tools/other/wwmca_tool/wwmca_plot.cc b/src/tools/other/wwmca_tool/wwmca_plot.cc
index 039228e407..6ff6c001ab 100644
--- a/src/tools/other/wwmca_tool/wwmca_plot.cc
+++ b/src/tools/other/wwmca_tool/wwmca_plot.cc
@@ -201,8 +201,6 @@
 cout << "\nUsage: " << program_name << "\n"
 exit ( 1 );
-return;
-
 }
diff --git a/src/tools/other/wwmca_tool/wwmca_regrid.cc b/src/tools/other/wwmca_tool/wwmca_regrid.cc
index b410e714f0..627d02eb04 100644
--- a/src/tools/other/wwmca_tool/wwmca_regrid.cc
+++ b/src/tools/other/wwmca_tool/wwmca_regrid.cc
@@ -212,8 +212,6 @@
 cout << "\nUsage: " << program_name << "\n"
 exit ( 1 );
-return;
-
 }
diff --git a/src/tools/tc_utils/rmw_analysis/rmw_analysis.cc b/src/tools/tc_utils/rmw_analysis/rmw_analysis.cc
index 1a5f9ea899..b3206ce23f 100644
--- a/src/tools/tc_utils/rmw_analysis/rmw_analysis.cc
+++ b/src/tools/tc_utils/rmw_analysis/rmw_analysis.cc
@@ -198,8 +198,8 @@ void setup() {
    }
    mlog << Debug(2)
-        << "(n_range, n_azimuth, n_level) = ("
-        << n_range << ", " << n_azimuth << ", " << n_level << ")\n";
+        << "(n_level, n_range, n_azimuth) = ("
+        << n_level << ", " << n_range << ", " << n_azimuth << ")\n";

    // Get dimension coordinates
    vector<size_t> start;
@@ -255,11 +255,11 @@ void setup() {
    DataCube* data_count_2d = new DataCube();
    DataCube* data_mean_2d = new DataCube();
    DataCube* data_stdev_2d = new DataCube();
    DataCube* data_max_2d = new DataCube();
    DataCube* data_min_2d = new DataCube();
-   data_count_2d->set_size(n_range, n_azimuth, 1);
-   data_mean_2d->set_size(n_range, n_azimuth, 1);
-   data_stdev_2d->set_size(n_range, n_azimuth, 1);
-   data_max_2d->set_size(n_range, n_azimuth, 1);
-   data_min_2d->set_size(n_range, n_azimuth, 1);
+   data_count_2d->set_size(1, n_range, n_azimuth);
+   data_mean_2d->set_size(1, n_range, n_azimuth);
+   data_stdev_2d->set_size(1, n_range, n_azimuth);
+   data_max_2d->set_size(1, n_range, n_azimuth);
+   data_min_2d->set_size(1, n_range, n_azimuth);
    data_count_2d->set_constant(0);
    data_mean_2d->set_constant(0);
@@ -281,11 +281,11 @@ void setup() {
    DataCube* data_count_3d = new DataCube();
    DataCube* data_mean_3d = new DataCube();
    DataCube* data_stdev_3d = new DataCube();
    DataCube* data_max_3d = new DataCube();
    DataCube* data_min_3d = new DataCube();
-   data_count_3d->set_size(n_range, n_azimuth, n_level);
-   data_mean_3d->set_size(n_range, n_azimuth, n_level);
-   data_stdev_3d->set_size(n_range, n_azimuth, n_level);
-   data_max_3d->set_size(n_range, n_azimuth, n_level);
-   data_min_3d->set_size(n_range, n_azimuth, n_level);
+   data_count_3d->set_size(n_level, n_range, n_azimuth);
+   data_mean_3d->set_size(n_level, n_range, n_azimuth);
+   data_stdev_3d->set_size(n_level, n_range, n_azimuth);
+   data_max_3d->set_size(n_level, n_range, n_azimuth);
+   data_min_3d->set_size(n_level, n_range, n_azimuth);
    data_count_3d->set_constant(0);
    data_mean_3d->set_constant(0);
@@ -312,10 +312,10 @@ void process_files() {
    DataCube data_3d;
    DataCube data_3d_sq;
-   data_2d.set_size(n_range, n_azimuth, 1);
-   data_2d_sq.set_size(n_range, n_azimuth, 1);
-   data_3d.set_size(n_range, n_azimuth, n_level);
-   data_3d_sq.set_size(n_range, n_azimuth, n_level);
+   data_2d.set_size(1, n_range, n_azimuth);
+   data_2d_sq.set_size(1, n_range, n_azimuth);
+   data_3d.set_size(n_level, n_range, n_azimuth);
+   data_3d_sq.set_size(n_level, n_range, n_azimuth);

    // Set up array track point slices
    vector<size_t> start_2d;
@@ -325,17 +325,17 @@ void process_files() {
    start_2d.push_back(0);
    start_2d.push_back(0);
    start_2d.push_back(0);
+   count_2d.push_back(1);
    count_2d.push_back(n_range);
    count_2d.push_back(n_azimuth);
-   count_2d.push_back(1);
    start_3d.push_back(0);
    start_3d.push_back(0);
    start_3d.push_back(0);
    start_3d.push_back(0);
+   count_3d.push_back(1);
+   count_3d.push_back(n_level);
    count_3d.push_back(n_range);
    count_3d.push_back(n_azimuth);
-   count_3d.push_back(n_level);
-   count_3d.push_back(1);
    for(int i_file = 0; i_file < data_files.n_elements(); i_file++) {
       mlog << Debug(1) << "Processing "
@@ -362,7 +362,7 @@ void process_files() {
    for(int i_track = 0; i_track < n_track_point; i_track++) {
       if (data_n_dims[i_var] == 2) {
-         start_2d[2] = (size_t) i_track;
+         start_2d[0] = (size_t) i_track;
          mlog << Debug(4) << data_names[i_var] << i_track << "\n";
          var.getVar(start_2d, count_2d, data_2d.data());
@@ -387,7 +387,7 @@ void process_files() {
       }
       if (data_n_dims[i_var] == 3) {
          mlog << Debug(4) << data_names[i_var] << i_track << "\n";
-         start_3d[3] = (size_t) i_track;
+         start_3d[0] = (size_t) i_track;
          var.getVar(start_3d, count_3d, data_3d.data());
          // Update partial sums
@@ -442,22 +442,22 @@ void write_stats() {
    dims_2d.push_back(azimuth_dim);
    vector<NcDim> dims_3d;
+   dims_3d.push_back(level_dim);
    dims_3d.push_back(range_dim);
    dims_3d.push_back(azimuth_dim);
-   dims_3d.push_back(level_dim);
+
+   NcVar level_var = nc_out->addVar(level_name, ncDouble, level_dim);
    NcVar range_var = nc_out->addVar("range", ncDouble, range_dim);
    NcVar azimuth_var = nc_out->addVar("azimuth", ncDouble, azimuth_dim);
-   NcVar level_var = nc_out->addVar(level_name, ncDouble, level_dim);
    vector<size_t> offset;
    vector<size_t> count_range;
    vector<size_t> count_azimuth;
    vector<size_t> count_level;
    offset.push_back(0);
+   count_level.push_back(n_level);
    count_range.push_back(n_range);
    count_azimuth.push_back(n_azimuth);
-   count_level.push_back(n_level);
    for (int r = 0; r < n_range; r++) {
       range_coord[r] = r;
@@ -485,9 +485,9 @@ void write_stats() {
    offset_3d.push_back(0);
    offset_3d.push_back(0);
    offset_3d.push_back(0);
+   count_3d.push_back(n_level);
    count_3d.push_back(n_range);
    count_3d.push_back(n_azimuth);
-   count_3d.push_back(n_level);
    for(int i_var = 0; i_var < data_names.size(); i_var++) {
       if (data_n_dims[i_var] == 2) {
diff --git a/src/tools/tc_utils/tc_gen/tc_gen.cc b/src/tools/tc_utils/tc_gen/tc_gen.cc
index 9d96664a13..a1d6e0a4a9 100644
--- a/src/tools/tc_utils/tc_gen/tc_gen.cc
+++ b/src/tools/tc_utils/tc_gen/tc_gen.cc
@@ -523,7 +523,7 @@ void score_genesis_shape(const GenesisInfoArray &best_ga) {
    // Get the list of input shapefiles
    for(i=0; i<shape_files.n(); i++) {
diff --git a/src/tools/tc_utils/tc_rmw/tc_rmw.cc b/src/tools/tc_utils/tc_rmw/tc_rmw.cc
--- a/src/tools/tc_utils/tc_rmw/tc_rmw.cc
+++ b/src/tools/tc_utils/tc_rmw/tc_rmw.cc
@@ ... @@ void setup_nc_file() {
       mlog << Debug(4) << data_info->magic_str() << "\n";
-      string fname = data_info->name_attr();
+       string fname = data_info->name_attr();
       variable_levels[fname].push_back(data_info->level_attr());
       variable_long_names[fname] = data_info->long_name_attr();
       variable_units[fname] = data_info->units_attr();
-      wind_converter.update_input(fname, data_info->units_attr());
+       wind_converter.update_input(fname, data_info->units_attr());
    }

    // Define pressure levels
    pressure_level_strings = get_pressure_level_strings(variable_levels);
    pressure_levels = get_pressure_levels(pressure_level_strings);
-   pressure_level_indices
-       = get_pressure_level_indices(pressure_level_strings, pressure_levels);
+   pressure_level_indices =
+       get_pressure_level_indices(pressure_level_strings, pressure_levels);
    pressure_dim = add_dim(nc_out, "pressure", pressure_levels.size());
    def_tc_pressure(nc_out, pressure_dim, pressure_levels);
@@ -611,7 +613,7 @@ void setup_nc_file() {
    def_tc_variables(nc_out, variable_levels, variable_long_names, variable_units,
-                    range_dim, azimuth_dim, pressure_dim, track_point_dim,
+                    track_point_dim, pressure_dim, range_dim, azimuth_dim,
                     data_3d_vars);
 }
@@ -628,7 +630,7 @@ void compute_lat_lon(TcrmwGrid& tcrmw_grid,
         tcrmw_grid.range_azi_to_latlon(
             ir * tcrmw_grid.range_delta_km(),
             ia * tcrmw_grid.azimuth_delta_deg(),
-                lat, lon);
+            lat, lon);
         lat_arr[i] = lat;
         lon_arr[i] = - lon;
     }
@@ -638,7 +640,6 @@ void compute_lat_lon(TcrmwGrid& tcrmw_grid,
 ////////////////////////////////////////////////////////////////////////

 void process_fields(const TrackInfoArray& tracks) {
-    VarInfo *data_info = (VarInfo *) 0;
     DataPlane data_dp;
@@ -695,14 +696,13 @@ void process_fields(const TrackInfoArray& tracks) {
             // Update the variable info with the valid time of the track point
             data_info = conf_info.data_info[i_var];
-            string sname = data_info->name_attr().string();
-            string slevel = data_info->level_attr().string();
+            string sname  = data_info->name_attr().string();
+            string slevel = data_info->level_attr().string();
-            data_info->set_valid(valid_time);
+            data_info->set_valid(valid_time);
             // Find data for this track point
-            get_series_entry(i_point, data_info, data_files, ftype, data_dp,
-                latlon_arr);
+            get_series_entry(i_point, data_info, data_files, ftype, data_dp, latlon_arr);
             // Check data range
             double data_min, data_max;
@@ -711,32 +711,31 @@ void process_fields(const TrackInfoArray& tracks) {
             mlog << Debug(4) << "data_max:" << data_max << "\n";
             // Regrid data
-            data_dp = met_regrid(data_dp,
-                latlon_arr, grid, data_info->regrid());
-            data_dp.data_range(data_min, data_max);
+            data_dp = met_regrid(data_dp, latlon_arr, grid, data_info->regrid());
+            data_dp.data_range(data_min, data_max);
             mlog << Debug(4) << "data_min:" << data_min << "\n";
             mlog << Debug(4) << "data_max:" << data_max << "\n";
-            // if this is "U", setup everything for matching "V" and compute the radial/tangential
-            if (wind_converter.compute_winds_if_input_is_u(i_point, sname, slevel, valid_time, data_files, ftype,
-                latlon_arr, lat_arr, lon_arr, grid, data_dp, tcrmw_grid))
-            {
-                write_tc_pressure_level_data(nc_out, tcrmw_grid,
-                    pressure_level_indices, data_info->level_attr(), i_point,
-                    data_3d_vars[conf_info.radial_velocity_field_name.string()],
-                    wind_converter.get_wind_r_arr());
-                write_tc_pressure_level_data(nc_out, tcrmw_grid,
-                    pressure_level_indices, data_info->level_attr(), i_point,
-                    data_3d_vars[conf_info.tangential_velocity_field_name.string()],
-                    wind_converter.get_wind_t_arr());
-            }
+            // if this is "U", setup everything for matching "V" and compute the radial/tangential
+            if(wind_converter.compute_winds_if_input_is_u(i_point, sname, slevel, valid_time, data_files, ftype,
+                                                          latlon_arr, lat_arr, lon_arr, grid, data_dp, tcrmw_grid)) {
+               write_tc_pressure_level_data(nc_out, tcrmw_grid,
+                  pressure_level_indices, data_info->level_attr(), i_point,
+                  data_3d_vars[conf_info.radial_velocity_field_name.string()],
+                  wind_converter.get_wind_r_arr());
+               write_tc_pressure_level_data(nc_out, tcrmw_grid,
+                  pressure_level_indices, data_info->level_attr(), i_point,
+                  data_3d_vars[conf_info.tangential_velocity_field_name.string()],
+                  wind_converter.get_wind_t_arr());
+            }

             // Write data
-            if (variable_levels[data_info->name_attr()].size() > 1) {
+            if(variable_levels[data_info->name_attr()].size() > 1) {
                 write_tc_pressure_level_data(nc_out, tcrmw_grid,
                     pressure_level_indices, data_info->level_attr(), i_point,
                     data_3d_vars[data_info->name_attr()], data_dp.data());
-            } else {
+            }
+            else {
                 write_tc_data_rev(nc_out, tcrmw_grid, i_point,
                     data_3d_vars[data_info->name_attr()], data_dp.data());
             }
diff --git a/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.cc b/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.cc
index 9cd48fdf93..0755bcdeb5 100644
--- a/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.cc
+++ b/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.cc
@@ -36,25 +36,25 @@ static void wind_ne_to_ra(const TcrmwGrid&,

 void TCRMW_WindConverter::_free_winds_arrays(void) {
-  if (_windR != NULL) {
+  if (_windR != nullptr) {
     delete [] _windR;
-    _windR = NULL;
+    _windR = nullptr;
   }
-  if (_windT != NULL) {
+  if (_windT != nullptr) {
     delete [] _windT;
-    _windT = NULL;
+    _windT = nullptr;
   }
 }

 ////////////////////////////////////////////////////////////////////////

 TCRMW_WindConverter::TCRMW_WindConverter(void) :
-  _windR(NULL),
-  _windT(NULL),
+  _windR(nullptr),
+  _windT(nullptr),
   _foundUInInput(false),
   _foundVInInput(false),
   _unitsU("Unknown"),
-  _conf(NULL),
+  _conf(nullptr),
   _computeWinds(false) {
 }
diff --git a/src/tools/tc_utils/tc_stat/tc_stat.cc b/src/tools/tc_utils/tc_stat/tc_stat.cc
index 0d9464c9cf..16fa40ca1b 100644
--- a/src/tools/tc_utils/tc_stat/tc_stat.cc
+++ b/src/tools/tc_utils/tc_stat/tc_stat.cc
@@ -148,7 +148,7 @@ void process_command_line(int argc, char **argv) {
 void process_search_dirs() {
    // Retrieve the file lists
-   tcst_files = get_filenames(tcst_source, NULL, tc_stat_file_ext);
+   tcst_files = get_filenames(tcst_source, nullptr, tc_stat_file_ext);
    // Check for matching files
    if(tcst_files.n_elements() == 0) {
diff --git a/src/tools/tc_utils/tc_stat/tc_stat_job.cc b/src/tools/tc_utils/tc_stat/tc_stat_job.cc
index 62283ee98d..917df5b7cf 100644
--- a/src/tools/tc_utils/tc_stat/tc_stat_job.cc
+++ b/src/tools/tc_utils/tc_stat/tc_stat_job.cc
@@ -237,6 +237,7 @@ void TCStatJob::clear() {
    StatFile.clear();
    close_stat_file();
    StatOut = (ofstream *) 0;
+   stat_row = 0;

    // Set to default values
    WaterOnly = default_water_only;
@@ -728,7 +729,7 @@ bool TCStatJob::is_keeper_line(const TCStatLine &line,
    else if(Desc.n() > 0 && !Desc.has(line.desc()))
      { keep = false; n.RejDesc++; }
    else if(StormId.n() > 0 &&
-      !has_storm_id(StormId, (string)line.basin(), (string)line.cyclone(), line.init()))
+           !has_storm_id(StormId, (string)line.basin(), (string)line.cyclone(), line.init()))
      { keep = false; n.RejStormId++; }
    else if(Basin.n() > 0 && !Basin.has(line.basin()))
      { keep = false; n.RejBasin++; }
@@ -1169,6 +1170,8 @@ void TCStatJob::open_stat_file() {
    close_stat_file();
+
+   stat_row = 0;

    if(StatFile.empty()) return;

    StatOut = new ofstream;
@@ -1431,7 +1434,7 @@ ConcatString TCStatJob::serialize() const {
    if(DumpFile.length() > 0)
       s << "-dump_row " << DumpFile << " ";
    if(StatFile.length() > 0)
-      s << "-stat_row " << StatFile << " ";
+      s << "-out_stat " << StatFile << " ";

    return(s);
 }
@@ -2591,7 +2594,7 @@ void TCStatJobSummary::compute_fsp(NumArray &total, NumArray &best,
       // Check if FSP should be computed for this column
       s = to_upper(Column[i]);
-      if(strstr(s.c_str(), "-") == NULL && strstr(s.c_str(), "ERR") == NULL) {
+      if(strstr(s.c_str(), "-") == nullptr && strstr(s.c_str(), "ERR") == nullptr) {
          mlog << Debug(4)
               << "Skipping frequency of superior performance for "
               << "column \"" << Column[i] << "\" since it is not an "
@@ -2964,6 +2967,8 @@ void TCStatJobRIRW::clear() {
    for(i=0; i<4; i++) DumpFileCTC[i].clear();
    close_dump_file();
+   close_stat_file();
+
    // Set to default values
    OutAlpha = default_tc_alpha;
    OutLineType.clear();
@@ -3163,9 +3168,17 @@ void TCStatJobRIRW::do_job(const StringArray &file_list,
    close_dump_file();

    // Process the RI/RW job output
-   if(JobOut) do_output(*JobOut);
-   else       do_output(cout);
-
+   if(StatOut)
+      do_stat_output(*StatOut);
+   else if(JobOut)
+      do_output(*JobOut);
+   else
+      do_output(cout);
+
+   // Close the stat file
+   if(StatOut)
+      close_stat_file();
+
    return;
 }
@@ -3300,7 +3313,21 @@ void TCStatJobRIRW::process_pair(TrackPairInfo &pair) {
          << bprv << sep << bcur << sep << bdlt << sep
          << (is_bad_data(b) ? na_str : bool_to_string(b)) << sep
          << cat;
+
+      // Track unique header column strings
+      cur_map[key].AModel.add_uniq(pair.tcmpr_line(i)->amodel());
+      cur_map[key].BModel.add_uniq(pair.tcmpr_line(i)->bmodel());
+      cur_map[key].Desc.add_uniq(pair.tcmpr_line(i)->desc());
+      cur_map[key].Basin.add_uniq(pair.tcmpr_line(i)->basin());
+      cur_map[key].InitMask.add_uniq(pair.tcmpr_line(i)->init_mask());
+      cur_map[key].ValidMask.add_uniq(pair.tcmpr_line(i)->valid_mask());
+
+      // Track timing information
       cur_map[key].Hdr.add(cur);
+      cur_map[key].Init.add(pair.tcmpr_line(i)->init());
+      cur_map[key].Lead.add(pair.tcmpr_line(i)->lead());
+      cur_map[key].Valid.add(pair.tcmpr_line(i)->valid());
+
    } // end for i

    // Add the current map
@@ -3355,7 +3382,17 @@ void TCStatJobRIRW::add_map(map<ConcatString,RIRWMapData>&m) {
       RIRWMap[it->first].Info.cts.set_fn_on(
          RIRWMap[it->first].Info.cts.fn_on() +
          it->second.Info.cts.fn_on());
-      RIRWMap[it->first].Hdr.add(it->second.Hdr);
+
+      RIRWMap[it->first].Hdr.add_uniq(it->second.Hdr);
+      RIRWMap[it->first].AModel.add_uniq(it->second.AModel);
+      RIRWMap[it->first].BModel.add_uniq(it->second.BModel);
+      RIRWMap[it->first].Desc.add_uniq(it->second.Desc);
+      RIRWMap[it->first].Basin.add_uniq(it->second.Basin);
+      RIRWMap[it->first].InitMask.add_uniq(it->second.InitMask);
+      RIRWMap[it->first].ValidMask.add_uniq(it->second.ValidMask);
+      RIRWMap[it->first].Init.add(it->second.Init);
+      RIRWMap[it->first].Lead.add(it->second.Lead);
+      RIRWMap[it->first].Valid.add(it->second.Valid);
      }
   } // end for it
@@ -3459,7 +3496,7 @@ void TCStatJobRIRW::do_cts_output(ostream &out) {
    StringArray sa;
    int i, r, c;
    AsciiTable out_at;
-   
+
    // Format the output table
    out_at.set_size((int) RIRWMap.size() + 1,
                    9 + ByColumn.n() + n_cts_columns);
@@ -3634,6 +3671,271 @@ void TCStatJobRIRW::do_mpr_output(ostream &out) {

    return;
 }

+////////////////////////////////////////////////////////////////////////
+
+void TCStatJobRIRW::setup_stat_file(int n_row) {
+   STATLineType cur_lt, out_lt;
+   StringArray out_sa;
+   int i, c, n_col;
+
+   //
+   // Nothing to do if no output STAT file stream is defined
+   //
+   if(!StatOut) return;
+
+   //
+   // Check for a single output line type
+   //
+   out_sa = (OutLineType.n() > 0 ?
+             OutLineType : LineType);
+   out_lt = (out_sa.n() == 1 ?
+             string_to_statlinetype(out_sa[0].c_str()) : no_stat_line_type);
+
+   //
+   // Loop through the output line types and determine the number of
+   // output columns
+   //
+   for(i=0, c=0, n_col=0; i<out_sa.n(); i++) {
+      cur_lt = string_to_statlinetype(out_sa[i].c_str());
+      switch(cur_lt) {
+         case stat_ctc: c = n_ctc_columns; break;
+         case stat_cts: c = n_cts_columns; break;
+         default:
+            mlog << Error << "\nTCStatJobRIRW::setup_stat_file() -> "
+                 << "unexpected stat line type \"" << statlinetype_to_string(cur_lt)
+                 << "\"!\n"
+                 << "The line type \"" << statlinetype_to_string(cur_lt)
+                 << "\" is not supported when -out_stat is requested.\n\n";
+            exit(1);
+      }
+      if(c > n_col) n_col = c;
+   }
+
+   //
+   // Add the header columns
+   //
+   n_col += n_header_columns;
+
+   //
+   // Create table from scratch
+   //
+   if(stat_row == 0) {
+
+      //
+      // Multiply the number of rows by the number of
+      // output line types to avoid resizing later
+      //
+      n_row *= max(1, out_sa.n());
+
+      //
+      // Setup the STAT table
+      //
+      stat_at.set_size(n_row, n_col);
+      justify_stat_cols(stat_at);
+      stat_at.set_precision(Precision);
+      stat_at.set_bad_data_value(bad_data_double);
+      stat_at.set_bad_data_str(na_str);
+      stat_at.set_delete_trailing_blank_rows(1);
+
+      //
+      // Write the STAT header row
+      //
+      switch(out_lt) {
+         case stat_ctc:
+            write_header_row(ctc_columns, n_ctc_columns, 1, stat_at, 0, 0);
+            break;
+
+         case stat_cts:
+            write_header_row(cts_columns, n_cts_columns, 1, stat_at, 0, 0);
+            break;
+
+         // Write only header columns for unspecified line type
+         case no_stat_line_type:
+            write_header_row((const char **) 0, 0, 1, stat_at, 0, 0);
+            break;
+
+         default:
+            mlog << Error << "\nTCStatJobRIRW::setup_stat_file() -> "
+                 << "unexpected stat line type \"" << statlinetype_to_string(out_lt)
+                 << "\"!\n\n";
+            exit(1);
+      }
+
+      //
+      // Increment row counter
+      //
+      stat_row++;
+   }
+   //
+   // Expand the table, if needed
+   //
+   else {
+
+      //
+      // Determine the required dimensions
+      //
+      int need_rows = max(stat_at.nrows(), stat_row + n_row);
+      int need_cols = max(stat_at.ncols(), n_col);
+
+      if(need_rows > stat_at.nrows() || need_cols > stat_at.ncols()) {
+
+         //
+         // Resize the STAT table
+         //
+         stat_at.expand(need_rows, need_cols);
+         justify_stat_cols(stat_at);
+         stat_at.set_precision(Precision);
+         stat_at.set_bad_data_value(bad_data_double);
+         stat_at.set_bad_data_str(na_str);
+         stat_at.set_delete_trailing_blank_rows(1);
+      }
+   }
+
+   return;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+void TCStatJobRIRW::do_stat_output(ostream &out) {
+   map<ConcatString,RIRWMapData>::iterator it;
+   StatHdrColumns shc;
+   StringArray sa;
+   ConcatString cs;
+   TimeArray Valid;
+   unixtime valid_beg, valid_end;
+   int lead_time;
+   int i, j, r, c;
+
+   // Setup -out_stat file
+   setup_stat_file(1 + (int) RIRWMap.size());
+
+   //
+   // Setup stat header columns
+   //
+
+   // Set the output FCST_VAR column
+   cs << cs_erase << "RIRW_" << sec_to_timestring(RIRWTimeADeck)
+      << (!RIRWExactADeck ? "_MAX" : "");
+   shc.set_fcst_var(cs);
+
+   // Set the output OBS_VAR column
+   cs << cs_erase << "RIRW_" << sec_to_timestring(RIRWTimeBDeck)
+      << (!RIRWExactBDeck ? "_MAX" : "");
"_MAX" : ""); + shc.set_obs_var(cs); + + // Set the output FCST_THRESH and OBS_THRESH columns + shc.set_fcst_thresh(RIRWThreshADeck); + shc.set_obs_thresh(RIRWThreshBDeck); + + mlog << Debug(2) << "Computing output for " + << (int) RIRWMap.size() << " case(s).\n"; + + // + // Loop through the map + // + for(it = RIRWMap.begin(), r=1; it != RIRWMap.end(); it++) { + + // Set the output MODEL column + shc.set_model(write_css(it->second.AModel).c_str()); + + // Set the output OBTYPE column + shc.set_obtype(write_css(it->second.BModel).c_str()); + + // Set the output VX_MASK column + cs << cs_erase; + + // Add -out_init_mask name, if specified + if(OutInitMaskName.nonempty()) { + cs << OutInitMaskName; + } + // Add -out_valid_mask name, if specified + if(OutValidMaskName.nonempty()) { + if(cs.nonempty()) cs << ","; + cs << OutValidMaskName; + } + // If neither are specified, use input mask and/or basin names + if(cs.empty()) { + StringArray sa; + sa.add_uniq(it->second.InitMask); + sa.add_uniq(it->second.ValidMask); + + // Use the basin names instead + if(sa.n() == 1 && sa[0] == na_str) { + sa.clear(); + sa.add_uniq(it->second.Basin); + } + cs = write_css(sa); + } + + // Set shc mask name + shc.set_mask(cs.c_str()); + + // Set shc lead-time and valid-time variables + lead_time = it->second.Lead.max(); + valid_beg = it->second.Valid.min(); + valid_end = it->second.Valid.max(); + + shc.set_fcst_lead_sec(lead_time); + shc.set_fcst_valid_beg(valid_beg); + shc.set_fcst_valid_end(valid_end); + shc.set_obs_lead_sec(bad_data_int); + shc.set_obs_valid_beg(valid_beg); + shc.set_obs_valid_end(valid_end); + + // + // Write the output STAT header columns + // + + // + // Initialize + // + c = 0; + + // + // CTC output line + // + if(OutLineType.has(stat_ctc_str)) { + shc.set_alpha(bad_data_double); + shc.set_line_type(stat_ctc_str); + write_header_cols(shc, stat_at, stat_row); + write_ctc_cols(it->second.Info, stat_at, stat_row++, n_header_columns); + } + + // + // CTS output line + // + if(OutLineType.has(stat_cts_str)) { + + // + // Store the alpha information in the CTSInfo object + // + it->second.Info.allocate_n_alpha(1); + it->second.Info.alpha[0] = OutAlpha; + shc.set_alpha(OutAlpha); + + // + // Compute the stats and confidence intervals for this + // CTSInfo object + // + + it->second.Info.compute_stats(); + it->second.Info.compute_ci(); + + // + // Write the data line + // + shc.set_line_type(stat_cts_str); + write_header_cols(shc, stat_at, stat_row); + write_cts_cols(it->second.Info, 0, stat_at, stat_row++, n_header_columns); + } + } // end for it + + // Write the table + out << stat_at << "\n" << flush; + + return; +} + + //////////////////////////////////////////////////////////////////////// // // Code for class TCStatJobProbRIRW diff --git a/src/tools/tc_utils/tc_stat/tc_stat_job.h b/src/tools/tc_utils/tc_stat/tc_stat_job.h index cb4b250a47..0a0db927de 100644 --- a/src/tools/tc_utils/tc_stat/tc_stat_job.h +++ b/src/tools/tc_utils/tc_stat/tc_stat_job.h @@ -74,6 +74,15 @@ struct SummaryMapData { struct RIRWMapData { CTSInfo Info; StringArray Hdr; + StringArray AModel; + StringArray BModel; + StringArray Desc; + StringArray Basin; + StringArray InitMask; + StringArray ValidMask; + TimeArray Init; + NumArray Lead; + TimeArray Valid; }; //////////////////////////////////////////////////////////////////////// @@ -317,6 +326,8 @@ class TCStatJob { // Derived output statistics ConcatString StatFile; // File name for output statistics std::ofstream *StatOut; // Output statistics file stream + 
+     AsciiTable stat_at;     // AsciiTable for buffering output STAT data
+     int stat_row;           // Counter for the current stat row

      // Polyline masking regions
      ConcatString OutInitMaskFile;
@@ -486,11 +497,14 @@ class TCStatJobRIRW : public TCStatJob {

       void add_map(std::map<ConcatString,RIRWMapData>&);

-      void do_output    (std::ostream &);
-      void do_ctc_output(std::ostream &);
-      void do_cts_output(std::ostream &);
-      void do_mpr_output(std::ostream &);
-
+      void setup_stat_file(int n_row);
+
+      void do_output     (std::ostream &);
+      void do_ctc_output (std::ostream &);
+      void do_cts_output (std::ostream &);
+      void do_mpr_output (std::ostream &);
+      void do_stat_output(std::ostream &);
+
       // Store the case information
       StringArray ByColumn;
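For reference, the RIRW -out_stat plumbing added above (the do_job() dispatch, setup_stat_file(), and do_stat_output()) is driven from an ordinary tc_stat job. A sketch of one possible invocation — the input path, threshold, and time window below are illustrative placeholders, not values taken from this changeset — might look like:

    tc_stat \
       -lookin path/to/tcst_files \
       -job rirw -rirw_time 24 -rirw_thresh ge30 \
       -out_line_type CTC,CTS \
       -out_stat tc_stat_rirw.stat

As setup_stat_file() enforces, only the CTC and CTS line types may be routed to the -out_stat file; requesting any other line type with -out_stat hits the error path and exits.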