diff --git a/met/docs/Users_Guide/reformat_point.rst b/met/docs/Users_Guide/reformat_point.rst index cca0be2891..2dfdce4b1c 100644 --- a/met/docs/Users_Guide/reformat_point.rst +++ b/met/docs/Users_Guide/reformat_point.rst @@ -952,4 +952,58 @@ The point2grid tool will output a gridded NetCDF file containing the following: 6. The probability field which is the probability of the event defined by the **-prob_cat_thresh** command line option. The output variable name includes the threshold used to define the probability. Ranges from 0 to 1. -7. The probability mask field which is a binary field that represents whether or not there is probability data at that grid point. Can be either “0” or “1” with “0” meaning the probability value does not exist and a value of “1” meaning that the probability value does exist. +7. The probability mask field which is a binary field that represents whether or not there is probability data at that grid point. Can be either “0” or “1” with “0” meaning the probability value does not exist and a value of “1” meaning that the probability value does exist. + +For MET observation input and CF compliant NetCDF input with 2D time variable: The latest observation time within the target grid is saved as the observation time. If the "valid_time" is configured in the configuration file, the valid_time from the configuration file is saved into the output file. + +point2grid configuration file +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +The default configuration file for the point2grid tool named **Point2GridConfig_default** can be found in the installed *share/met/config* directory. It is recommended that users make a copy of this file prior to modifying its contents. + +The point2grid configuration file is optional and only necessary when defining the variable name instead of GRIB code or filtering by time. The contents of the default point2grid configuration file are described below. + +_______________ + + +.. 
code-block:: none + + version = "VN.N"; + + +The configuration options listed above are common to many MET tools and are described in :numref:`config_options`. + +__________________ + + +.. code-block:: none + + valid_time = "YYYYMMDD_HHMMSS"; + +This entry is a string to override the observation time into the output and to filter observation data by time. + +.. code-block:: none + + obs_window = { + beg = -5400; + end = 5400; + } + +The configuration option listed above is common to many MET tools and is described in :numref:`config_options`. + +.. code-block:: none + + var_name_map = [ + { key = "1"; val = "PRES"; }, // GRIB: Pressure + { key = "2"; val = "PRMSL"; }, // GRIB: Pressure reduced to MSL + { key = "7"; val = "HGT"; }, // GRIB: Geopotential height + { key = "11"; val = "TMP"; }, // GRIB: Temperature + { key = "15"; val = "TMAX"; }, // GRIB: Max Temperature + ... + ] + +This entry is an array of dictionaries, each containing a **GRIB code** string and matching **variable name** string which define a mapping of GRIB code to the output variable names. 
+ + + diff --git a/met/src/libcode/vx_data2d_nccf/nccf_file.cc b/met/src/libcode/vx_data2d_nccf/nccf_file.cc index f6499d0d47..4e2ae19e45 100644 --- a/met/src/libcode/vx_data2d_nccf/nccf_file.cc +++ b/met/src/libcode/vx_data2d_nccf/nccf_file.cc @@ -95,6 +95,8 @@ void NcCfFile::init_from_scratch() _xDim = (NcDim *)0; _yDim = (NcDim *)0; _tDim = (NcDim *)0; + _latVar = (NcVar *)0; + _lonVar = (NcVar *)0; _xCoordVar = (NcVar *)0; _yCoordVar = (NcVar *)0; @@ -238,6 +240,8 @@ bool NcCfFile::open(const char * filepath) valid_time_var = Var[j].var; _time_var_info = &Var[j]; } + else if( "latitude" == att_value ) _latVar = Var[j].var; + else if( "longitude" == att_value ) _lonVar = Var[j].var; } if ( Var[j].name == "time" && (valid_time_var == 0)) { valid_time_var = Var[j].var; @@ -280,7 +284,8 @@ bool NcCfFile::open(const char * filepath) else { // Store the dimension for the time variable as the time dimension - if (get_dim_count(valid_time_var) > 0) { + int time_dim_count = get_dim_count(valid_time_var); + if (time_dim_count == 1) { NcDim tDim = get_nc_dim(valid_time_var, 0); if (IS_VALID_NC(tDim)) { _tDim = new NcDim(tDim); @@ -314,15 +319,23 @@ bool NcCfFile::open(const char * filepath) } if (units_att) delete units_att; - // Determine the number of times present. 
int n_times = (int) get_data_size(valid_time_var); - - bool no_leap_year = get_att_no_leap_year(valid_time_var); - for(int i=0; i 1 ) { + double latest_time = bad_data_double; + for(int i=0; igetSize(), _xDim->getSize()); + status = true; + } if (!status || !((_xDim && _yDim) || @@ -1896,90 +1923,8 @@ void NcCfFile::get_grid_mapping_latitude_longitude(const NcVar *grid_mapping_var exit(1); } - // Figure out the dlat/dlon values from the dimension variables - - double lat_values[lat_counts]; - - //_yCoordVar->get(lat_values, &lat_counts); - get_nc_data(_yCoordVar, lat_values); - - - double lon_values[lon_counts]; - - //_xCoordVar->get(lon_values, &lon_counts); - get_nc_data(_xCoordVar, lon_values); - - // Calculate dlat and dlon assuming they are constant. MET requires that - // dlat be equal to dlon - - double dlat = lat_values[1] - lat_values[0]; - double dlon = rescale_lon(lon_values[1] - lon_values[0]); - - if (fabs(dlat - dlon) > DELTA_TOLERANCE) - { - mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the delta lat and delta lon are the same\n\n"; - exit(1); - } - - // As a sanity check, make sure that the deltas are constant through the - // entire grid. CF compliancy doesn't require this, but MET does. 
+ get_grid_from_lat_lon_vars(_yCoordVar, _xCoordVar, lat_counts, lon_counts); - ConcatString point_nccf; - bool skip_sanity_check = get_att_value_string(_ncFile, nc_att_met_point_nccf, point_nccf); - if (!skip_sanity_check) { - get_env(nc_att_met_point_nccf, point_nccf); - skip_sanity_check = point_nccf == "yes"; - } - if (!skip_sanity_check) { - for (int i = 1; i < lat_counts; ++i) - { - double curr_delta = lat_values[i] - lat_values[i-1]; - if (fabs(curr_delta - dlat) > DELTA_TOLERANCE) - { - mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the lat delta is constant\n\n"; - exit(1); - } - } - - for (int i = 1; i < lon_counts; ++i) - { - double curr_delta = rescale_lon(lon_values[i] - lon_values[i-1]); - if (fabs(curr_delta - dlon) > DELTA_TOLERANCE) - { - mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the lon delta is constant\n\n"; - exit(1); - } - } - } - - // Fill in the data structure. Remember to negate the longitude - // values since MET uses the mathematical coordinate system centered on - // the center of the earth rather than the regular map coordinate system. - - // Note that I am assuming that the data is ordered from the lower-left - // corner. I think this will generally be the case, but it is not - // guaranteed anywhere that I see. But if this is not the case, then we - // will probably also need to reorder the data itself. 
- - LatLonData data; - - data.name = latlon_proj_type; - data.lat_ll = lat_values[0]; - data.lon_ll = -lon_values[0]; - data.delta_lat = dlat; - data.delta_lon = dlon; - data.Nlat = _yDim->getSize(); - data.Nlon = _xDim->getSize(); - if (dlat < 0) { - data.delta_lat = -dlat; - data.lat_ll = lat_values[lat_counts-1]; - } - - grid.set(data); - if (dlat < 0) grid.set_swap_to_north(true); } @@ -2716,14 +2661,11 @@ bool NcCfFile::get_grid_from_coordinates(const NcVar *data_var) { long lat_counts = GET_NC_SIZE(cur_yDim); long lon_counts = GET_NC_SIZE(cur_xDim); - bool two_dim_corrd = false; - - if (get_data_size(_xCoordVar) == (lon_counts*lat_counts) || - get_data_size(_yCoordVar) == (lon_counts*lat_counts)) { - two_dim_corrd = true; - } - else if (get_data_size(_xCoordVar) != lon_counts || - get_data_size(_yCoordVar) != lat_counts) + long x_size = get_data_size(_xCoordVar); + long y_size = get_data_size(_yCoordVar); + long latlon_counts = lon_counts*lat_counts; + if ((x_size != lon_counts && x_size != latlon_counts) || + (y_size != lat_counts && x_size != latlon_counts)) { mlog << Error << "\n" << method_name << " -> " << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; @@ -2731,108 +2673,11 @@ bool NcCfFile::get_grid_from_coordinates(const NcVar *data_var) { exit(1); } - // Figure out the dlat/dlon values from the dimension variables - - double lat_values[lat_counts]; - double lon_values[lon_counts]; - - //_yCoordVar->get(lat_values, &lat_counts); - if (two_dim_corrd) { - long cur[2], length[2]; - for (int i=0; i<2; i++) { - cur[i] = 0; - length[i] = 1; - } - length[0] = lat_counts; - get_nc_data(_yCoordVar,lat_values, length, cur); - length[1] = lon_counts; - length[0] = 1; - get_nc_data(_xCoordVar,lon_values, length, cur); - } - else { - get_nc_data(_yCoordVar,lat_values); - get_nc_data(_xCoordVar,lon_values); - } - - // Calculate dlat and dlon assuming they are constant. 
MET requires that - // dlat be equal to dlon - - double dlat = lat_values[1] - lat_values[0]; - double dlon = rescale_lon(lon_values[1] - lon_values[0]); - - if (fabs(dlat - dlon) > DELTA_TOLERANCE) - { - mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the delta lat and delta lon are the same\n\n"; - if (coordinates_att) delete coordinates_att; - exit(1); - } - - // As a sanity check, make sure that the deltas are constant through the - // entire grid. CF compliancy doesn't require this, but MET does. - - ConcatString point_nccf; - bool skip_sanity_check = get_att_value_string(_ncFile, nc_att_met_point_nccf, point_nccf); - if (!skip_sanity_check) { - get_env(nc_att_met_point_nccf, point_nccf); - skip_sanity_check = point_nccf == "yes"; - } - if (!skip_sanity_check) { - for (int i = 1; i < (int)lat_counts; ++i) - { - if ((fabs(lat_missing_value - lat_values[i]) < DELTA_TOLERANCE) || - (fabs(lat_missing_value - lat_values[i-1]) < DELTA_TOLERANCE)) continue; - double curr_delta = lat_values[i] - lat_values[i-1]; - if (fabs(curr_delta - dlat) > DELTA_TOLERANCE) - { - mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the lat delta is constant (dlat=" - << dlat <<", dlon=" << dlon << ")\n\n"; - if (coordinates_att) delete coordinates_att; - exit(1); - } - } - - for (int i = 1; i < (int)lon_counts; ++i) - { - if ((fabs(lon_missing_value - lon_values[i]) < DELTA_TOLERANCE) || - (fabs(lon_missing_value - lon_values[i-1]) < DELTA_TOLERANCE)) continue; - double curr_delta = rescale_lon(lon_values[i] - lon_values[i-1]); - if (fabs(curr_delta - dlon) > DELTA_TOLERANCE) - { - mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the lon delta is constant\n\n"; - if (coordinates_att) delete coordinates_att; - exit(1); - } - } - } - - // Fill in the data structure. 
Remember to negate the longitude - // values since MET uses the mathematical coordinate system centered on - // the center of the earth rather than the regular map coordinate system. - - // Note that I am assuming that the data is ordered from the lower-left - // corner. I think this will generally be the case, but it is not - // guaranteed anywhere that I see. But if this is not the case, then we - // will probably also need to reorder the data itself. - - LatLonData data; - - data.name = latlon_proj_type; - data.lat_ll = lat_values[0]; - data.lon_ll = -lon_values[0]; - data.delta_lat = dlat; - data.delta_lon = dlon; - data.Nlat = lat_counts; - data.Nlon = lon_counts; - if (dlat < 0) { - data.delta_lat = -dlat; - data.lat_ll = lat_values[lat_counts-1]; + if (coordinates_att) { + delete coordinates_att; + coordinates_att = (NcVarAtt *)0; } - - grid.set(data); - if (dlat < 0) grid.set_swap_to_north(true); + get_grid_from_lat_lon_vars(_yCoordVar, _xCoordVar, lat_counts, lon_counts); } if (coordinates_att) delete coordinates_att; @@ -2963,26 +2808,37 @@ bool NcCfFile::get_grid_from_dimensions() exit(1); } - bool two_dim_corrd = false; long lat_counts = GET_NC_SIZE_P(_yDim); long lon_counts = GET_NC_SIZE_P(_xDim); - if (get_data_size(_xCoordVar) == (lon_counts*lat_counts) || - get_data_size(_yCoordVar) == (lon_counts*lat_counts)) { - two_dim_corrd = true; - } - else if (get_data_size(_xCoordVar) != lon_counts || - get_data_size(_yCoordVar) != lat_counts) - { - mlog << Error << "\n" << method_name << " -> " - << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; - exit(1); - } + get_grid_from_lat_lon_vars(_yCoordVar, _xCoordVar, lat_counts, lon_counts); + + return true; +} + + +//////////////////////////////////////////////////////////////////////// + +void NcCfFile::get_grid_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, + const long lat_counts, const long lon_counts) { + static const string method_name = 
"NcCfFile::get_grid_from_lat_lon_vars()"; // Figure out the dlat/dlon values from the dimension variables double lat_values[lat_counts]; double lon_values[lon_counts]; + bool two_dim_corrd = false; + long x_size = get_data_size(lon_var); + long y_size = get_data_size(lat_var); + long latlon_counts = lon_counts*lat_counts; + if( x_size == latlon_counts || y_size == latlon_counts ) two_dim_corrd = true; + else if( x_size != lon_counts || y_size != lat_counts) + { + mlog << Error << "\n" << method_name << " -> " + << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; + exit(1); + } + if (two_dim_corrd) { long cur[2], length[2]; for (int i=0; i<2; i++) { @@ -2990,14 +2846,14 @@ bool NcCfFile::get_grid_from_dimensions() length[i] = 1; } length[0] = lat_counts; - get_nc_data(_yCoordVar,lat_values, length, cur); + get_nc_data(lat_var,lat_values, length, cur); length[1] = lon_counts; length[0] = 1; - get_nc_data(_xCoordVar,lon_values, length, cur); + get_nc_data(lon_var,lon_values, length, cur); } else { - get_nc_data(_yCoordVar,lat_values); - get_nc_data(_xCoordVar,lon_values); + get_nc_data(lat_var,lat_values); + get_nc_data(lon_var,lon_values); } // Calculate dlat and dlon assuming they are constant. MET requires that @@ -3006,36 +2862,64 @@ bool NcCfFile::get_grid_from_dimensions() double dlat = lat_values[1] - lat_values[0]; double dlon = rescale_lon(lon_values[1] - lon_values[0]); - if (fabs(dlat - dlon) > DELTA_TOLERANCE) - { - mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the delta lat and delta lon are the same\n\n"; - exit(1); + ConcatString point_nccf; + bool skip_sanity_check = get_att_value_string(_ncFile, nc_att_met_point_nccf, point_nccf); + if (!skip_sanity_check) { + get_env(nc_att_met_point_nccf, point_nccf); + skip_sanity_check = (point_nccf == "yes"); } // As a sanity check, make sure that the deltas are constant through the // entire grid. 
CF compliancy doesn't require this, but MET does. - for (int i = 1; i < (int)lat_counts; ++i) - { - double curr_delta = lat_values[i] - lat_values[i-1]; - if (fabs(curr_delta - dlat) > DELTA_TOLERANCE) - { - mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the lat delta is constant\n\n"; - exit(1); + if (!skip_sanity_check) { + float lat_missing_value = bad_data_double; + float lon_missing_value = bad_data_double; + NcVarAtt *missing_value_att = (NcVarAtt*) 0; + missing_value_att = get_nc_att(lat_var, (string)"_FillValue"); + if (IS_VALID_NC_P(missing_value_att)) { + lat_missing_value = get_att_value_double(missing_value_att); } - } + if( missing_value_att ) delete missing_value_att; + missing_value_att = get_nc_att(lon_var, (string)"_FillValue"); + if (IS_VALID_NC_P(missing_value_att)) { + lon_missing_value = get_att_value_double(missing_value_att); + } + if( missing_value_att ) delete missing_value_att; - for (int i = 1; i < (int)lon_counts; ++i) - { - double curr_delta = rescale_lon(lon_values[i] - lon_values[i-1]); - if (fabs(curr_delta - dlon) > DELTA_TOLERANCE) + if (fabs(dlat - dlon) > DELTA_TOLERANCE) { mlog << Error << "\n" << method_name << " -> " - << "MET can only process Latitude/Longitude files where the lon delta is constant\n\n"; + << "MET can only process Latitude/Longitude files where the delta lat and delta lon are the same\n\n"; exit(1); } + + for (int i = 1; i < (int)lat_counts; ++i) + { + if ((fabs(lat_missing_value - lat_values[i]) < DELTA_TOLERANCE) || + (fabs(lat_missing_value - lat_values[i-1]) < DELTA_TOLERANCE)) continue; + double curr_delta = lat_values[i] - lat_values[i-1]; + if (fabs(curr_delta - dlat) > DELTA_TOLERANCE) + { + mlog << Error << "\n" << method_name << " -> " + << "MET can only process Latitude/Longitude files where the lat delta is constant (dlat=" + << dlat <<", dlon=" << dlon << ")\n\n"; + exit(1); + } + } + + for (int i = 1; i < (int)lon_counts; ++i) + { + if 
((fabs(lon_missing_value - lon_values[i]) < DELTA_TOLERANCE) || + (fabs(lon_missing_value - lon_values[i-1]) < DELTA_TOLERANCE)) continue; + double curr_delta = rescale_lon(lon_values[i] - lon_values[i-1]); + if (fabs(curr_delta - dlon) > DELTA_TOLERANCE) + { + mlog << Error << "\n" << method_name << " -> " + << "MET can only process Latitude/Longitude files where the lon delta is constant\n\n"; + exit(1); + } + } } // Fill in the data structure. Remember to negate the longitude @@ -3054,17 +2938,16 @@ bool NcCfFile::get_grid_from_dimensions() data.lon_ll = -lon_values[0]; data.delta_lat = dlat; data.delta_lon = dlon; - data.Nlat = _yDim->getSize(); - data.Nlon = _xDim->getSize(); + data.Nlat = lat_counts; + data.Nlon = lon_counts; if (dlat < 0) { data.delta_lat = -dlat; data.lat_ll = lat_values[lat_counts-1]; } + grid.set(data); if (dlat < 0) grid.set_swap_to_north(true); - - return true; + } - //////////////////////////////////////////////////////////////////////// diff --git a/met/src/libcode/vx_data2d_nccf/nccf_file.h b/met/src/libcode/vx_data2d_nccf/nccf_file.h index b9646ee08d..b7b42f70e7 100644 --- a/met/src/libcode/vx_data2d_nccf/nccf_file.h +++ b/met/src/libcode/vx_data2d_nccf/nccf_file.h @@ -136,6 +136,8 @@ class NcCfFile { NcDim *_yDim; NcDim *_tDim; + NcVar *_latVar; + NcVar *_lonVar; NcVar *_xCoordVar; NcVar *_yCoordVar; NcVarInfo *_time_var_info; @@ -176,7 +178,8 @@ class NcCfFile { bool get_grid_from_coordinates(const NcVar *data_var); bool get_grid_from_dimensions(); - + void get_grid_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, + const long lat_counts, const long lon_counts); }; diff --git a/met/src/libcode/vx_nc_util/nc_utils.cc b/met/src/libcode/vx_nc_util/nc_utils.cc index f49e725a8f..eea6bb495c 100644 --- a/met/src/libcode/vx_nc_util/nc_utils.cc +++ b/met/src/libcode/vx_nc_util/nc_utils.cc @@ -1408,7 +1408,7 @@ void _apply_scale_factor(float *data, const T *packed_data, if (data[idx] > 0) positive_cnt++; if (min_value > data[idx]) 
min_value = data[idx]; if (max_value < data[idx]) max_value = data[idx]; - unpacked_count++; + if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) unpacked_count++; } } mlog << Debug(4) << method_name << " unpacked data: count=" @@ -1459,7 +1459,7 @@ bool get_nc_data(NcVar *var, float *data) { switch ( type_id ) { case NcType::nc_INT: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { int fill_value = bad_data_int; int min_value = 2147483647; int max_value = -2147483648; @@ -1469,14 +1469,14 @@ bool get_nc_data(NcVar *var, float *data) { fill_value = get_att_value_int(att_fill_value); var->getVar(packed_data); - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "int", - add_offset, scale_factor); + _apply_scale_factor(data, packed_data, cell_count, + fill_value, min_value, max_value, "int", + add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_SHORT: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { short fill_value = (short)bad_data_int; short *packed_data = new short[cell_count]; @@ -1486,8 +1486,13 @@ bool get_nc_data(NcVar *var, float *data) { var->getVar(packed_data); if (unsigned_value) { - int value, unsigned_fill_value; - unsigned_fill_value = (unsigned short)fill_value; + unsigned short value; + int positive_cnt = 0; + int raw_min_value = 70000; + int raw_max_value = -70000; + float min_value = 10e10; + float max_value = -10e10; + unsigned short unsigned_fill_value = (unsigned short)fill_value; for (int idx=0; idx tmp_value) raw_min_value = tmp_value; - if (raw_max_value < tmp_value) raw_max_value = tmp_value; + if (raw_min_value > value) raw_min_value = value; + if (raw_max_value < value) raw_max_value = value; if (data[idx] > 0) positive_cnt++; if (min_value > data[idx]) min_value = data[idx]; if (max_value < data[idx]) max_value = data[idx]; @@ -1532,10 +1525,10 @@ bool get_nc_data(NcVar *var, float *data) { } break; case NcType::nc_USHORT: - if (!is_eq(0., 
add_offset) && !is_eq(1., scale_factor)) { - unsigned short fill_value = (unsigned short)bad_data_int; + { unsigned short min_value = 65535; unsigned short max_value = 0; + unsigned short fill_value = (unsigned short)bad_data_int; unsigned short *packed_data = new unsigned short[cell_count]; if (IS_VALID_NC_P(att_fill_value)) @@ -1543,14 +1536,14 @@ bool get_nc_data(NcVar *var, float *data) { var->getVar(packed_data); - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "unsigned short", - add_offset, scale_factor); + _apply_scale_factor(data, packed_data, cell_count, + fill_value, min_value, max_value, "unsigned short", + add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_BYTE: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { ncbyte fill_value = (ncbyte)bad_data_int; ncbyte *packed_data = new ncbyte[cell_count]; @@ -1559,28 +1552,22 @@ bool get_nc_data(NcVar *var, float *data) { } if (unsigned_value) { - int value, unsigned_fill_value; - unsigned_fill_value = (ncbyte)fill_value; - for (int idx=0; idx tmp_value) raw_min_value = tmp_value; - if (raw_max_value < tmp_value) raw_max_value = tmp_value; + value = packed_data[idx]; + if (unsigned_fill_value == value) + data[idx] = bad_data_float; + else { + data[idx] = (value * scale_factor) + add_offset; + unpacked_count++; + if (raw_min_value > value) raw_min_value = value; + if (raw_max_value < value) raw_max_value = value; if (data[idx] > 0) positive_cnt++; if (min_value > data[idx]) min_value = data[idx]; if (max_value < data[idx]) max_value = data[idx]; @@ -1596,15 +1583,15 @@ bool get_nc_data(NcVar *var, float *data) { else { ncbyte min_value = 127; ncbyte max_value = -127; - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "ncbyte", - add_offset, scale_factor); + _apply_scale_factor(data, packed_data, cell_count, + fill_value, min_value, max_value, "ncbyte", + add_offset, scale_factor); } delete [] 
packed_data; } break; case NcType::nc_UBYTE: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { unsigned char min_value = 255; unsigned char max_value = 0; unsigned char fill_value = (unsigned char)-99; @@ -1614,9 +1601,9 @@ bool get_nc_data(NcVar *var, float *data) { fill_value = get_att_value_char(att_fill_value); } - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "unsigned char", - add_offset, scale_factor); + _apply_scale_factor(data, packed_data, cell_count, + fill_value, min_value, max_value, "unsigned char", + add_offset, scale_factor); delete [] packed_data; } break; @@ -1697,7 +1684,7 @@ int _apply_scale_factor(double *data, const T *packed_data, if (data[idx] > 0) positive_cnt++; if (min_value > data[idx]) min_value = data[idx]; if (max_value < data[idx]) max_value = data[idx]; - unpacked_count++; + if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) unpacked_count++; } } mlog << Debug(4) << method_name << " unpacked data: count=" @@ -1749,7 +1736,7 @@ bool get_nc_data(NcVar *var, double *data) { switch ( type_id ) { case NcType::nc_INT: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { int fill_value = bad_data_int; int min_value = 2147483647; int max_value = -2147483648; @@ -1759,14 +1746,14 @@ bool get_nc_data(NcVar *var, double *data) { fill_value = get_att_value_int(att_fill_value); var->getVar(packed_data); - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "int", - add_offset, scale_factor); + _apply_scale_factor(data, packed_data, cell_count, + fill_value, min_value, max_value, "int", + add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_SHORT: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { short fill_value = (short)bad_data_int; short *packed_data = new short[cell_count]; @@ -1776,8 +1763,13 @@ bool get_nc_data(NcVar *var, double *data) { var->getVar(packed_data); if (unsigned_value) { - int value, 
unsigned_fill_value; - unsigned_fill_value = (unsigned short)fill_value; + int value; + int positive_cnt = 0; + int raw_min_value = 70000; + int raw_max_value = -70000; + float min_value = 10e10; + float max_value = -10e10; + int unsigned_fill_value = (unsigned short)fill_value; for (int idx=0; idx tmp_value) raw_min_value = tmp_value; - if (raw_max_value < tmp_value) raw_max_value = tmp_value; + if (raw_min_value > value) raw_min_value = value; + if (raw_max_value < value) raw_max_value = value; if (data[idx] > 0) positive_cnt++; if (min_value > data[idx]) min_value = data[idx]; if (max_value < data[idx]) max_value = data[idx]; @@ -1821,7 +1802,7 @@ bool get_nc_data(NcVar *var, double *data) { } break; case NcType::nc_USHORT: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { unsigned short fill_value = (unsigned short)bad_data_int; unsigned short *packed_data = new unsigned short[cell_count]; @@ -1839,7 +1820,7 @@ bool get_nc_data(NcVar *var, double *data) { } break; case NcType::nc_BYTE: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { ncbyte fill_value = (ncbyte)bad_data_int; ncbyte *packed_data = new ncbyte[cell_count]; @@ -1847,29 +1828,25 @@ bool get_nc_data(NcVar *var, double *data) { fill_value = get_att_value_char(att_fill_value); } + var->getVar(packed_data); + if (unsigned_value) { - int value, unsigned_fill_value; - unsigned_fill_value = (ncbyte)fill_value; - for (int idx=0; idx tmp_value) raw_min_value = tmp_value; - if (raw_max_value < tmp_value) raw_max_value = tmp_value; + value = packed_data[idx]; + if (unsigned_fill_value == value) + data[idx] = bad_data_double; + else { + data[idx] = (value * scale_factor) + add_offset; + unpacked_count++; + if (raw_min_value > value) raw_min_value = value; + if (raw_max_value < value) raw_max_value = value; if (data[idx] > 0) positive_cnt++; if (min_value > data[idx]) min_value = data[idx]; if (max_value < data[idx]) max_value = data[idx]; @@ -1885,15 +1862,17 @@ bool 
get_nc_data(NcVar *var, double *data) { else { ncbyte min_value = 127; ncbyte max_value = -127; - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "ncbyte", - add_offset, scale_factor); + _apply_scale_factor(data, packed_data, cell_count, + fill_value, min_value, max_value, "ncbyte", + add_offset, scale_factor); } delete [] packed_data; } break; case NcType::nc_UBYTE: - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) { + { + signed char min_value = 255; + signed char max_value = 0; signed char fill_value = (signed char)bad_data_int; signed char *packed_data = new signed char[cell_count]; @@ -1901,11 +1880,11 @@ bool get_nc_data(NcVar *var, double *data) { fill_value = get_att_value_char(att_fill_value); } - signed char min_value = 255; - signed char max_value = 0; - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "ncbyte", - add_offset, scale_factor); + var->getVar(packed_data); + + _apply_scale_factor(data, packed_data, cell_count, + fill_value, min_value, max_value, "ncbyte", + add_offset, scale_factor); delete [] packed_data; } break; @@ -3444,6 +3423,8 @@ NcVar get_nc_var_time(const NcFile *nc) { //if (is_nc_name_time(name)) found = true; if (get_nc_att_value(&(*itVar).second, "standard_name", name)) { if (is_nc_name_time(name)) found = true; + mlog << Debug(7) << method_name << "checked variable \"" + << name << "\" is_time: " << found << "\n"; } if (!found && get_nc_att_value(&(*itVar).second, "units", name)) { if (is_nc_unit_time(name.c_str())) { diff --git a/met/src/tools/other/point2grid/point2grid.cc b/met/src/tools/other/point2grid/point2grid.cc index 8e52a52dc2..5902e4e141 100644 --- a/met/src/tools/other/point2grid/point2grid.cc +++ b/met/src/tools/other/point2grid/point2grid.cc @@ -139,8 +139,9 @@ static void set_adp(const StringArray &); static void set_gaussian_dx(const StringArray &); static void set_gaussian_radius(const StringArray &); +static unixtime 
compute_unixtime(NcVar *time_var, unixtime var_value); static bool get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, - NcVar var_lat, NcVar var_lon); + NcVar var_lat, NcVar var_lon, bool *skip_times); static bool get_grid_mapping(Grid to_grid, IntArray *cellMapping, const IntArray obs_index_array, const int *obs_hids, const float *hdr_lats, const float *hdr_lons); @@ -172,7 +173,7 @@ static IntArray qc_flags; static void process_goes_file(NcFile *nc_in, MetConfig &config, VarInfo *, const Grid fr_grid, const Grid to_grid); -static unixtime find_valid_time(NcFile *nc_in); +static unixtime find_valid_time(NcVar time_var); static ConcatString get_goes_grid_input(MetConfig config, Grid fr_grid, Grid to_grid); static void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, ConcatString geostationary_file); @@ -349,6 +350,7 @@ void process_data_file() { mlog << Debug(1) << "Reading data file: " << InputFilename << "\n"; nc_in = open_ncfile(InputFilename.c_str()); + //Get the obs type before opening NetCDF int obs_type = get_obs_type(nc_in); bool goes_data = (obs_type == TYPE_GOES || obs_type == TYPE_GOES_ADP); @@ -513,11 +515,14 @@ bool get_nc_data_string_array(NcFile *nc, const char *var_name, int get_obs_type(NcFile *nc) { int obs_type = TYPE_UNKNOWN; - ConcatString att_val; + ConcatString att_val_scene_id; + ConcatString att_val_project; ConcatString input_type; static const char *method_name = "get_obs_type() -> "; - if (get_global_att(nc, (string)"scene_id", att_val)) { + bool has_project = get_global_att(nc, (string)"project", att_val_project); + bool has_scene_id = get_global_att(nc, (string)"scene_id", att_val_scene_id); + if( has_scene_id && has_project && att_val_project == "GOES" ) { obs_type = TYPE_GOES; input_type = "GOES"; if (0 < AdpFilename.length()) { @@ -803,7 +808,7 @@ void process_point_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, if (!is_eq(bad_data_int, conf_info.end_ds)) valid_end_ut += conf_info.end_ds; 
for(idx=0; idxgrid(); + int from_size = fr_grid.nx() * fr_grid.ny(); static const char *method_name = "process_point_file_with_latlon() -> "; NcVar var_lat = get_nc_var_lat(nc_in); @@ -1134,10 +1144,46 @@ void process_point_nccf_file(NcFile *nc_in, MetConfig &config, usage(); } - unixtime valid_time = find_valid_time(nc_in); + bool is_2d_time = false; + unixtime valid_time = bad_data_int; + valid_beg_ut = valid_end_ut = conf_info.valid_time; + + NcVar time_var = get_nc_var_time(nc_in); + if( IS_VALID_NC(time_var) ) { + if( 1 < get_dim_count(&time_var) ) { + is_2d_time = true; + double max_time = bad_data_double; + skip_times = new bool[from_size]; + valid_times = new double[from_size]; + if (get_nc_data(&time_var, valid_times)) { + int sec_per_unit; + bool no_leap_year; + unixtime ref_ut, tmp_time; + if( conf_info.valid_time > 0 ) { + if (!is_eq(bad_data_int, conf_info.beg_ds)) valid_beg_ut += conf_info.beg_ds; + if (!is_eq(bad_data_int, conf_info.end_ds)) valid_end_ut += conf_info.end_ds; + ref_ut = get_reference_unixtime(&time_var, sec_per_unit, no_leap_year); + } + for (int i=0; i 0 ) { + tmp_time = add_to_unixtime(ref_ut, sec_per_unit, + valid_times[i], no_leap_year); + skip_times[i] = (valid_beg_ut > tmp_time || tmp_time > valid_end_ut); + if( skip_times[i]) filtered_by_time++; + } + else skip_times[i] = false; + if (max_time < valid_times[i]) max_time = valid_times[i]; + } + valid_time = compute_unixtime(&time_var, max_time); + } + } + else valid_time = find_valid_time(time_var); + } to_dp.set_size(to_grid.nx(), to_grid.ny()); IntArray *cellMapping = new IntArray[to_grid.nx() * to_grid.ny()]; - get_grid_mapping(fr_grid, to_grid, cellMapping, var_lat, var_lon); + get_grid_mapping(fr_grid, to_grid, cellMapping, var_lat, var_lon, skip_times); + if( skip_times ) delete [] skip_times; + if( valid_times ) delete [] valid_times; // Loop through the requested fields for(int i=0; i " << unix_to_yyyymmdd_hhmmss(obs_time) << "\n"; + } + + return obs_time; +} + 
+//////////////////////////////////////////////////////////////////////// + static bool get_grid_mapping(Grid to_grid, IntArray *cellMapping, const IntArray obs_index_array, const int *obs_hids, const float *hdr_lats, const float *hdr_lons) { @@ -1739,8 +1809,9 @@ static bool get_grid_mapping(Grid to_grid, IntArray *cellMapping, //////////////////////////////////////////////////////////////////////// static void get_grid_mapping_latlon( - DataPlane from_dp, DataPlane to_dp, Grid to_grid, IntArray *cellMapping, - float *latitudes, float *longitudes, int from_lat_count, int from_lon_count) { + DataPlane from_dp, DataPlane to_dp, Grid to_grid, + IntArray *cellMapping, float *latitudes, float *longitudes, + int from_lat_count, int from_lon_count, bool *skip_times) { double x, y; float lat, lon; int idx_x, idx_y, to_offset; @@ -1763,6 +1834,7 @@ static void get_grid_mapping_latlon( for (int xIdx=0; xIdx " << unix_to_yyyymmdd_hhmmss(valid_time) << "\n"; + valid_time = compute_unixtime(&time_var, time_values[0]); } else { mlog << Error << "\n" << method_name << "-> " @@ -1896,7 +1961,7 @@ unixtime find_valid_time(NcFile *nc_in) { } } - if (valid_time < 0) { + if (valid_time == bad_data_int) { mlog << Error << "\n" << method_name << "-> " << "trouble finding time variable from \"" << InputFilename << "\"\n\n"; @@ -2070,7 +2135,7 @@ void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, else { check_lat_lon(data_size, latitudes, longitudes); get_grid_mapping_latlon(from_dp, to_dp, to_grid, cellMapping, latitudes, - longitudes, from_lat_count, from_lon_count); + longitudes, from_lat_count, from_lon_count, 0); } if (latitudes_buf) delete [] latitudes_buf; @@ -2507,7 +2572,10 @@ bool has_lat_lon_vars(NcFile *nc) { bool has_time_var = IS_VALID_NC(get_nc_var_time(nc)); //TODO: chech if this is a gridded data or a point data here!!! 
- + mlog << Debug(7) << "has_lat_lon_vars() " + << " has_lat_var: " << has_lat_var + << ", has_lon_var: " << has_lon_var + << ", has_time_var: " << has_time_var << "\n"; return (has_lat_var && has_lon_var && has_time_var); } diff --git a/test/config/Point2GridConfig_valid_time b/test/config/Point2GridConfig_valid_time new file mode 100644 index 0000000000..9463d22cc1 --- /dev/null +++ b/test/config/Point2GridConfig_valid_time @@ -0,0 +1,82 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Point2Grid configuration file. +// +// For additional information, please see the MET User's Guide. +// +//////////////////////////////////////////////////////////////////////////////// + +// +// NetCDF output variable timing information: +// - valid_time in YYYYMMDD[_HH[MMSS]] format +// +valid_time = "20201022_173000"; + +// +// Observation time window +// +//obs_window = { +// beg = -5400; +// end = 5400; +//} + +//////////////////////////////////////////////////////////////////////////////// + + +// +// Observation message type +// +//message_type = []; + +//////////////////////////////////////////////////////////////////////////////// + +// +// Mapping of input variable names to output variable names. 
+// +//var_name_map = [ +// { key = "1"; val = "PRES"; }, // GRIB: Pressure +// { key = "2"; val = "PRMSL"; }, // GRIB: Pressure reduced to MSL +// { key = "7"; val = "HGT"; }, // GRIB: Geopotential height +// { key = "11"; val = "TMP"; }, // GRIB: Temperature +// { key = "15"; val = "TMAX"; }, // GRIB: Max Temperature +// { key = "16"; val = "TMIN"; }, // GRIB: Min Temperature +// { key = "17"; val = "DPT"; }, // GRIB: Dewpoint +// { key = "20"; val = "VIS"; }, // GRIB: Visibility +// { key = "31"; val = "WDIR"; }, // GRIB: Wind direction +// { key = "32"; val = "WIND"; }, // GRIB: Wind speed +// { key = "33"; val = "UGRD"; }, // GRIB: u-component of wind +// { key = "34"; val = "VGRD"; }, // GRIB: v-component of wind +// { key = "52"; val = "RH"; }, // GRIB: Relative humidity +// { key = "61"; val = "APCP"; }, // GRIB: Total precipitation +// { key = "66"; val = "SNOD"; }, // GRIB: Snow Cover/Depth +// { key = "111"; val = "NETSOLAR"; }, // SRAD: Surface Radiation +// { key = "112"; val = "NETIR"; }, // SRAD: Surface Radiation +// { key = "117"; val = "TOTALNET"; }, // SRAD: Surface Radiation +// //{ key = "129"; val = "AOT"; }, // AeroNet: +// { key = "161"; val = "DIRECT"; }, // SRAD: Surface Radiation +// { key = "167"; val = "DIFFUSE"; }, // SRAD: Surface Radiation +// { key = "180"; val = "GUST"; }, // GRIB: Wind Gust +// { key = "204"; val = "DW_PSP"; }, // SRAD: Surface Radiation +// { key = "205"; val = "DW_PIR"; }, // SRAD: Surface Radiation +// { key = "211"; val = "UW_PSP"; }, // SRAD: Surface Radiation +// { key = "212"; val = "UW_PIR"; }, // SRAD: Surface Radiation +// { key = "0"; val = "WWSIS"; }, // WWSIS: Western Wind and Solar Integration Study +// { key = "-1"; val = "DW_CASETEMP"; }, // SRAD: Surface Radiation +// { key = "-2"; val = "DW_DOMETEMP"; }, // SRAD: Surface Radiation +// { key = "-3"; val = "UW_CASETEMP"; }, // SRAD: Surface Radiation +// { key = "-4"; val = "UW_DOMETEMP"; }, // SRAD: Surface Radiation +// { key = "-5"; val = "UVB"; 
}, // SRAD: Surface Radiation +// { key = "-6"; val = "PAR"; } // SRAD: Surface Radiation +// //{ key = ""; val = ""; }, // GRIB: +//]; + +//////////////////////////////////////////////////////////////////////////////// + +//quality_mark_thresh = 2; + +//////////////////////////////////////////////////////////////////////////////// + +tmp_dir = "/tmp"; +version = "V10.0.0"; + +//////////////////////////////////////////////////////////////////////////////// diff --git a/test/xml/unit_point2grid.xml b/test/xml/unit_point2grid.xml index 2f26db48c9..4b2be9eb82 100644 --- a/test/xml/unit_point2grid.xml +++ b/test/xml/unit_point2grid.xml @@ -157,7 +157,7 @@ -field 'name="AOD_Smoke"; level="(*,*)";' \ -adp &DATA_DIR_MODEL;/goes_16/OR_ABI-L2-ADPC-M6_G16_s20192662141196_e20192662143569_c20192662144526.nc \ -qc 1,2 -method MAX \ - -v 5:warning + -v 1 &OUTPUT_DIR;/point2grid/point2grid_GOES_16_ADP.nc @@ -202,20 +202,20 @@ - + &MET_BIN;/point2grid MET_TMP_DIR &OUTPUT_DIR; \ &DATA_DIR_OBS;/point_obs/UKMet_tripolar/prodm_op_am-hr.gridT_20200907_00.036_temp.nc \ - G231 \ - &OUTPUT_DIR;/point2grid/point2grid_NCCF_TO_G231.nc \ + "latlon 500 300 40 -30 0.1 0.1" \ + &OUTPUT_DIR;/point2grid/point2grid_NCCF_UKMet.nc \ -field 'name="votemper"; level="(0,0,*,*)";' \ -v 1 - &OUTPUT_DIR;/point2grid/point2grid_NCCF_TO_G231.nc + &OUTPUT_DIR;/point2grid/point2grid_NCCF_UKMet.nc @@ -236,4 +236,21 @@ &OUTPUT_DIR;/point2grid/point2grid_GOES_16_AOD_TO_G212_unsigned.nc + + + &MET_BIN;/point2grid + + MET_TMP_DIR &OUTPUT_DIR; + + \ + &DATA_DIR_OBS;/point_obs/VOLCAT_HIMAWARI-8_FLDK_s2020296_173000_v300250_VCB_w167_FLDK_b2020295_204000_g001.nc \ + "latlon 150 150 45 155 0.1 0.1" \ + &OUTPUT_DIR;/point2grid/point2grid_2D_time_west_bering_sea.nc \ + -field 'name="brightness_temperature_4micron"; level="(*,*)";' \ + -v 1 + + + &OUTPUT_DIR;/point2grid/point2grid_2D_time_west_bering_sea.nc + +